Commit 829d3cff authored by Amir MOHAMMADI's avatar Amir MOHAMMADI

Fix the patch preprocessor

parent beeeb418
Pipeline #46487 passed with stage
in 10 minutes and 44 seconds
from collections import OrderedDict

from sklearn.base import BaseEstimator, TransformerMixin

from bob.bio.base.annotator.FailSafe import translate_kwargs
from bob.bio.video import VideoLikeContainer

from ..utils import extract_patches
class ImagePatches(TransformerMixin, BaseEstimator):
    """Extracts patches of images and returns them in a VideoLikeContainer.

    You need to wrap the further blocks (extractor and algorithm) that come
    after this in bob.bio.video wrappers.

    Parameters
    ----------
    block_size
        Size of each extracted patch, forwarded to ``extract_patches``.
    block_overlap
        Overlap between neighboring patches; ``(0, 0)`` means no overlap.
    n_random_patches
        If not ``None``, only this many randomly selected patches are kept.
    """

    def __init__(
        self, block_size, block_overlap=(0, 0), n_random_patches=None, **kwargs
    ):
        super(ImagePatches, self).__init__(**kwargs)
        self.block_size = block_size
        self.block_overlap = block_overlap
        self.n_random_patches = n_random_patches

    def fit(self, X, y=None):
        # This transformer is stateless, but ``fit`` must exist so that
        # TransformerMixin.fit_transform (which calls self.fit(X).transform(X))
        # works.
        return self

    def transform(self, images):
        """Extract patches from every image in ``images``.

        Returns a list with one VideoLikeContainer per input image.
        """
        return [self.transform_one_image(img) for img in images]

    def transform_one_image(self, image):
        """Extract patches of one image and return them in a VideoLikeContainer.

        The container's indices are simply ``range(number_of_patches)``.
        """
        patches = extract_patches(
            image, self.block_size, self.block_overlap, self.n_random_patches
        )
        return VideoLikeContainer(patches, range(len(patches)))
class VideoPatches(TransformerMixin, BaseEstimator):
    """Extracts patches of face images from video containers and returns them
    in a VideoLikeContainer.

    Parameters
    ----------
    face_cropper : callable
        Called as ``face_cropper(frame, annotations)``; expected to return the
        preprocessed (cropped) frame, or ``None`` when preprocessing fails.
    block_size
        Size of each extracted patch, forwarded to ``extract_patches``.
    block_overlap
        Overlap between neighboring patches; ``(0, 0)`` means no overlap.
    n_random_patches
        If not ``None``, only this many randomly selected patches are kept
        per frame.
    normalizer : callable, optional
        If given, applied to the annotations before use; its output is
        wrapped in an ``OrderedDict``.
    """

    def __init__(
        self,
        face_cropper,
        block_size,
        block_overlap=(0, 0),
        n_random_patches=None,
        normalizer=None,
        **kwargs,
    ):
        super(VideoPatches, self).__init__(**kwargs)
        self.face_cropper = face_cropper
        self.block_size = block_size
        self.block_overlap = block_overlap
        self.n_random_patches = n_random_patches
        self.normalizer = normalizer

    def fit(self, X, y=None):
        # Stateless transformer; ``fit`` must exist so that
        # TransformerMixin.fit_transform works.
        return self

    def transform(self, videos, annotations=None):
        """Extract face patches from every video in ``videos``.

        ``annotations`` (one entry per video) is distributed over the samples
        with ``translate_kwargs``.
        """
        kwargs = translate_kwargs(dict(annotations=annotations), len(videos))
        return [
            self.transform_one_video(vid, **kw) for vid, kw in zip(videos, kwargs)
        ]

    def transform_one_video(self, frames, annotations=None):
        """Extract patches from the (cropped) faces of one video.

        Returns a VideoLikeContainer with all patches of all frames, or
        ``None`` when no patch could be extracted from any frame.
        """
        annotations = annotations or {}
        if self.normalizer is not None:
            annotations = OrderedDict(self.normalizer(annotations))

        all_patches = []
        for frame, index in zip(frames, frames.indices):
            # Per-frame annotations appear to be keyed by the frame index as a
            # string — NOTE(review): confirm against the annotator output.
            annots = annotations.get(str(index))

            # Preprocess the frame (by default: crop the face); frames where
            # preprocessing fails are skipped rather than aborting the video.
            preprocessed = self.face_cropper(frame, annots)
            if preprocessed is None:
                continue

            patches = extract_patches(
                preprocessed,
                self.block_size,
                self.block_overlap,
                self.n_random_patches,
            )
            all_patches.extend(patches)

        vc = VideoLikeContainer(all_patches, range(len(all_patches)))
        # Preserve legacy behavior: an empty result is reported as None so
        # downstream failsafe logic can detect it.
        if not len(vc):
            return None
        return vc
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment