Commit af2ae899 authored by Amir MOHAMMADI's avatar Amir MOHAMMADI

Merge branch 'dask-pipelines' into 'master'

Dask pipelines fixes

See merge request !113
parents fe2730d5 829d3cff
Pipeline #46488 passed with stages in 6 minutes and 20 seconds
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This file contains the configuration to run an Image Quality Measures (IQM) and one-class GMM based face PAD algorithm.
The settings of the preprocessor and extractor are tuned for the Replay-Attack database.
The IQM features used in this algorithm/resource are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
"""
#=======================================================================================
sub_directory = 'qm_one_class_gmm'
"""
Sub-directory where results will be placed.
You may change this setting using the ``--sub-directory`` command-line option
or the attribute ``sub_directory`` in a configuration file loaded **after**
this resource.
"""
#=======================================================================================
# define preprocessor:
from ..preprocessor import FaceCropAlign
from bob.bio.video.preprocessor import Wrapper
from bob.bio.video.utils import FrameSelector
FACE_SIZE = 64 # The size of the resulting face
RGB_OUTPUT_FLAG = True # RGB output
USE_FACE_ALIGNMENT = False # use annotations
MAX_IMAGE_SIZE = None # no limiting here
FACE_DETECTION_METHOD = None # use annotations
MIN_FACE_SIZE = 50 # skip small faces
_image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
                                    rgb_output_flag=RGB_OUTPUT_FLAG,
                                    use_face_alignment=USE_FACE_ALIGNMENT,
                                    max_image_size=MAX_IMAGE_SIZE,
                                    face_detection_method=FACE_DETECTION_METHOD,
                                    min_face_size=MIN_FACE_SIZE)

_frame_selector = FrameSelector(selection_style="all")

preprocessor = Wrapper(preprocessor=_image_preprocessor,
                       frame_selector=_frame_selector)
"""
In the preprocessing stage the face is cropped in each frame of the input video, given the facial annotations.
The face is scaled to ``FACE_SIZE x FACE_SIZE`` pixels. Faces smaller than the ``MIN_FACE_SIZE``
threshold are discarded. The preprocessor is similar to the one introduced in [CAM12]_; the face
location is taken from the annotations rather than detected (``FACE_DETECTION_METHOD = None``).
The preprocessed frames are RGB facial images (``RGB_OUTPUT_FLAG = True``).
"""
#=======================================================================================
# define extractor:
from ..extractor import ImageQualityMeasure
from bob.bio.video.extractor import Wrapper
GALBALLY = True
MSU = True
DTYPE = None
extractor = Wrapper(ImageQualityMeasure(galbally=GALBALLY, msu=MSU, dtype=DTYPE))
"""
In the feature extraction stage, the Image Quality Measures are extracted from each frame of the preprocessed RGB video.
The features to be computed are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
"""
#=======================================================================================
# define algorithm:
from bob.pad.base.algorithm import OneClassGMM
N_COMPONENTS = 50
RANDOM_STATE = 3
FRAME_LEVEL_SCORES_FLAG = True
algorithm = OneClassGMM(n_components=N_COMPONENTS,
                        random_state=RANDOM_STATE,
                        frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
"""
A GMM with ``N_COMPONENTS = 50`` components is trained using samples of the bona fide (*real*) class only.
The trained GMM is then used to classify samples into *real* and *attack* classes.
One score is produced for each frame of the input video (``frame_level_scores_flag = True``).
"""
from bob.bio.base.annotator.FailSafe import translate_kwargs
from sklearn.base import BaseEstimator, TransformerMixin
from ..utils import extract_patches
from bob.bio.video import VideoLikeContainer
from collections import OrderedDict


class ImagePatches(TransformerMixin, BaseEstimator):
    """Extracts patches of images and returns them in a VideoLikeContainer. You need
    to wrap the further blocks (extractor and algorithm) that come after this
    in bob.bio.video wrappers.
    """

    def __init__(
        self, block_size, block_overlap=(0, 0), n_random_patches=None, **kwargs
    ):
        super(ImagePatches, self).__init__(**kwargs)
        self.block_size = block_size
        self.block_overlap = block_overlap
        self.n_random_patches = n_random_patches

    def transform(self, images):
        return [self.transform_one_image(img) for img in images]

    def transform_one_image(self, image):

        patches = extract_patches(
            image, self.block_size, self.block_overlap, self.n_random_patches
        )
        vc = VideoLikeContainer(patches, range(len(patches)))

        return vc


class VideoPatches(TransformerMixin, BaseEstimator):
    """Extracts patches of images from video containers and returns them in a
    VideoLikeContainer.
    """

    def __init__(
        self,
        face_cropper,
        block_size,
        block_overlap=(0, 0),
        n_random_patches=None,
        normalizer=None,
        **kwargs,
    ):
        super(VideoPatches, self).__init__(**kwargs)
        self.face_cropper = face_cropper
        self.block_size = block_size
        self.block_overlap = block_overlap
        self.n_random_patches = n_random_patches
        self.normalizer = normalizer

    def transform(self, videos, annotations=None):
        kwargs = translate_kwargs(dict(annotations=annotations), len(videos))
        return [
            self.transform_one_video(vid, **kw) for vid, kw in zip(videos, kwargs)
        ]

    def transform_one_video(self, frames, annotations=None):
        annotations = annotations or {}

        if self.normalizer is not None:
            annotations = OrderedDict(self.normalizer(annotations))

        all_patches = []
        for frame, index in zip(frames, frames.indices):

            # if annotations are given, and if particular frame annotations are
            # not missing, we take them:
            annots = annotations.get(str(index))

            # preprocess image (by default: crop a face)
            preprocessed = self.face_cropper(frame, annots)
            if preprocessed is None:
                continue

            # extract patches
            patches = extract_patches(
                preprocessed, self.block_size, self.block_overlap, self.n_random_patches
            )
            all_patches.extend(patches)

        vc = VideoLikeContainer(all_patches, range(len(all_patches)))

        if not len(vc):
            return None

        return vc
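# Hedged usage sketch (not part of the module above): the face cropper, frame
# data and block size below are hypothetical placeholders; in practice the
# cropper would be a configured face-crop transformer mapping one frame (and
# optional annotations) to a cropped face or ``None``.
#
#     import numpy as np
#
#     def toy_face_cropper(frame, annotations=None):
#         # stand-in for a real face cropper: fixed 64x64 top-left region
#         return frame[:64, :64]
#
#     frames = np.random.randint(0, 255, (5, 128, 128), dtype="uint8")
#     video = VideoLikeContainer(frames, range(len(frames)))
#
#     video_patches = VideoPatches(
#         face_cropper=toy_face_cropper,
#         block_size=(16, 16),
#         block_overlap=(0, 0),
#     )
#
#     # one VideoLikeContainer of patches per input video
#     patched_videos = video_patches.transform([video])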