Commit 710d971d authored by Olegs NIKISINS

Merge branch 'preproc_refactor' into 'master'

Preprocessor refactoring: VideoFaceCrop and ImageFaceCrop replaced with FaceCropAlign + Wrapper

See merge request !57
parents e0fc9e9b 115d5ef1
Pipeline #17476 passed in 21 minutes and 24 seconds
@@ -20,35 +20,35 @@ this resource.
#=======================================================================================
# define preprocessor:
from ..preprocessor import VideoFaceCrop
CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
FIXED_POSITIONS = None
MASK_SIGMA = None # The sigma for random values in areas outside the image
MASK_NEIGHBORS = 5 # The number of neighbors to consider while extrapolating
MASK_SEED = None # The seed for generating random values during extrapolation
CHECK_FACE_SIZE_FLAG = True # Check the size of the face
MIN_FACE_SIZE = 50 # Minimal possible size of the face
USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
COLOR_CHANNEL = 'gray' # Convert image to gray-scale format
preprocessor = VideoFaceCrop(
cropped_image_size=CROPPED_IMAGE_SIZE,
cropped_positions=CROPPED_POSITIONS,
fixed_positions=FIXED_POSITIONS,
mask_sigma=MASK_SIGMA,
mask_neighbors=MASK_NEIGHBORS,
mask_seed=None,
check_face_size_flag=CHECK_FACE_SIZE_FLAG,
min_face_size=MIN_FACE_SIZE,
use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
color_channel=COLOR_CHANNEL)
from ..preprocessor import FaceCropAlign
from bob.bio.video.preprocessor import Wrapper
from bob.bio.video.utils import FrameSelector
FACE_SIZE = 64 # The size of the resulting face
RGB_OUTPUT_FLAG = False # Gray-scale output
USE_FACE_ALIGNMENT = False # no face alignment, crop only
MAX_IMAGE_SIZE = None # no limiting here
FACE_DETECTION_METHOD = None # use annotations
MIN_FACE_SIZE = 50 # skip small faces
_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
rgb_output_flag = RGB_OUTPUT_FLAG,
use_face_alignment = USE_FACE_ALIGNMENT,
max_image_size = MAX_IMAGE_SIZE,
face_detection_method = FACE_DETECTION_METHOD,
min_face_size = MIN_FACE_SIZE)
_frame_selector = FrameSelector(selection_style = "all")
preprocessor = Wrapper(preprocessor = _image_preprocessor,
frame_selector = _frame_selector)
"""
In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
The size of the face is normalized to ``cropped_image_size`` dimensions. Faces smaller than
the ``min_face_size`` threshold are discarded. The preprocessor is similar to the one
introduced in [CAM12]_, which is selected by setting ``use_local_cropper_flag = True``.
The face is rescaled to a square crop of ``FACE_SIZE`` x ``FACE_SIZE`` pixels. Faces smaller
than the ``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one
introduced in [CAM12]_, which is selected by setting ``FACE_DETECTION_METHOD = None``.
"""
#=======================================================================================
@@ -22,35 +22,35 @@ this resource.
#=======================================================================================
# define preprocessor:
from ..preprocessor import VideoFaceCrop
CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
FIXED_POSITIONS = None
MASK_SIGMA = None # The sigma for random values in areas outside the image
MASK_NEIGHBORS = 5 # The number of neighbors to consider while extrapolating
MASK_SEED = None # The seed for generating random values during extrapolation
CHECK_FACE_SIZE_FLAG = True # Check the size of the face
MIN_FACE_SIZE = 50 # Minimal possible size of the face
USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
COLOR_CHANNEL = 'gray' # Convert image to gray-scale format
preprocessor = VideoFaceCrop(
cropped_image_size=CROPPED_IMAGE_SIZE,
cropped_positions=CROPPED_POSITIONS,
fixed_positions=FIXED_POSITIONS,
mask_sigma=MASK_SIGMA,
mask_neighbors=MASK_NEIGHBORS,
mask_seed=None,
check_face_size_flag=CHECK_FACE_SIZE_FLAG,
min_face_size=MIN_FACE_SIZE,
use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
color_channel=COLOR_CHANNEL)
from ..preprocessor import FaceCropAlign
from bob.bio.video.preprocessor import Wrapper
from bob.bio.video.utils import FrameSelector
FACE_SIZE = 64 # The size of the resulting face
RGB_OUTPUT_FLAG = False # Gray-scale output
USE_FACE_ALIGNMENT = False # no face alignment, crop only
MAX_IMAGE_SIZE = None # no limiting here
FACE_DETECTION_METHOD = None # use annotations
MIN_FACE_SIZE = 50 # skip small faces
_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
rgb_output_flag = RGB_OUTPUT_FLAG,
use_face_alignment = USE_FACE_ALIGNMENT,
max_image_size = MAX_IMAGE_SIZE,
face_detection_method = FACE_DETECTION_METHOD,
min_face_size = MIN_FACE_SIZE)
_frame_selector = FrameSelector(selection_style = "all")
preprocessor = Wrapper(preprocessor = _image_preprocessor,
frame_selector = _frame_selector)
"""
In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
The size of the face is normalized to ``cropped_image_size`` dimensions. Faces smaller than
the ``min_face_size`` threshold are discarded. The preprocessor is similar to the one
introduced in [CAM12]_, which is selected by setting ``use_local_cropper_flag = True``.
The face is rescaled to a square crop of ``FACE_SIZE`` x ``FACE_SIZE`` pixels. Faces smaller
than the ``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one
introduced in [CAM12]_, which is selected by setting ``FACE_DETECTION_METHOD = None``.
"""
#=======================================================================================
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from bob.pad.face.preprocessor import VideoFaceCrop
from ..preprocessor import FaceCropAlign
#=======================================================================================
from bob.bio.video.preprocessor import Wrapper
from bob.bio.video.utils import FrameSelector
# =======================================================================================
# Define instances here:
CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
FIXED_POSITIONS = None
MASK_SIGMA = None # The sigma for random values in areas outside the image
MASK_NEIGHBORS = 5 # The number of neighbors to consider while extrapolating
MASK_SEED = None # The seed for generating random values during extrapolation
CHECK_FACE_SIZE_FLAG = True # Check the size of the face
MIN_FACE_SIZE = 50
USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
RGB_OUTPUT_FLAG = True # Return RGB cropped face using local cropper
DETECT_FACES_FLAG = True # find annotations locally replacing the database annotations
FACE_DETECTION_METHOD = "dlib"
rgb_face_detector_dlib = VideoFaceCrop(
cropped_image_size=CROPPED_IMAGE_SIZE,
cropped_positions=CROPPED_POSITIONS,
fixed_positions=FIXED_POSITIONS,
mask_sigma=MASK_SIGMA,
mask_neighbors=MASK_NEIGHBORS,
mask_seed=None,
check_face_size_flag=CHECK_FACE_SIZE_FLAG,
min_face_size=MIN_FACE_SIZE,
use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
rgb_output_flag=RGB_OUTPUT_FLAG,
detect_faces_flag=DETECT_FACES_FLAG,
face_detection_method=FACE_DETECTION_METHOD)
CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
FIXED_POSITIONS = None
MASK_SIGMA = None # The sigma for random values in areas outside the image
MASK_NEIGHBORS = 5 # The number of neighbors to consider while extrapolating
MASK_SEED = None # The seed for generating random values during extrapolation
CHECK_FACE_SIZE_FLAG = True # Check the size of the face
MIN_FACE_SIZE = 50
USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
RGB_OUTPUT_FLAG = True # Return RGB cropped face using local cropper
DETECT_FACES_FLAG = True # find annotations locally replacing the database annotations
FACE_DETECTION_METHOD = "mtcnn"
rgb_face_detector_mtcnn = VideoFaceCrop(
cropped_image_size=CROPPED_IMAGE_SIZE,
cropped_positions=CROPPED_POSITIONS,
fixed_positions=FIXED_POSITIONS,
mask_sigma=MASK_SIGMA,
mask_neighbors=MASK_NEIGHBORS,
mask_seed=None,
check_face_size_flag=CHECK_FACE_SIZE_FLAG,
min_face_size=MIN_FACE_SIZE,
use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
rgb_output_flag=RGB_OUTPUT_FLAG,
detect_faces_flag=DETECT_FACES_FLAG,
face_detection_method=FACE_DETECTION_METHOD)
FACE_SIZE = 64 # The size of the resulting face
RGB_OUTPUT_FLAG = True # RGB output
USE_FACE_ALIGNMENT = False # no face alignment, crop only
MAX_IMAGE_SIZE = None # no limiting here
FACE_DETECTION_METHOD = "dlib" # use dlib face detection
MIN_FACE_SIZE = 50 # skip small faces
_image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
rgb_output_flag=RGB_OUTPUT_FLAG,
use_face_alignment=USE_FACE_ALIGNMENT,
max_image_size=MAX_IMAGE_SIZE,
face_detection_method=FACE_DETECTION_METHOD,
min_face_size=MIN_FACE_SIZE)
_frame_selector = FrameSelector(selection_style = "all")
rgb_face_detector_dlib = Wrapper(preprocessor = _image_preprocessor,
frame_selector = _frame_selector)
# =======================================================================================
FACE_DETECTION_METHOD = "mtcnn" # use mtcnn face detection
_image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
rgb_output_flag=RGB_OUTPUT_FLAG,
use_face_alignment=USE_FACE_ALIGNMENT,
max_image_size=MAX_IMAGE_SIZE,
face_detection_method=FACE_DETECTION_METHOD,
min_face_size=MIN_FACE_SIZE)
rgb_face_detector_mtcnn = Wrapper(preprocessor = _image_preprocessor,
frame_selector = _frame_selector)
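# =======================================================================================
# Editorial sketch, not part of this merge request: rgb_face_detector_dlib and
# rgb_face_detector_mtcnn above differ only in FACE_DETECTION_METHOD, so the
# duplicated FaceCropAlign/Wrapper boilerplate could be folded into a small
# factory. The helper name is hypothetical.
def _make_rgb_face_detector(method):
    """Return a video face-crop preprocessor detecting faces with ``method``."""
    image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
                                       rgb_output_flag=RGB_OUTPUT_FLAG,
                                       use_face_alignment=USE_FACE_ALIGNMENT,
                                       max_image_size=MAX_IMAGE_SIZE,
                                       face_detection_method=method,
                                       min_face_size=MIN_FACE_SIZE)
    return Wrapper(preprocessor=image_preprocessor,
                   frame_selector=FrameSelector(selection_style="all"))

# equivalent to the two instances defined above:
# rgb_face_detector_dlib = _make_rgb_face_detector("dlib")
# rgb_face_detector_mtcnn = _make_rgb_face_detector("mtcnn")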
@@ -19,35 +19,35 @@ this resource.
#=======================================================================================
# define preprocessor:
from ..preprocessor import VideoFaceCrop
CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
FIXED_POSITIONS = None
MASK_SIGMA = None # The sigma for random values in areas outside the image
MASK_NEIGHBORS = 5 # The number of neighbors to consider while extrapolating
MASK_SEED = None # The seed for generating random values during extrapolation
CHECK_FACE_SIZE_FLAG = True # Check the size of the face
MIN_FACE_SIZE = 50
USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
RGB_OUTPUT_FLAG = True # Return RGB cropped face using local cropper
preprocessor = VideoFaceCrop(
cropped_image_size=CROPPED_IMAGE_SIZE,
cropped_positions=CROPPED_POSITIONS,
fixed_positions=FIXED_POSITIONS,
mask_sigma=MASK_SIGMA,
mask_neighbors=MASK_NEIGHBORS,
mask_seed=None,
check_face_size_flag=CHECK_FACE_SIZE_FLAG,
min_face_size=MIN_FACE_SIZE,
use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
rgb_output_flag=RGB_OUTPUT_FLAG)
from ..preprocessor import FaceCropAlign
from bob.bio.video.preprocessor import Wrapper
from bob.bio.video.utils import FrameSelector
FACE_SIZE = 64 # The size of the resulting face
RGB_OUTPUT_FLAG = True # RGB output
USE_FACE_ALIGNMENT = False # no face alignment, crop only
MAX_IMAGE_SIZE = None # no limiting here
FACE_DETECTION_METHOD = None # use annotations
MIN_FACE_SIZE = 50 # skip small faces
_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
rgb_output_flag = RGB_OUTPUT_FLAG,
use_face_alignment = USE_FACE_ALIGNMENT,
max_image_size = MAX_IMAGE_SIZE,
face_detection_method = FACE_DETECTION_METHOD,
min_face_size = MIN_FACE_SIZE)
_frame_selector = FrameSelector(selection_style = "all")
preprocessor = Wrapper(preprocessor = _image_preprocessor,
frame_selector = _frame_selector)
"""
In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
The size of the face is normalized to ``cropped_image_size`` dimensions. Faces smaller than
the ``min_face_size`` threshold are discarded. The preprocessor is similar to the one
introduced in [CAM12]_, which is selected by setting ``use_local_cropper_flag = True``. The preprocessed frame is the RGB
The face is rescaled to a square crop of ``FACE_SIZE`` x ``FACE_SIZE`` pixels. Faces smaller
than the ``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one
introduced in [CAM12]_, which is selected by setting ``FACE_DETECTION_METHOD = None``. The preprocessed frame is the RGB
facial image, which is defined by ``RGB_OUTPUT_FLAG = True``.
"""
@@ -19,35 +19,35 @@ this resource.
#=======================================================================================
# define preprocessor:
from ..preprocessor import VideoFaceCrop
CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
FIXED_POSITIONS = None
MASK_SIGMA = None # The sigma for random values in areas outside the image
MASK_NEIGHBORS = 5 # The number of neighbors to consider while extrapolating
MASK_SEED = None # The seed for generating random values during extrapolation
CHECK_FACE_SIZE_FLAG = True # Check the size of the face
MIN_FACE_SIZE = 50
USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
RGB_OUTPUT_FLAG = True # Return RGB cropped face using local cropper
preprocessor = VideoFaceCrop(
cropped_image_size=CROPPED_IMAGE_SIZE,
cropped_positions=CROPPED_POSITIONS,
fixed_positions=FIXED_POSITIONS,
mask_sigma=MASK_SIGMA,
mask_neighbors=MASK_NEIGHBORS,
mask_seed=None,
check_face_size_flag=CHECK_FACE_SIZE_FLAG,
min_face_size=MIN_FACE_SIZE,
use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
rgb_output_flag=RGB_OUTPUT_FLAG)
from ..preprocessor import FaceCropAlign
from bob.bio.video.preprocessor import Wrapper
from bob.bio.video.utils import FrameSelector
FACE_SIZE = 64 # The size of the resulting face
RGB_OUTPUT_FLAG = True # RGB output
USE_FACE_ALIGNMENT = False # no face alignment, crop only
MAX_IMAGE_SIZE = None # no limiting here
FACE_DETECTION_METHOD = None # use annotations
MIN_FACE_SIZE = 50 # skip small faces
_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
rgb_output_flag = RGB_OUTPUT_FLAG,
use_face_alignment = USE_FACE_ALIGNMENT,
max_image_size = MAX_IMAGE_SIZE,
face_detection_method = FACE_DETECTION_METHOD,
min_face_size = MIN_FACE_SIZE)
_frame_selector = FrameSelector(selection_style = "all")
preprocessor = Wrapper(preprocessor = _image_preprocessor,
frame_selector = _frame_selector)
"""
In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
The size of the face is normalized to ``cropped_image_size`` dimensions. Faces smaller than
the ``min_face_size`` threshold are discarded. The preprocessor is similar to the one
introduced in [CAM12]_, which is selected by setting ``use_local_cropper_flag = True``. The preprocessed frame is the RGB
The face is rescaled to a square crop of ``FACE_SIZE`` x ``FACE_SIZE`` pixels. Faces smaller
than the ``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one
introduced in [CAM12]_, which is selected by setting ``FACE_DETECTION_METHOD = None``. The preprocessed frame is the RGB
facial image, which is defined by ``RGB_OUTPUT_FLAG = True``.
"""
@@ -21,35 +21,35 @@ this resource.
#=======================================================================================
# define preprocessor:
from ..preprocessor import VideoFaceCrop
CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
FIXED_POSITIONS = None
MASK_SIGMA = None # The sigma for random values in areas outside the image
MASK_NEIGHBORS = 5 # The number of neighbors to consider while extrapolating
MASK_SEED = None # The seed for generating random values during extrapolation
CHECK_FACE_SIZE_FLAG = True # Check the size of the face
MIN_FACE_SIZE = 50
USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
RGB_OUTPUT_FLAG = True # Return RGB cropped face using local cropper
preprocessor = VideoFaceCrop(
cropped_image_size=CROPPED_IMAGE_SIZE,
cropped_positions=CROPPED_POSITIONS,
fixed_positions=FIXED_POSITIONS,
mask_sigma=MASK_SIGMA,
mask_neighbors=MASK_NEIGHBORS,
mask_seed=None,
check_face_size_flag=CHECK_FACE_SIZE_FLAG,
min_face_size=MIN_FACE_SIZE,
use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
rgb_output_flag=RGB_OUTPUT_FLAG)
from ..preprocessor import FaceCropAlign
from bob.bio.video.preprocessor import Wrapper
from bob.bio.video.utils import FrameSelector
FACE_SIZE = 64 # The size of the resulting face
RGB_OUTPUT_FLAG = True # RGB output
USE_FACE_ALIGNMENT = False # no face alignment, crop only
MAX_IMAGE_SIZE = None # no limiting here
FACE_DETECTION_METHOD = None # use annotations
MIN_FACE_SIZE = 50 # skip small faces
_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
rgb_output_flag = RGB_OUTPUT_FLAG,
use_face_alignment = USE_FACE_ALIGNMENT,
max_image_size = MAX_IMAGE_SIZE,
face_detection_method = FACE_DETECTION_METHOD,
min_face_size = MIN_FACE_SIZE)
_frame_selector = FrameSelector(selection_style = "all")
preprocessor = Wrapper(preprocessor = _image_preprocessor,
frame_selector = _frame_selector)
"""
In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
The size of the face is normalized to ``cropped_image_size`` dimensions. Faces smaller than
the ``min_face_size`` threshold are discarded. The preprocessor is similar to the one
introduced in [CAM12]_, which is selected by setting ``use_local_cropper_flag = True``. The preprocessed frame is the RGB
The face is rescaled to a square crop of ``FACE_SIZE`` x ``FACE_SIZE`` pixels. Faces smaller
than the ``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one
introduced in [CAM12]_, which is selected by setting ``FACE_DETECTION_METHOD = None``. The preprocessed frame is the RGB
facial image, which is defined by ``RGB_OUTPUT_FLAG = True``.
"""
@@ -21,35 +21,35 @@ this resource.
#=======================================================================================
# define preprocessor:
from ..preprocessor import VideoFaceCrop
CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
FIXED_POSITIONS = None
MASK_SIGMA = None # The sigma for random values in areas outside the image
MASK_NEIGHBORS = 5 # The number of neighbors to consider while extrapolating
MASK_SEED = None # The seed for generating random values during extrapolation
CHECK_FACE_SIZE_FLAG = True # Check the size of the face
MIN_FACE_SIZE = 50
USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
RGB_OUTPUT_FLAG = True # Return RGB cropped face using local cropper
preprocessor = VideoFaceCrop(
cropped_image_size=CROPPED_IMAGE_SIZE,
cropped_positions=CROPPED_POSITIONS,
fixed_positions=FIXED_POSITIONS,
mask_sigma=MASK_SIGMA,
mask_neighbors=MASK_NEIGHBORS,
mask_seed=None,
check_face_size_flag=CHECK_FACE_SIZE_FLAG,
min_face_size=MIN_FACE_SIZE,
use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
rgb_output_flag=RGB_OUTPUT_FLAG)
from ..preprocessor import FaceCropAlign
from bob.bio.video.preprocessor import Wrapper
from bob.bio.video.utils import FrameSelector
FACE_SIZE = 64 # The size of the resulting face
RGB_OUTPUT_FLAG = True # RGB output
USE_FACE_ALIGNMENT = False # no face alignment, crop only
MAX_IMAGE_SIZE = None # no limiting here
FACE_DETECTION_METHOD = None # use annotations
MIN_FACE_SIZE = 50 # skip small faces
_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
rgb_output_flag = RGB_OUTPUT_FLAG,
use_face_alignment = USE_FACE_ALIGNMENT,
max_image_size = MAX_IMAGE_SIZE,
face_detection_method = FACE_DETECTION_METHOD,
min_face_size = MIN_FACE_SIZE)
_frame_selector = FrameSelector(selection_style = "all")
preprocessor = Wrapper(preprocessor = _image_preprocessor,
frame_selector = _frame_selector)
"""
In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
The size of the face is normalized to ``cropped_image_size`` dimensions. Faces smaller than
the ``min_face_size`` threshold are discarded. The preprocessor is similar to the one
introduced in [CAM12]_, which is selected by setting ``use_local_cropper_flag = True``. The preprocessed frame is the RGB
The face is rescaled to a square crop of ``FACE_SIZE`` x ``FACE_SIZE`` pixels. Faces smaller
than the ``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one
introduced in [CAM12]_, which is selected by setting ``FACE_DETECTION_METHOD = None``. The preprocessed frame is the RGB
facial image, which is defined by ``RGB_OUTPUT_FLAG = True``.
"""
@@ -19,35 +19,35 @@ this resource.
#=======================================================================================
# define preprocessor:
from ..preprocessor import VideoFaceCrop
CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
FIXED_POSITIONS = None
MASK_SIGMA = None # The sigma for random values in areas outside the image
MASK_NEIGHBORS = 5 # The number of neighbors to consider while extrapolating
MASK_SEED = None # The seed for generating random values during extrapolation
CHECK_FACE_SIZE_FLAG = True # Check the size of the face
MIN_FACE_SIZE = 50
USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
RGB_OUTPUT_FLAG = True # Return RGB cropped face using local cropper
preprocessor = VideoFaceCrop(
cropped_image_size=CROPPED_IMAGE_SIZE,
cropped_positions=CROPPED_POSITIONS,
fixed_positions=FIXED_POSITIONS,
mask_sigma=MASK_SIGMA,
mask_neighbors=MASK_NEIGHBORS,
mask_seed=None,
check_face_size_flag=CHECK_FACE_SIZE_FLAG,
min_face_size=MIN_FACE_SIZE,
use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
rgb_output_flag=RGB_OUTPUT_FLAG)
from ..preprocessor import FaceCropAlign
from bob.bio.video.preprocessor import Wrapper
from bob.bio.video.utils import FrameSelector
FACE_SIZE = 64 # The size of the resulting face
RGB_OUTPUT_FLAG = True # RGB output
USE_FACE_ALIGNMENT = False # no face alignment, crop only
MAX_IMAGE_SIZE = None # no limiting here
FACE_DETECTION_METHOD = None # use annotations
MIN_FACE_SIZE = 50 # skip small faces
_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
rgb_output_flag = RGB_OUTPUT_FLAG,
use_face_alignment = USE_FACE_ALIGNMENT,
max_image_size = MAX_IMAGE_SIZE,
face_detection_method = FACE_DETECTION_METHOD,
min_face_size = MIN_FACE_SIZE)
_frame_selector = FrameSelector(selection_style = "all")
preprocessor = Wrapper(preprocessor = _image_preprocessor,
frame_selector = _frame_selector)
"""
In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
The size of the face is normalized to ``cropped_image_size`` dimensions. Faces smaller than
the ``min_face_size`` threshold are discarded. The preprocessor is similar to the one
introduced in [CAM12]_, which is selected by setting ``use_local_cropper_flag = True``. The preprocessed frame is the RGB
The face is rescaled to a square crop of ``FACE_SIZE`` x ``FACE_SIZE`` pixels. Faces smaller
than the ``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one
introduced in [CAM12]_, which is selected by setting ``FACE_DETECTION_METHOD = None``. The preprocessed frame is the RGB
facial image, which is defined by ``RGB_OUTPUT_FLAG = True``.
"""
@@ -21,35 +21,35 @@ this resource.
#=======================================================================================
# define preprocessor:
from ..preprocessor import VideoFaceCrop
CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
FIXED_POSITIONS = None
MASK_SIGMA = None # The sigma for random values in areas outside the image
MASK_NEIGHBORS = 5 # The number of neighbors to consider while extrapolating