diff --git a/bob/pad/face/config/grid.py b/bob/pad/face/config/grid.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cddf7e5e24132d93cad198279d0efbb85356fe6
--- /dev/null
+++ b/bob/pad/face/config/grid.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+from bob.bio.base.grid import Grid
+
+# Configuration to run on the computation cluster:
+idiap = Grid(
+    training_queue='32G',
+
+    number_of_preprocessing_jobs=32,
+    preprocessing_queue='4G-io-big',
+
+    number_of_extraction_jobs=32,
+    extraction_queue='8G-io-big',
+
+    number_of_projection_jobs=32,
+    projection_queue='8G-io-big',
+
+    number_of_enrollment_jobs=32,
+    enrollment_queue='8G-io-big',
+
+    number_of_scoring_jobs=50,
+    scoring_queue='8G-io-big',
+    )
+
+# Configuration to run on user machines:
+idiap_user_machines = Grid(
+    training_queue='32G',
+
+    number_of_preprocessing_jobs=32,
+    preprocessing_queue='4G',
+
+    number_of_extraction_jobs=32,
+    extraction_queue='8G',
+
+    number_of_projection_jobs=32,
+    projection_queue='8G',
+
+    number_of_enrollment_jobs=32,
+    enrollment_queue='8G',
+
+    number_of_scoring_jobs=50,
+    scoring_queue='8G',
+    )
diff --git a/bob/pad/face/config/preprocessor/__init__.py b/bob/pad/face/config/preprocessor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/bob/pad/face/config/preprocessor/video_face_crop.py b/bob/pad/face/config/preprocessor/video_face_crop.py
new file mode 100644
index 0000000000000000000000000000000000000000..b210d189b7c2f0e7015a2c0fc7f3a82b7b646126
--- /dev/null
+++ b/bob/pad/face/config/preprocessor/video_face_crop.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+
+from bob.pad.face.preprocessor import VideoFaceCrop
+
+
+#=======================================================================================
+# Define instances here:
+
+cropped_image_size = (100, 100)  # The size of the resulting face
+cropped_positions = {'topleft': (0, 0), 'bottomright': cropped_image_size}
+fixed_positions = None
+mask_sigma = None  # The sigma of the random values filling the area outside the image
+mask_neighbors = 5  # The number of neighbors to consider while extrapolating
+mask_seed = None  # The seed for generating random values during extrapolation
+color_channel = 'gray'  # Convert the image to gray-scale format
+
+video_face_crop_preproc_100_100 = VideoFaceCrop(cropped_image_size = cropped_image_size,
+                                                cropped_positions = cropped_positions,
+                                                fixed_positions = fixed_positions,
+                                                mask_sigma = mask_sigma,
+                                                mask_neighbors = mask_neighbors,
+                                                mask_seed = mask_seed,
+                                                color_channel = color_channel)
diff --git a/setup.py b/setup.py
index 3ccff59989c4aa43e4f921a72c89f461513a1b32..7b46b14a200f9fa1d3a8eb795e8c39a208e4cba6 100644
--- a/setup.py
+++ b/setup.py
@@ -93,10 +93,22 @@ setup(
         'version.py = bob.pad.face.script.version:main',
         ],
 
+      # registered databases:
       'bob.pad.database': [
         'replay = bob.pad.face.config.database.replay:database',
         ],
 
+      # registered preprocessors:
+      'bob.pad.preprocessor': [
+        'video-face-crop-preproc-100 = bob.pad.face.config.preprocessor.video_face_crop:video_face_crop_preproc_100_100',
+        ],
+
+      # registered grid configurations:
+      'bob.pad.grid': [
+        'idiap = bob.pad.face.config.grid:idiap',
+        'idiap-user-machines = bob.pad.face.config.grid:idiap_user_machines',
+        ],
+
     },
 
     # Classifiers are important if you plan to distribute this package through
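Note (outside the diff): once the package is installed, the entry points added to setup.py above are resolved through standard setuptools machinery. The snippet below is a minimal sketch, assuming the distribution is named bob.pad.face (not stated in the diff itself); it uses only pkg_resources calls:

import pkg_resources

# List the preprocessor resources registered under the 'bob.pad.preprocessor' group:
for entry_point in pkg_resources.iter_entry_points('bob.pad.preprocessor'):
    print(entry_point.name)  # should include 'video-face-crop-preproc-100'

# Load the VideoFaceCrop instance behind the new entry point;
# the distribution name 'bob.pad.face' is an assumption, not taken from the diff:
preprocessor = pkg_resources.load_entry_point(
    'bob.pad.face', 'bob.pad.preprocessor', 'video-face-crop-preproc-100')

The same resource names ('video-face-crop-preproc-100', 'idiap', 'idiap-user-machines') are what the bob.pad.base command-line tools are expected to resolve through these entry-point groups when given as preprocessor or grid options.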