bob.pad.face / Commits
Commit b00b936b
authored Nov 23, 2017 by Amir MOHAMMADI
pep8 formatting done by yapf -ri .
Parent: 270227f3
Pipeline #14260 passed with stages in 11 minutes and 6 seconds.
Showing 59 changed files with 2484 additions and 2095 deletions (+2484, -2095).
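As the commit message above says, the change is purely mechanical: every file was rewritten in place by yapf. A minimal sketch of what that amounts to, using yapf's Python API rather than the command line; the `style_config='pep8'` value below is an assumption for illustration, since the exact style knobs used by the project are not recorded on this page:

# Illustration only: the kind of rewrite this commit applies.
# The project ran the command-line tool (`yapf -ri .`); here we call the
# library API instead. Recent yapf versions return (formatted_source, changed).
from yapf.yapflib.yapf_api import FormatCode

unformatted = (
    "algorithm=VideoSvmPadAlgorithm(machine_type=MACHINE_TYPE,"
    "kernel_type=KERNEL_TYPE,n_samples=N_SAMPLES)"
)
formatted, changed = FormatCode(unformatted, style_config='pep8')
print(changed)    # True if yapf rewrote anything
print(formatted)  # same statement, re-wrapped to the configured style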
bob/pad/face/__init__.py  +0 -1
bob/pad/face/algorithm/VideoCascadeSvmPadAlgorithm.py  +176 -138
bob/pad/face/algorithm/VideoGmmPadAlgorithm.py  +49 -27
bob/pad/face/algorithm/VideoLRPadAlgorithm.py  +76 -71
bob/pad/face/algorithm/VideoSvmPadAlgorithm.py  +165 -103
bob/pad/face/algorithm/__init__.py  +1 -0
bob/pad/face/config/aggregated_db.py  +1 -2
bob/pad/face/config/algorithm/video_cascade_svm_pad_algorithm.py  +119 -114
bob/pad/face/config/algorithm/video_gmm_pad_algorithm.py  +90 -89
bob/pad/face/config/algorithm/video_svm_pad_algorithm.py  +36 -28
bob/pad/face/config/database/aggregated_db.py  +2 -3
bob/pad/face/config/database/mifs.py  +1 -4
bob/pad/face/config/database/msu_mfsd.py  +2 -4
bob/pad/face/config/database/replay_attack.py  +2 -5
bob/pad/face/config/database/replay_mobile.py  +1 -4
bob/pad/face/config/extractor/frame_diff_features.py  +4 -8
bob/pad/face/config/extractor/video_lbp_histogram.py  +13 -13
bob/pad/face/config/extractor/video_quality_measure.py  +5 -7
bob/pad/face/config/frame_diff_svm.py  +24 -30
bob/pad/face/config/frame_diff_svm_aggregated_db.py  +30 -39
bob/pad/face/config/grid.py  +2 -12
bob/pad/face/config/lbp_svm.py  +49 -48
bob/pad/face/config/lbp_svm_aggregated_db.py  +56 -55
bob/pad/face/config/mifs.py  +1 -3
bob/pad/face/config/msu_mfsd.py  +1 -2
bob/pad/face/config/preprocessor/filename.py  +0 -1
bob/pad/face/config/preprocessor/video_face_crop.py  +45 -50
bob/pad/face/config/preprocessor/video_sparse_coding.py  +19 -16
bob/pad/face/config/qm_lr.py  +28 -37
bob/pad/face/config/qm_one_class_gmm.py  +28 -38
bob/pad/face/config/qm_one_class_svm_aggregated_db.py  +43 -47
bob/pad/face/config/qm_one_class_svm_cascade_aggregated_db.py  +31 -37
bob/pad/face/config/qm_svm.py  +38 -41
bob/pad/face/config/qm_svm_aggregated_db.py  +44 -52
bob/pad/face/config/replay_attack.py  +1 -2
bob/pad/face/config/replay_mobile.py  +1 -2
bob/pad/face/database/__init__.py  +5 -3
bob/pad/face/database/aggregated_db.py  +232 -108
bob/pad/face/database/mifs.py  +30 -24
bob/pad/face/database/msu_mfsd.py  +50 -34
bob/pad/face/database/replay.py  +50 -32
bob/pad/face/database/replay_mobile.py  +48 -32
bob/pad/face/extractor/FrameDiffFeatures.py  +15 -25
bob/pad/face/extractor/ImageQualityMeasure.py  +6 -14
bob/pad/face/extractor/LBPHistogram.py  +42 -17
bob/pad/face/extractor/VideoDataLoader.py  +8 -10
bob/pad/face/extractor/VideoLBPHistogram.py  +16 -20
bob/pad/face/extractor/VideoQualityMeasure.py  +10 -19
bob/pad/face/preprocessor/FrameDifference.py  +69 -50
bob/pad/face/preprocessor/ImageFaceCrop.py  +18 -22
bob/pad/face/preprocessor/VideoFaceCrop.py  +80 -62
bob/pad/face/preprocessor/VideoSparseCoding.py  +157 -123
bob/pad/face/preprocessor/__init__.py  +0 -1
bob/pad/face/test/test.py  +188 -174
bob/pad/face/test/test_databases.py  +126 -46
bob/pad/face/utils/face_detection_utils.py  +33 -35
bootstrap-buildout.py  +55 -44
doc/conf.py  +26 -28
setup.py  +36 -39
bob/pad/face/__init__.py

@@ -11,4 +11,3 @@ def get_config():
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
bob/pad/face/algorithm/VideoCascadeSvmPadAlgorithm.py (diff collapsed)
bob/pad/face/algorithm/VideoGmmPadAlgorithm.py

@@ -19,10 +19,10 @@ import bob.io.base
from sklearn import mixture

# ==============================================================================
# Main body :

class VideoGmmPadAlgorithm(Algorithm):
    """
    This class is designed to train a GMM based PAD system. The GMM is trained
    ...

@@ -55,12 +55,13 @@ class VideoGmmPadAlgorithm(Algorithm):
                 random_state=3,
                 frame_level_scores_flag=False):

        Algorithm.__init__(
            self,
            n_components=n_components,
            random_state=random_state,
            frame_level_scores_flag=frame_level_scores_flag,
            performs_projection=True,
            requires_projector_training=True)

        self.n_components = n_components
        ...

@@ -75,8 +76,11 @@ class VideoGmmPadAlgorithm(Algorithm):
        self.features_std = None  # this argument will be updated with features std

        # names of the arguments of the pretrained GMM machine to be saved/loaded to/from HDF5 file:
        self.gmm_param_keys = [
            "covariance_type", "covariances_", "lower_bound_", "means_",
            "n_components", "weights_", "converged_", "precisions_",
            "precisions_cholesky_"
        ]

    # ==========================================================================
    def convert_frame_cont_to_array(self, frame_container):
        ...

@@ -132,7 +136,9 @@ class VideoGmmPadAlgorithm(Algorithm):
            An array containing features for all samples and frames.
        """

        if isinstance(features[0], FrameContainer):  # if FrameContainer convert to 2D numpy array
            return self.convert_list_of_frame_cont_to_array(features)
        else:
            return np.vstack(features)
        ...

@@ -160,7 +166,8 @@ class VideoGmmPadAlgorithm(Algorithm):
        feature_vectors = []

        for frame_container in frame_containers:
            video_features_array = self.convert_frame_cont_to_array(frame_container)
            feature_vectors.append(video_features_array)
        ...

@@ -169,7 +176,10 @@ class VideoGmmPadAlgorithm(Algorithm):
        return features_array

    # ==========================================================================
    def mean_std_normalize(self, features, features_mean=None, features_std=None):
        """
        The features in the input 2D array are mean-std normalized.
        The rows are samples, the columns are features. If ``features_mean``
        ...

@@ -250,19 +260,22 @@ class VideoGmmPadAlgorithm(Algorithm):
            Standart deviation of the features.
        """

        features_norm, features_mean, features_std = self.mean_std_normalize(real)
        # real is now mean-std normalized

        machine = mixture.GaussianMixture(
            n_components=n_components,
            random_state=random_state,
            covariance_type='full')

        machine.fit(features_norm)

        return machine, features_mean, features_std

    # ==========================================================================
    def save_gmm_machine_and_mean_std(self, projector_file, machine, features_mean, features_std):
        """
        Saves the GMM machine, features mean and std to the hdf5 file.
        The absolute name of the file is specified in ``projector_file`` string.
        ...

@@ -284,7 +297,8 @@ class VideoGmmPadAlgorithm(Algorithm):
            Standart deviation of the features.
        """

        f = bob.io.base.HDF5File(projector_file, 'w')  # open hdf5 file to save to

        for key in self.gmm_param_keys:
            data = getattr(machine, key)
        ...

@@ -317,18 +331,21 @@ class VideoGmmPadAlgorithm(Algorithm):
        """

        # training_features[0] - training features for the REAL class.
        real = self.convert_and_prepare_features(training_features[0])  # output is array
        # training_features[1] - training features for the ATTACK class.
        # attack = self.convert_and_prepare_features(training_features[1]) # output is array

        # Train the GMM machine and get normalizers:
        machine, features_mean, features_std = self.train_gmm(
            real=real,
            n_components=self.n_components,
            random_state=self.random_state)

        # Save the GNN machine and normalizers:
        self.save_gmm_machine_and_mean_std(projector_file, machine, features_mean, features_std)

    # ==========================================================================
    def load_gmm_machine_and_mean_std(self, projector_file):
        ...

@@ -354,7 +371,8 @@ class VideoGmmPadAlgorithm(Algorithm):
            Standart deviation of the features.
        """

        f = bob.io.base.HDF5File(projector_file, 'r')  # file to read the machine from

        # initialize the machine:
        machine = mixture.GaussianMixture()
        ...

@@ -397,7 +415,8 @@ class VideoGmmPadAlgorithm(Algorithm):
        ``load_cascade_of_machines`` methods of this class for more details.
        """

        machine, features_mean, features_std = self.load_gmm_machine_and_mean_std(projector_file)

        self.machine = machine
        ...

@@ -437,7 +456,9 @@ class VideoGmmPadAlgorithm(Algorithm):
        """

        # 1. Convert input array to numpy array if necessary.
        if isinstance(feature, FrameContainer):  # if FrameContainer convert to 2D numpy array
            features_array = self.convert_frame_cont_to_array(feature)
        ...

@@ -445,7 +466,8 @@ class VideoGmmPadAlgorithm(Algorithm):
            features_array = feature

        features_array_norm, _, _ = self.mean_std_normalize(
            features_array, self.features_mean, self.features_std)

        scores = self.machine.score_samples(features_array_norm)
        ...
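The hunks above show the whole lifecycle of this one-class GMM PAD algorithm: mean-std normalize the real-class features, fit a full-covariance scikit-learn GaussianMixture, persist its parameters, and score test features with score_samples. A stripped-down sketch of that flow, without the FrameContainer and HDF5 plumbing of the bob class and on synthetic data, just to make the idea concrete:

# Minimal sketch of the train/score flow visible in the diff above.
# Not the bob.pad.face class itself: FrameContainer handling and HDF5
# persistence are omitted, and the data here is synthetic.
import numpy as np
from sklearn import mixture

def mean_std_normalize(features, mean=None, std=None):
    if mean is None:
        mean, std = features.mean(axis=0), features.std(axis=0)
    std = np.where(std == 0, 1.0, std)  # guard against constant features
    return (features - mean) / std, mean, std

# "real" class features only -- this is a one-class model:
real = np.random.rand(500, 18)
real_norm, feat_mean, feat_std = mean_std_normalize(real)

machine = mixture.GaussianMixture(
    n_components=3, random_state=3, covariance_type='full')
machine.fit(real_norm)

# At test time, normalize with the training statistics and use the
# per-sample log-likelihood as the PAD score (higher = more "real"):
test = np.random.rand(20, 18)
test_norm, _, _ = mean_std_normalize(test, feat_mean, feat_std)
scores = machine.score_samples(test_norm)
print(scores.shape)  # (20,)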
bob/pad/face/algorithm/VideoLRPadAlgorithm.py (diff collapsed)
bob/pad/face/algorithm/VideoSvmPadAlgorithm.py (diff collapsed)
bob/pad/face/algorithm/__init__.py

@@ -3,6 +3,7 @@ from .VideoCascadeSvmPadAlgorithm import VideoCascadeSvmPadAlgorithm
from .VideoLRPadAlgorithm import VideoLRPadAlgorithm
from .VideoGmmPadAlgorithm import VideoGmmPadAlgorithm


def __appropriate__(*args):
    """Says object was actually declared here, and not in the import module.
    Fixing sphinx warnings of not being able to find classes, when path is
    ...
bob/pad/face/config/aggregated_db.py

#!/usr/bin/env python
"""Aggregated Db is a database for face PAD experiments.
This database aggregates the data from 3 publicly available data-sets:
`REPLAYATTACK`_, `REPLAY-MOBILE`_ and `MSU MFSD`_.
...

@@ -20,7 +19,7 @@ from bob.pad.face.database import AggregatedDbPadDatabase
ORIGINAL_DIRECTORY = "[YOUR_AGGREGATED_DB_DIRECTORIES]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""

ORIGINAL_EXTENSION = ".mov"  # extension of the data files

database = AggregatedDbPadDatabase(
    protocol='grandtest',
    ...
bob/pad/face/config/algorithm/video_cascade_svm_pad_algorithm.py (diff collapsed)

bob/pad/face/config/algorithm/video_gmm_pad_algorithm.py (diff collapsed)
bob/pad/face/config/algorithm/video_svm_pad_algorithm.py

@@ -2,7 +2,6 @@
from bob.pad.face.algorithm import VideoSvmPadAlgorithm

#=======================================================================================
# Define instances here:
...

@@ -10,31 +9,40 @@ machine_type = 'C_SVC'
kernel_type = 'RBF'
n_samples = 10000
# trainer_grid_search_params = {'cost': [2**p for p in range(-5, 16, 2)], 'gamma': [2**p for p in range(-15, 4, 2)]}
trainer_grid_search_params = {
    'cost': [2**p for p in range(-3, 14, 2)],
    'gamma': [2**p for p in range(-15, 0, 2)]
}
mean_std_norm_flag = True
frame_level_scores_flag = False  # one score per video(!) in this case

video_svm_pad_algorithm_10k_grid_mean_std = VideoSvmPadAlgorithm(
    machine_type=machine_type,
    kernel_type=kernel_type,
    n_samples=n_samples,
    trainer_grid_search_params=trainer_grid_search_params,
    mean_std_norm_flag=mean_std_norm_flag,
    frame_level_scores_flag=frame_level_scores_flag)

frame_level_scores_flag = True  # one score per frame(!) in this case

video_svm_pad_algorithm_10k_grid_mean_std_frame_level = VideoSvmPadAlgorithm(
    machine_type=machine_type,
    kernel_type=kernel_type,
    n_samples=n_samples,
    trainer_grid_search_params=trainer_grid_search_params,
    mean_std_norm_flag=mean_std_norm_flag,
    frame_level_scores_flag=frame_level_scores_flag)

trainer_grid_search_params = {
    'cost': [1],
    'gamma': [0]
}  # set the default LibSVM parameters

video_svm_pad_algorithm_default_svm_param_mean_std_frame_level = VideoSvmPadAlgorithm(
    machine_type=machine_type,
    kernel_type=kernel_type,
    n_samples=n_samples,
    trainer_grid_search_params=trainer_grid_search_params,
    mean_std_norm_flag=mean_std_norm_flag,
    frame_level_scores_flag=frame_level_scores_flag)
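For reference, the cost/gamma grids defined in this config expand to a modest set of powers of two; a two-line check makes the actual search range explicit:

# The grids defined in this config, written out explicitly.
cost = [2**p for p in range(-3, 14, 2)]    # 0.125, 0.5, 2, ..., 8192
gamma = [2**p for p in range(-15, 0, 2)]   # ~3.05e-05, ..., 0.5
print(len(cost), len(gamma))  # 9 x 8 = 72 (cost, gamma) pairs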
bob/pad/face/config/database/aggregated_db.py

#!/usr/bin/env python
"""Aggregated Db is a database for face PAD experiments.
This database aggregates the data from 3 publicly available data-sets:
`REPLAYATTACK`_, `REPLAY-MOBILE`_ and `MSU MFSD`_.
...

@@ -20,7 +19,7 @@ from bob.pad.face.database import AggregatedDbPadDatabase
original_directory = "[YOUR_AGGREGATED_DB_DIRECTORIES]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""

original_extension = ".mov"  # extension of the data files

database = AggregatedDbPadDatabase(
    protocol='grandtest',
    ...

@@ -47,4 +46,4 @@ must be separated with a space. See the following note with an example of
.. note::
    [YOUR_AGGREGATED_DB_DIRECTORIES] = <PATH_TO_REPLAY_ATTACK> <PATH_TO_REPLAY_MOBILE> <PATH_TO_MSU_MFSD>
"""
\ No newline at end of file
"""
bob/pad/face/config/database/mifs.py

#!/usr/bin/env python
"""`MIFS`_ is a face makeup spoofing database adapted for face PAD experiments.
Database assembled from a dataset consisting of 107 makeup-transformations taken
...

@@ -20,14 +19,12 @@ the link.
from bob.pad.face.database.mifs import MIFSPadDatabase

# Directory where the data files are stored.
# This directory is given in the .bob_bio_databases.txt file located in your home directory
original_directory = "[YOUR_MIFS_DATABASE_DIRECTORY]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""

original_extension = ".jpg"  # extension of the data files

database = MIFSPadDatabase(
    protocol='grandtest',
    ...
bob/pad/face/config/database/msu_mfsd.py

#!/usr/bin/env python
"""`MSU MFSD`_ is a database for face PAD experiments.
Database created at MSU, for face-PAD experiments. The public version of the database contains
...

@@ -18,13 +17,12 @@ the link.
from bob.pad.face.database import MsuMfsdPadDatabase

# Directory where the data files are stored.
# This directory is given in the .bob_bio_databases.txt file located in your home directory
original_directory = "[YOUR_MSU_MFSD_DIRECTORY]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""

original_extension = "none"  # extension is not used to load the data in the HLDI of this database

database = MsuMfsdPadDatabase(
    protocol='grandtest',
    ...

@@ -45,4 +43,4 @@ Notice that ``original_directory`` is set to ``[YOUR_MSU_MFSD_DIRECTORY]``.
You must make sure to create ``${HOME}/.bob_bio_databases.txt`` setting this
value to the place where you actually installed the Replay-Mobile Database, as
explained in the section :ref:`bob.pad.face.baselines`.
"""
\ No newline at end of file
"""
bob/pad/face/config/database/replay_attack.py

#!/usr/bin/env python
"""`Replayattack`_ is a database for face PAD experiments.
The Replay-Attack Database for face spoofing consists of 1300 video clips of photo and video attack attempts to 50 clients,
...

@@ -14,14 +13,12 @@ the link.
from bob.pad.face.database import ReplayPadDatabase

# Directory where the data files are stored.
# This directory is given in the .bob_bio_databases.txt file located in your home directory
original_directory = "[YOUR_REPLAY_ATTACK_DIRECTORY]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""

original_extension = ".mov"  # extension of the data files

database = ReplayPadDatabase(
    protocol='grandtest',
    ...

@@ -42,4 +39,4 @@ Notice that ``original_directory`` is set to ``[YOUR_REPLAY_ATTACK_DIRECTORY]``.
You must make sure to create ``${HOME}/.bob_bio_databases.txt`` setting this
value to the place where you actually installed the Replayattack Database, as
explained in the section :ref:`bob.pad.face.baselines`.
"""
\ No newline at end of file
"""
bob/pad/face/config/database/replay_mobile.py

#!/usr/bin/env python
"""`Replay-Mobile`_ is a database for face PAD experiments.
The Replay-Mobile Database for face spoofing consists of 1030 video clips of photo and video attack attempts to 40 clients,
...

@@ -17,14 +16,12 @@ the link.
from bob.pad.face.database import ReplayMobilePadDatabase

# Directory where the data files are stored.
# This directory is given in the .bob_bio_databases.txt file located in your home directory
original_directory = "[YOUR_REPLAY_MOBILE_DIRECTORY]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""

original_extension = ".mov"  # extension of the data files

database = ReplayMobilePadDatabase(
    protocol='grandtest',
    ...
bob/pad/face/config/extractor/frame_diff_features.py

@@ -2,15 +2,11 @@
from bob.pad.face.extractor import FrameDiffFeatures

#=======================================================================================
# Define instances here:

window_size = 20
overlap = 0

frame_diff_feat_extr_w20_over0 = FrameDiffFeatures(
    window_size=window_size, overlap=overlap)
bob/pad/face/config/extractor/video_lbp_histogram.py

@@ -2,20 +2,20 @@
from bob.pad.face.extractor import VideoLBPHistogram

#=======================================================================================
# Define instances here:

lbptype = 'uniform'
elbptype = 'regular'
rad = 1
neighbors = 8
circ = False
dtype = None

video_lbp_histogram_extractor_n8r1_uniform = VideoLBPHistogram(
    lbptype=lbptype,
    elbptype=elbptype,
    rad=rad,
    neighbors=neighbors,
    circ=circ,
    dtype=dtype)
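The extractor configured above builds a uniform LBP histogram (8 neighbors, radius 1) per frame. A rough sketch of that idea using scikit-image instead of the bob extractor; this is an illustration under that assumption, not the VideoLBPHistogram implementation itself:

# Illustration only: uniform-LBP histogram for one gray-scale frame,
# computed with scikit-image rather than bob.pad.face.extractor.VideoLBPHistogram.
import numpy as np
from skimage.feature import local_binary_pattern

def uniform_lbp_histogram(gray_frame, neighbors=8, rad=1):
    """Normalized uniform-LBP histogram for a single gray-scale frame."""
    lbp = local_binary_pattern(gray_frame, P=neighbors, R=rad, method='uniform')
    # 'uniform' LBP with P neighbors yields P + 2 distinct codes.
    hist, _ = np.histogram(
        lbp.ravel(), bins=np.arange(0, neighbors + 3), density=True)
    return hist

# Example on a random frame stand-in:
frame = np.random.randint(0, 256, size=(64, 64)).astype(np.uint8)
print(uniform_lbp_histogram(frame).shape)  # (10,)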
bob/pad/face/config/extractor/video_quality_measure.py

@@ -2,14 +2,12 @@
from bob.pad.face.extractor import VideoQualityMeasure

#=======================================================================================
# Define instances here:

galbally = True
msu = True
dtype = None

video_quality_measure_galbally_msu = VideoQualityMeasure(
    galbally=galbally, msu=msu, dtype=dtype)
bob/pad/face/config/frame_diff_svm.py

#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This file contains configurations to run Frame Differences and SVM based face PAD baseline.
The settings are tuned for the Replay-attack database.
The idea of the algorithms is inherited from the following paper: [AM11]_.
"""

#=======================================================================================
sub_directory = 'frame_diff_svm'
"""
...

@@ -18,19 +16,19 @@ or the attribute ``sub_directory`` in a configuration file loaded **after**
this resource.
"""

#=======================================================================================
# define preprocessor:

from ..preprocessor import FrameDifference

NUMBER_OF_FRAMES = None  # process all frames
CHECK_FACE_SIZE_FLAG = True  # Check size of the face
MIN_FACE_SIZE = 50  # Minimal size of the face to consider

preprocessor = FrameDifference(
    number_of_frames=NUMBER_OF_FRAMES,
    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
    min_face_size=MIN_FACE_SIZE)
"""
In the preprocessing stage the frame differences are computed for both facial and non-facial/background
regions. In this case all frames of the input video are considered, which is defined by
...

@@ -39,17 +37,15 @@ are discarded. Both RGB and gray-scale videos are acceptable by the preprocessor
The preprocessing idea is introduced in [AM11]_.
"""

#=======================================================================================
# define extractor:

from ..extractor import FrameDiffFeatures

WINDOW_SIZE = 20
OVERLAP = 0

extractor = FrameDiffFeatures(window_size=WINDOW_SIZE, overlap=OVERLAP)
"""
In the feature extraction stage 5 features are extracted for all non-overlapping windows in
the Frame Difference input signals. Five features are computed for each of windows in the
...

@@ -59,7 +55,6 @@ argument.
The features are introduced in the following paper: [AM11]_.
"""

#=======================================================================================
# define algorithm:
...

@@ -68,16 +63,20 @@ from ..algorithm import VideoSvmPadAlgorithm
MACHINE_TYPE = 'C_SVC'
KERNEL_TYPE = 'RBF'
N_SAMPLES = 10000
TRAINER_GRID_SEARCH_PARAMS = {
    'cost': [2**P for P in range(-3, 14, 2)],
    'gamma': [2**P for P in range(-15, 0, 2)]
}
MEAN_STD_NORM_FLAG = True  # enable mean-std normalization
FRAME_LEVEL_SCORES_FLAG = True  # one score per frame(!) in this case

algorithm = VideoSvmPadAlgorithm(
    machine_type=MACHINE_TYPE,
    kernel_type=KERNEL_TYPE,
    n_samples=N_SAMPLES,
    trainer_grid_search_params=TRAINER_GRID_SEARCH_PARAMS,
    mean_std_norm_flag=MEAN_STD_NORM_FLAG,
    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
"""
The SVM algorithm with RBF kernel is used to classify the data into *real* and *attack* classes.
One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
...

@@ -87,8 +86,3 @@ The size of this subset is defined by ``n_samples`` parameter.
The data is also mean-std normalized, ``mean_std_norm_flag = True``.
"""
bob/pad/face/config/frame_diff_svm_aggregated_db.py

#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This file contains configurations to run Frame Differences and SVM based face PAD baseline.
The settings of the preprocessor and extractor are tuned for the Replay-attack database.
...

@@ -9,7 +8,6 @@ large data sets, such as Aggregated PAD database.
The IQM features used in this algorithm/resource are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
"""

#=======================================================================================
sub_directory = 'frame_diff_svm'
"""
...

@@ -20,19 +18,19 @@ or the attribute ``sub_directory`` in a configuration file loaded **after**
this resource.
"""

#=======================================================================================
# define preprocessor:

from ..preprocessor import FrameDifference

NUMBER_OF_FRAMES = None  # process all frames
CHECK_FACE_SIZE_FLAG = True  # Check size of the face
MIN_FACE_SIZE = 50  # Minimal size of the face to consider

preprocessor = FrameDifference(
    number_of_frames=NUMBER_OF_FRAMES,
    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
    min_face_size=MIN_FACE_SIZE)
"""
In the preprocessing stage the frame differences are computed for both facial and non-facial/background
regions. In this case all frames of the input video are considered, which is defined by
...

@@ -41,17 +39,15 @@ are discarded. Both RGB and gray-scale videos are acceptable by the preprocessor
The preprocessing idea is introduced in [AM11]_.
"""

#=======================================================================================
# define extractor:

from ..extractor import FrameDiffFeatures

WINDOW_SIZE = 20
OVERLAP = 0

extractor = FrameDiffFeatures(window_size=WINDOW_SIZE, overlap=OVERLAP)
"""
In the feature extraction stage 5 features are extracted for all non-overlapping windows in
the Frame Difference input signals. Five features are computed for each of windows in the
...

@@ -61,7 +57,6 @@ argument.
The features are introduced in the following paper: [AM11]_.
"""

#=======================================================================================
# define algorithm:
...

@@ -70,22 +65,26 @@ from ..algorithm import VideoSvmPadAlgorithm
MACHINE_TYPE = 'C_SVC'
KERNEL_TYPE = 'RBF'
N_SAMPLES = 10000
TRAINER_GRID_SEARCH_PARAMS = {
    'cost': [2**P for P in range(-3, 14, 2)],
    'gamma': [2**P for P in range(-15, 0, 2)]
}
MEAN_STD_NORM_FLAG = True  # enable mean-std normalization
FRAME_LEVEL_SCORES_FLAG = True  # one score per frame(!) in this case
SAVE_DEBUG_DATA_FLAG = True  # save the data, which might be useful for debugging
REDUCED_TRAIN_DATA_FLAG = True  # reduce the amount of training data in the final training stage
N_TRAIN_SAMPLES = 50000  # number of training samples per class in the final SVM training stage

algorithm = VideoSvmPadAlgorithm(
    machine_type=MACHINE_TYPE,
    kernel_type=KERNEL_TYPE,
    n_samples=N_SAMPLES,
    trainer_grid_search_params=TRAINER_GRID_SEARCH_PARAMS,
    mean_std_norm_flag=MEAN_STD_NORM_FLAG,
    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG,
    save_debug_data_flag=SAVE_DEBUG_DATA_FLAG,
    reduced_train_data_flag=REDUCED_TRAIN_DATA_FLAG,
    n_train_samples=N_TRAIN_SAMPLES)