Commit 3adaaac2 authored by Pavel KORSHUNOV

Revert "merge"

This reverts commit 652e5ce0
parent 652e5ce0
Pipeline #16599 failed in 3 minutes and 2 seconds
@@ -11,4 +11,3 @@ src
develop-eggs
sphinx
dist
-record.txt
-# This build file uses template features from YAML so it is generic enough for
-# any Bob project. Don't modify it unless you know what you're doing.
+# This build file heavily uses template features from YAML so it is generic
+# enough for any Bob project. Don't modify it unless you know what you're
+# doing.
# Definition of global variables (all stages)
variables:
CONDA_ROOT: "${CI_PROJECT_DIR}/miniconda"
-# Definition of our build pipeline order
+# Definition of our build pipeline
stages:
- build
- test
- docs
- wheels
- deploy
- pypi
-# Build targets
+# ---------
+# Templates
+# ---------
# Template for the build stage
# Needs to run on all supported architectures, platforms and python versions
.build_template: &build_job
stage: build
before_script:
- git clean -ffdx
- mkdir _ci
- curl --silent "https://gitlab.idiap.ch/bob/bob.admin/raw/master/gitlab/install.sh" > _ci/install.sh
- chmod 755 _ci/install.sh
-  - ./_ci/install.sh _ci master #installs ci support scripts
+  - ./_ci/install.sh _ci #updates
- ./_ci/before_build.sh
script:
- ./_ci/build.sh
after_script:
- ./_ci/after_build.sh
cache: &build_caches
artifacts:
expire_in: 1 week
paths:
- miniconda.sh
- ${CONDA_ROOT}/pkgs/*.tar.bz2
- ${CONDA_ROOT}/pkgs/urls.txt
- _ci/
- dist/
- sphinx/
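The `.build_template: &build_job` / `<<: *build_job` pattern used throughout this file is plain YAML anchor-and-merge-key reuse: each job inherits the anchored template and then overrides what it needs. A minimal sketch of the expansion, shown with PyYAML purely for illustration (GitLab CI performs the equivalent merge when it parses this file):

# Sketch: how a `<<: *anchor` merge key expands a template into a concrete job.
import yaml

snippet = """
.build_template: &build_job
  stage: build
  script:
    - ./_ci/build.sh
build_linux_27:
  <<: *build_job            # inherit the whole template...
  variables:
    PYTHON_VERSION: "2.7"   # ...then specialize per job
"""
config = yaml.safe_load(snippet)
assert config["build_linux_27"]["stage"] == "build"   # merged from the anchor
print(config["build_linux_27"]["script"])             # ['./_ci/build.sh']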
# Template for the test stage - re-installs from uploaded wheels
# Needs to run on all supported architectures, platforms and python versions
.test_template: &test_job
stage: test
before_script:
- ./_ci/install.sh _ci #updates
- ./_ci/before_test.sh
script:
- ./_ci/test.sh
after_script:
- ./_ci/after_test.sh
# Template for the wheel uploading stage
# Needs to run against one supported architecture, platform and python version
.wheels_template: &wheels_job
stage: wheels
environment: intranet
only:
- master
- /^v\d+\.\d+\.\d+([abc]\d*)?$/ # PEP-440 compliant version (tags)
before_script:
- ./_ci/install.sh _ci #updates
- ./_ci/before_wheels.sh
script:
- ./_ci/wheels.sh
after_script:
- ./_ci/after_wheels.sh
# Template for (latest) documentation upload stage
# Only one real job needs to do this
.docs_template: &docs_job
stage: docs
environment: intranet
only:
- master
before_script:
- ./_ci/install.sh _ci #updates
- ./_ci/before_docs.sh
script:
- ./_ci/docs.sh
after_script:
- ./_ci/after_docs.sh
# Template for the deployment stage - re-installs from uploaded wheels
# Needs to run on a single architecture only
# Will deploy your package to PyPI and other required services
# Only runs for tags
.deploy_template: &deploy_job
stage: deploy
environment: internet
only:
- /^v\d+\.\d+\.\d+([abc]\d*)?$/ # PEP-440 compliant version (tags)
except:
- branches
before_script:
- ./_ci/install.sh _ci #updates
- ./_ci/before_deploy.sh
script:
- ./_ci/deploy.sh
after_script:
- ./_ci/after_deploy.sh
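The `only:` filters above gate wheel upload and deployment on a PEP-440-style tag. A quick, self-contained check of what that regex accepts (the tag names below are made up):

# Quick check of the tag filter used in the `only:` clauses above.
import re

TAG_RE = re.compile(r"^v\d+\.\d+\.\d+([abc]\d*)?$")

for tag in ("v2.0.3", "v2.0.3a1", "v2.0.3b", "v2.0.3rc1", "2.0.3", "v2.0"):
    print(tag, "->", bool(TAG_RE.match(tag)))
# vX.Y.Z plus an optional a/b/c pre-release suffix matches; "rc" tags and
# branch names do not, so the deploy jobs only run on release tags.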
# -------------
# Build Targets
# -------------
.build_linux_template: &linux_build_job
# Linux + Python 2.7: Builds, tests, uploads wheel and deploys (if needed)
build_linux_27:
<<: *build_job
variables: &linux_27_build_variables
PYTHON_VERSION: "2.7"
WHEEL_TAG: "py27"
tags:
- docker
image: continuumio/conda-concourse-ci
artifacts:
expire_in: 1 week
paths:
- _ci/
- ${CONDA_ROOT}/conda-bld/linux-64/*.tar.bz2
cache:
<<: *build_caches
key: "linux-cache"
- conda-linux
test_linux_27:
<<: *test_job
variables: *linux_27_build_variables
dependencies:
- build_linux_27
tags:
- conda-linux
wheels_linux_27:
<<: *wheels_job
variables: *linux_27_build_variables
dependencies:
- build_linux_27
tags:
- conda-linux
.build_macosx_template: &macosx_build_job
deploy_linux_27:
<<: *deploy_job
variables: *linux_27_build_variables
dependencies:
- build_linux_27
tags:
- conda-linux
# Linux + Python 3.5: Builds, tests and uploads wheel
build_linux_35:
<<: *build_job
variables: &linux_35_build_variables
PYTHON_VERSION: "3.5"
WHEEL_TAG: "py3"
tags:
- macosx
artifacts:
expire_in: 1 week
paths:
- _ci/
- ${CONDA_ROOT}/conda-bld/osx-64/*.tar.bz2
cache:
<<: *build_caches
key: "macosx-cache"
- conda-linux
test_linux_35:
<<: *test_job
variables: *linux_35_build_variables
dependencies:
- build_linux_35
tags:
- conda-linux
build_linux_27:
<<: *linux_build_job
variables:
PYTHON_VERSION: "2.7"
wheels_linux_35:
<<: *wheels_job
variables: *linux_35_build_variables
dependencies:
- build_linux_35
tags:
- conda-linux
docs_linux_35:
<<: *docs_job
variables: *linux_35_build_variables
dependencies:
- build_linux_35
tags:
- conda-linux
# Linux + Python 3.6: Builds and tests
build_linux_36:
<<: *linux_build_job
variables:
<<: *build_job
variables: &linux_36_build_variables
PYTHON_VERSION: "3.6"
BUILD_EGG: "true"
artifacts:
expire_in: 1 week
paths:
- _ci/
- dist/*.zip
- sphinx
- ${CONDA_ROOT}/conda-bld/linux-64/*.tar.bz2
WHEEL_TAG: "py3"
tags:
- conda-linux
test_linux_36:
<<: *test_job
variables: *linux_36_build_variables
dependencies:
- build_linux_36
tags:
- conda-linux
# Mac OSX + Python 2.7: Builds and tests
build_macosx_27:
<<: *macosx_build_job
variables:
<<: *build_job
variables: &macosx_27_build_variables
PYTHON_VERSION: "2.7"
WHEEL_TAG: "py27"
tags:
- conda-macosx
build_macosx_36:
<<: *macosx_build_job
variables:
PYTHON_VERSION: "3.6"
# Deploy targets
.deploy_template: &deploy_job
stage: deploy
before_script:
- ./_ci/install.sh _ci master #updates ci support scripts
script:
- ./_ci/deploy.sh
test_macosx_27:
<<: *test_job
variables: *macosx_27_build_variables
dependencies:
- build_linux_27
- build_linux_36
- build_macosx_27
- build_macosx_36
tags:
- deployer
- conda-macosx
deploy_beta:
<<: *deploy_job
environment: beta
only:
- master
# Mac OSX + Python 3.5: Builds and tests
build_macosx_35:
<<: *build_job
variables: &macosx_35_build_variables
PYTHON_VERSION: "3.5"
WHEEL_TAG: "py3"
tags:
- conda-macosx
test_macosx_35:
<<: *test_job
variables: *macosx_35_build_variables
dependencies:
- build_macosx_35
tags:
- conda-macosx
deploy_stable:
<<: *deploy_job
environment: stable
only:
- /^v\d+\.\d+\.\d+([abc]\d*)?$/ # PEP-440 compliant version (tags)
except:
- branches
# Mac OSX + Python 3.6: Builds and tests
build_macosx_36:
<<: *build_job
variables: &macosx_36_build_variables
PYTHON_VERSION: "3.6"
WHEEL_TAG: "py3"
tags:
- conda-macosx
pypi:
stage: pypi
environment: pypi
only:
- /^v\d+\.\d+\.\d+([abc]\d*)?$/ # PEP-440 compliant version (tags)
except:
- branches
before_script:
- ./_ci/install.sh _ci master #updates ci support scripts
script:
- ./_ci/pypi.sh
test_macosx_36:
<<: *test_job
variables: *macosx_36_build_variables
dependencies:
- build_linux_36
- build_macosx_36
tags:
- deployer
- conda-macosx
@@ -11,3 +11,4 @@ def get_config():
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
@@ -19,10 +19,10 @@ import bob.io.base
from sklearn import mixture
# ==============================================================================
# Main body :
class VideoGmmPadAlgorithm(Algorithm):
"""
This class is designed to train a GMM based PAD system. The GMM is trained
@@ -55,13 +55,12 @@ class VideoGmmPadAlgorithm(Algorithm):
random_state=3,
frame_level_scores_flag=False):
-        Algorithm.__init__(
-            self,
-            n_components=n_components,
-            random_state=random_state,
-            frame_level_scores_flag=frame_level_scores_flag,
-            performs_projection=True,
-            requires_projector_training=True)
+        Algorithm.__init__(self,
+                           n_components=n_components,
+                           random_state=random_state,
+                           frame_level_scores_flag=frame_level_scores_flag,
+                           performs_projection=True,
+                           requires_projector_training=True)
self.n_components = n_components
@@ -76,11 +75,8 @@ class VideoGmmPadAlgorithm(Algorithm):
self.features_std = None # this argument will be updated with features std
# names of the arguments of the pretrained GMM machine to be saved/loaded to/from HDF5 file:
-        self.gmm_param_keys = [
-            "covariance_type", "covariances_", "lower_bound_", "means_",
-            "n_components", "weights_", "converged_", "precisions_",
-            "precisions_cholesky_"
-        ]
+        self.gmm_param_keys = ["covariance_type", "covariances_", "lower_bound_", "means_", "n_components", "weights_",
+                               "converged_", "precisions_", "precisions_cholesky_"]
# ==========================================================================
def convert_frame_cont_to_array(self, frame_container):
@@ -136,9 +132,7 @@ class VideoGmmPadAlgorithm(Algorithm):
An array containing features for all samples and frames.
"""
-        if isinstance(
-                features[0],
-                FrameContainer):  # if FrameContainer convert to 2D numpy array
+        if isinstance(features[0], FrameContainer):  # if FrameContainer convert to 2D numpy array
             return self.convert_list_of_frame_cont_to_array(features)
         else:
             return np.vstack(features)
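Both branches above end in a single 2D array with one row per frame; a tiny numpy illustration of the `np.vstack` branch (the shapes are made up):

# Tiny illustration of the np.vstack branch above: per-sample feature blocks
# (n_frames x n_features each) are stacked into one 2D array, one row per frame.
import numpy as np

sample_a = np.ones((10, 5))    # hypothetical sample: 10 frames, 5 features
sample_b = np.zeros((7, 5))    # another sample: 7 frames, same feature size
features = np.vstack([sample_a, sample_b])
print(features.shape)          # (17, 5)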
@@ -166,8 +160,7 @@ class VideoGmmPadAlgorithm(Algorithm):
         feature_vectors = []
         for frame_container in frame_containers:
-            video_features_array = self.convert_frame_cont_to_array(
-                frame_container)
+            video_features_array = self.convert_frame_cont_to_array(frame_container)
             feature_vectors.append(video_features_array)
@@ -176,10 +169,7 @@ class VideoGmmPadAlgorithm(Algorithm):
return features_array
# ==========================================================================
-    def mean_std_normalize(self,
-                           features,
-                           features_mean=None,
-                           features_std=None):
+    def mean_std_normalize(self, features, features_mean=None, features_std=None):
"""
The features in the input 2D array are mean-std normalized.
The rows are samples, the columns are features. If ``features_mean``
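The docstring above is cut off by the diff fold, but the operation it describes is standard per-column z-scoring with optional reuse of previously computed statistics. A sketch under that assumption (the real method lives in this class and also returns the statistics so test data can be normalized consistently):

# Sketch of per-column mean-std (z-score) normalization as the docstring
# describes: rows are samples, columns are features. Assumed behavior only.
import numpy as np

def mean_std_normalize(features, features_mean=None, features_std=None):
    features = np.asarray(features, dtype=np.float64)
    if features_mean is None:  # fit the statistics on this data...
        features_mean = features.mean(axis=0)
        features_std = features.std(axis=0)
    # ...or reuse training-set statistics passed in by the caller:
    features_norm = (features - features_mean) / features_std
    return features_norm, features_mean, features_std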
@@ -260,22 +250,19 @@ class VideoGmmPadAlgorithm(Algorithm):
Standard deviation of the features.
"""
-        features_norm, features_mean, features_std = self.mean_std_normalize(
-            real)
+        features_norm, features_mean, features_std = self.mean_std_normalize(real)
# real is now mean-std normalized
-        machine = mixture.GaussianMixture(
-            n_components=n_components,
-            random_state=random_state,
-            covariance_type='full')
+        machine = mixture.GaussianMixture(n_components=n_components,
+                                          random_state=random_state,
+                                          covariance_type='full')
machine.fit(features_norm)
return machine, features_mean, features_std
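`train_gmm` therefore fits a full-covariance `sklearn.mixture.GaussianMixture` on normalized features of the real class only; attacks are never seen during training. A self-contained sketch with toy data:

# Self-contained sketch of the train_gmm flow above, on toy data: normalize
# the real-class features, then fit a full-covariance GaussianMixture on them.
import numpy as np
from sklearn import mixture

rng = np.random.RandomState(3)
real = rng.randn(200, 4)         # toy "real class": 200 samples, 4 features
mean, std = real.mean(axis=0), real.std(axis=0)
real_norm = (real - mean) / std  # mean-std normalize, as in the code above

machine = mixture.GaussianMixture(n_components=3, random_state=3,
                                  covariance_type='full')
machine.fit(real_norm)
print(machine.score_samples(real_norm[:5]))  # per-sample log-likelihoods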
# ==========================================================================
-    def save_gmm_machine_and_mean_std(self, projector_file, machine,
-                                      features_mean, features_std):
+    def save_gmm_machine_and_mean_std(self, projector_file, machine, features_mean, features_std):
"""
Saves the GMM machine, features mean and std to the hdf5 file.
The absolute name of the file is specified in ``projector_file`` string.
@@ -297,8 +284,7 @@ class VideoGmmPadAlgorithm(Algorithm):
Standard deviation of the features.
"""
-        f = bob.io.base.HDF5File(projector_file,
-                                 'w')  # open hdf5 file to save to
+        f = bob.io.base.HDF5File(projector_file, 'w')  # open hdf5 file to save to
for key in self.gmm_param_keys:
data = getattr(machine, key)
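The loop above copies every fitted attribute named in `gmm_param_keys` into the HDF5 file, one key each. A hedged sketch of the complete save routine; it assumes `HDF5File.set(key, value)` from `bob.io.base`, as used in other Bob packages:

# Hedged sketch of the save routine above: each fitted sklearn attribute in
# gmm_param_keys is written under its own key, plus the two normalizers.
# Assumes bob.io.base.HDF5File.set(key, value) behaves as in other Bob code.
import bob.io.base

def save_gmm(projector_file, machine, features_mean, features_std, keys):
    f = bob.io.base.HDF5File(projector_file, 'w')  # open hdf5 file to save to
    for key in keys:
        f.set(key, getattr(machine, key))  # e.g. "means_", "weights_", ...
    f.set("features_mean", features_mean)
    f.set("features_std", features_std)
    del f  # closes the file handle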
@@ -331,21 +317,18 @@ class VideoGmmPadAlgorithm(Algorithm):
"""
# training_features[0] - training features for the REAL class.
-        real = self.convert_and_prepare_features(
-            training_features[0])  # output is array
+        real = self.convert_and_prepare_features(training_features[0])  # output is array
# training_features[1] - training features for the ATTACK class.
# attack = self.convert_and_prepare_features(training_features[1]) # output is array
# Train the GMM machine and get normalizers:
-        machine, features_mean, features_std = self.train_gmm(
-            real=real,
-            n_components=self.n_components,
-            random_state=self.random_state)
+        machine, features_mean, features_std = self.train_gmm(real=real,
+                                                              n_components=self.n_components,
+                                                              random_state=self.random_state)
# Save the GMM machine and normalizers:
-        self.save_gmm_machine_and_mean_std(projector_file, machine,
-                                           features_mean, features_std)
+        self.save_gmm_machine_and_mean_std(projector_file, machine, features_mean, features_std)
# ==========================================================================
def load_gmm_machine_and_mean_std(self, projector_file):
@@ -371,8 +354,7 @@ class VideoGmmPadAlgorithm(Algorithm):
Standard deviation of the features.
"""
-        f = bob.io.base.HDF5File(projector_file,
-                                 'r')  # file to read the machine from
+        f = bob.io.base.HDF5File(projector_file, 'r')  # file to read the machine from
# initialize the machine:
machine = mixture.GaussianMixture()
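Loading mirrors the save: a fresh, unfitted `GaussianMixture` is created and each stored attribute is pushed back with `setattr`, so `score_samples` works without refitting. A sketch under the same `bob.io.base.HDF5File` assumption:

# Hedged sketch of the matching load: read every stored key back and attach
# it to a fresh GaussianMixture via setattr. Because gmm_param_keys includes
# "precisions_cholesky_", score_samples works immediately, without refitting.
import bob.io.base
from sklearn import mixture

def load_gmm(projector_file, keys):
    f = bob.io.base.HDF5File(projector_file, 'r')  # file to read the machine from
    machine = mixture.GaussianMixture()
    for key in keys:
        setattr(machine, key, f.read(key))
    features_mean = f.read("features_mean")
    features_std = f.read("features_std")
    del f
    return machine, features_mean, features_std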
@@ -415,8 +397,7 @@ class VideoGmmPadAlgorithm(Algorithm):
``load_cascade_of_machines`` methods of this class for more details.
"""
-        machine, features_mean, features_std = self.load_gmm_machine_and_mean_std(
-            projector_file)
+        machine, features_mean, features_std = self.load_gmm_machine_and_mean_std(projector_file)
self.machine = machine
@@ -456,9 +437,7 @@ class VideoGmmPadAlgorithm(Algorithm):
"""
# 1. Convert input array to numpy array if necessary.
-        if isinstance(
-                feature,
-                FrameContainer):  # if FrameContainer convert to 2D numpy array
+        if isinstance(feature, FrameContainer):  # if FrameContainer convert to 2D numpy array
features_array = self.convert_frame_cont_to_array(feature)
@@ -466,8 +445,7 @@ class VideoGmmPadAlgorithm(Algorithm):
features_array = feature
-        features_array_norm, _, _ = self.mean_std_normalize(
-            features_array, self.features_mean, self.features_std)
+        features_array_norm, _, _ = self.mean_std_normalize(features_array, self.features_mean, self.features_std)
scores = self.machine.score_samples(features_array_norm)
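At projection time every normalized frame gets one GMM log-likelihood; `frame_level_scores_flag` then controls whether these are reported per frame or collapsed into a single score per video. A sketch of that choice (collapsing by mean is an assumption of the sketch, the comments elsewhere only say "one score per video"):

# Sketch of the scoring step above plus the frame_level_scores_flag choice.
# Collapsing frame scores with the mean is an assumption of this sketch.
import numpy as np

def score_video(machine, features_array_norm, frame_level_scores_flag):
    scores = machine.score_samples(features_array_norm)  # one per frame
    if frame_level_scores_flag:
        return scores          # one score per frame(!)
    return np.mean(scores)     # one aggregate score per video(!)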
@@ -3,7 +3,6 @@ from .VideoCascadeSvmPadAlgorithm import VideoCascadeSvmPadAlgorithm
from .VideoLRPadAlgorithm import VideoLRPadAlgorithm
from .VideoGmmPadAlgorithm import VideoGmmPadAlgorithm
def __appropriate__(*args):
"""Says object was actually declared here, and not in the import module.
Fixing sphinx warnings of not being able to find classes, when path is
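`__appropriate__` is the usual Bob idiom for re-homing imported classes so Sphinx documents them under this package; the hunk above cuts its body off. A sketch of the conventional body, assumed from other Bob packages:

# Sketch of the conventional Bob __appropriate__ body (truncated above):
# it re-homes each class so Sphinx resolves it in this module's namespace.
def __appropriate__(*args):
    for obj in args:
        obj.__module__ = __name__

__appropriate__(
    VideoCascadeSvmPadAlgorithm,  # imported at the top of this module
    VideoLRPadAlgorithm,
    VideoGmmPadAlgorithm,
)
__all__ = [_ for _ in dir() if not _.startswith('_')]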
#!/usr/bin/env python
"""Aggregated Db is a database for face PAD experiments.
This database aggregates the data from 3 publicly available data-sets:
`REPLAYATTACK`_, `REPLAY-MOBILE`_ and `MSU MFSD`_.
@@ -19,7 +20,7 @@ from bob.pad.face.database import AggregatedDbPadDatabase
ORIGINAL_DIRECTORY = "[YOUR_AGGREGATED_DB_DIRECTORIES]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""
-ORIGINAL_EXTENSION = ".mov"  # extension of the data files
+ORIGINAL_EXTENSION = ".mov" # extension of the data files
database = AggregatedDbPadDatabase(
protocol='grandtest',
@@ -2,6 +2,7 @@
from bob.pad.face.algorithm import VideoSvmPadAlgorithm
#=======================================================================================
# Define instances here:
@@ -9,40 +10,31 @@ machine_type = 'C_SVC'
kernel_type = 'RBF'
n_samples = 10000
# trainer_grid_search_params = {'cost': [2**p for p in range(-5, 16, 2)], 'gamma': [2**p for p in range(-15, 4, 2)]}
-trainer_grid_search_params = {
-    'cost': [2**p for p in range(-3, 14, 2)],
-    'gamma': [2**p for p in range(-15, 0, 2)]
-}
+trainer_grid_search_params = {'cost': [2**p for p in range(-3, 14, 2)], 'gamma': [2**p for p in range(-15, 0, 2)]}
mean_std_norm_flag = True
-frame_level_scores_flag = False  # one score per video(!) in this case
-video_svm_pad_algorithm_10k_grid_mean_std = VideoSvmPadAlgorithm(
-    machine_type=machine_type,
-    kernel_type=kernel_type,
-    n_samples=n_samples,
-    trainer_grid_search_params=trainer_grid_search_params,
-    mean_std_norm_flag=mean_std_norm_flag,
-    frame_level_scores_flag=frame_level_scores_flag)
-frame_level_scores_flag = True  # one score per frame(!) in this case
-video_svm_pad_algorithm_10k_grid_mean_std_frame_level = VideoSvmPadAlgorithm(
-    machine_type=machine_type,
-    kernel_type=kernel_type,
-    n_samples=n_samples,
-    trainer_grid_search_params=trainer_grid_search_params,
-    mean_std_norm_flag=mean_std_norm_flag,
-    frame_level_scores_flag=frame_level_scores_flag)
-trainer_grid_search_params = {
-    'cost': [1],
-    'gamma': [0]
-}  # set the default LibSVM parameters
-video_svm_pad_algorithm_default_svm_param_mean_std_frame_level = VideoSvmPadAlgorithm(
-    machine_type=machine_type,
-    kernel_type=kernel_type,
-    n_samples=n_samples,
-    trainer_grid_search_params=trainer_grid_search_params,
-    mean_std_norm_flag=mean_std_norm_flag,
-    frame_level_scores_flag=frame_level_scores_flag)
+frame_level_scores_flag = False # one score per video(!) in this case
+video_svm_pad_algorithm_10k_grid_mean_std = VideoSvmPadAlgorithm(machine_type = machine_type,
+                                                                 kernel_type = kernel_type,
+                                                                 n_samples = n_samples,
+                                                                 trainer_grid_search_params = trainer_grid_search_params,
+                                                                 mean_std_norm_flag = mean_std_norm_flag,
+                                                                 frame_level_scores_flag = frame_level_scores_flag)
+frame_level_scores_flag = True # one score per frame(!) in this case
+video_svm_pad_algorithm_10k_grid_mean_std_frame_level = VideoSvmPadAlgorithm(machine_type = machine_type,
+                                                                             kernel_type = kernel_type,
+                                                                             n_samples = n_samples,
+                                                                             trainer_grid_search_params = trainer_grid_search_params,
+                                                                             mean_std_norm_flag = mean_std_norm_flag,
+                                                                             frame_level_scores_flag = frame_level_scores_flag)
+trainer_grid_search_params = {'cost': [1], 'gamma': [0]} # set the default LibSVM parameters
+video_svm_pad_algorithm_default_svm_param_mean_std_frame_level = VideoSvmPadAlgorithm(machine_type = machine_type,
+                                                                                      kernel_type = kernel_type,
+                                                                                      n_samples = n_samples,
+                                                                                      trainer_grid_search_params = trainer_grid_search_params,
+                                                                                      mean_std_norm_flag = mean_std_norm_flag,
+                                                                                      frame_level_scores_flag = frame_level_scores_flag)
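The `2**p` comprehensions above span a log2-spaced LibSVM grid; printing it makes the search space concrete:

# The grid actually produced by the comprehensions above: log2-spaced
# cost and gamma values for the LibSVM grid search.
costs = [2**p for p in range(-3, 14, 2)]    # 2**-3 up to 2**13, 9 values
gammas = [2**p for p in range(-15, 0, 2)]   # 2**-15 up to 2**-1, 8 values
print(len(costs) * len(gammas), "combinations")  # 72 (cost, gamma) pairs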
#!/usr/bin/env python
"""Aggregated Db is a database for face PAD experiments.
This database aggregates the data from 3 publicly available data-sets:
`REPLAYATTACK`_, `REPLAY-MOBILE`_ and `MSU MFSD`_.
@@ -19,7 +20,7 @@ from bob.pad.face.database import AggregatedDbPadDatabase
original_directory = "[YOUR_AGGREGATED_DB_DIRECTORIES]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""
original_extension = ".mov" # extension of the data files
original_extension = ".mov" # extension of the data files
database = AggregatedDbPadDatabase(
protocol='grandtest',
@@ -46,4 +47,4 @@ must be separated with a space. See the following note with an example of
.. note::
[YOUR_AGGREGATED_DB_DIRECTORIES] = <PATH_TO_REPLAY_ATTACK> <PATH_TO_REPLAY_MOBILE> <PATH_TO_MSU_MFSD>
"""
"""
\ No newline at end of file
#!/usr/bin/env python
"""`MIFS`_ is a face makeup spoofing database adapted for face PAD experiments.
Database assembled from a dataset consisting of 107 makeup-transformations taken
@@ -19,12 +20,14 @@ the link.
from bob.pad.face.database.mifs import MIFSPadDatabase
# Directory where the data files are stored.
# This directory is given in the .bob_bio_databases.txt file located in your home directory
original_directory = "[YOUR_MIFS_DATABASE_DIRECTORY]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""
original_extension = ".jpg" # extension of the data files
original_extension = ".jpg" # extension of the data files
database = MIFSPadDatabase(
protocol='grandtest',
#!/usr/bin/env python
"""`MSU MFSD`_ is a database for face PAD experiments.
Database created at MSU, for face-PAD experiments. The public version of the database contains
@@ -17,12 +18,13 @@ the link.
from bob.pad.face.database import MsuMfsdPadDatabase
# Directory where the data files are stored.
# This directory is given in the .bob_bio_databases.txt file located in your home directory
original_directory = "[YOUR_MSU_MFSD_DIRECTORY]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""
original_extension = "none" # extension is not used to load the data in the HLDI of this database
original_extension = "none" # extension is not used to load the data in the HLDI of this database