Commit 9fcb42b2 authored by Pavel KORSHUNOV

Moved the OneClassGMM class from bob.pad.face

parent 9f709161
Pipeline #16596 failed in 10 minutes and 23 seconds
from .Algorithm import Algorithm
from .SVM import SVM
from .OneClassGMM import OneClassGMM
def __appropriate__(*args):
"""Says object was actually declared here, and not in the import module.
@@ -23,5 +23,6 @@ def __appropriate__(*args):
__appropriate__(
Algorithm,
SVM,
OneClassGMM,
)
__all__ = [_ for _ in dir() if not _.startswith('_')]
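The body of ``__appropriate__`` is collapsed in this diff; in bob packages this helper conventionally re-tags each listed object so that Sphinx documents it under this package rather than under its defining submodule. A minimal sketch of that convention (an assumption about the collapsed body, not a quote of it):

def __appropriate__(*args):
    """Says object was actually declared here, and not in the import module."""
    for obj in args:
        # Point each class at this package so Sphinx cross-references resolve.
        obj.__module__ = __name__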
@@ -14,14 +14,17 @@ import bob.bio.video
import bob.pad.base
from bob.pad.base.algorithm import SVM
from bob.pad.base.algorithm import OneClassGMM
import random
from bob.pad.base.utils import convert_array_to_list_of_frame_cont, convert_frame_cont_to_array
from bob.pad.base.utils import convert_array_to_list_of_frame_cont, convert_list_of_frame_cont_to_array, \
convert_frame_cont_to_array
def test_video_svm_pad_algorithm():
"""
Test the VideoSvmPadAlgorithm algorithm.
Test the SVM PAD algorithm.
"""
random.seed(7)
@@ -81,4 +84,62 @@ def test_video_svm_pad_algorithm():
precision = algorithm.comp_prediction_precision(machine, real_array,
attack_array)
assert precision > 0.99
\ No newline at end of file
assert precision > 0.99
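Only the tail of the SVM test is visible in this hunk; the collapsed portion presumably builds two well-separated Gaussian clouds and trains a two-class SVM on them. A minimal standalone sketch of that idea using scikit-learn directly (an illustration, not the bob.pad.base implementation; plain accuracy stands in here for ``comp_prediction_precision``):

import numpy as np
from sklearn.svm import SVC

rng = np.random.RandomState(7)
real = rng.normal(loc=1.0, scale=1.0, size=(1000, 2))    # bona-fide samples
attack = rng.normal(loc=5.0, scale=1.0, size=(1000, 2))  # attack samples

X = np.vstack([real, attack])
y = np.hstack([np.ones(1000), np.zeros(1000)])

clf = SVC(kernel='rbf', gamma='scale')
clf.fit(X, y)

# The clusters are far apart, so the classifier should be near-perfect.
precision = clf.score(X, y)
assert precision > 0.99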
def test_video_gmm_pad_algorithm():
"""
Test the OneClassGMM PAD algorithm.
"""
random.seed(7)
N = 1000
mu = 1
sigma = 1
real_array = np.transpose(
np.vstack([[random.gauss(mu, sigma) for _ in range(N)],
[random.gauss(mu, sigma) for _ in range(N)]]))
mu = 5
sigma = 1
attack_array = np.transpose(
np.vstack([[random.gauss(mu, sigma) for _ in range(N)],
[random.gauss(mu, sigma) for _ in range(N)]]))
real = convert_array_to_list_of_frame_cont(real_array)
N_COMPONENTS = 1
RANDOM_STATE = 3
FRAME_LEVEL_SCORES_FLAG = True
algorithm = OneClassGMM(
n_components=N_COMPONENTS,
random_state=RANDOM_STATE,
frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
# training_features[0] - training features for the REAL class.
real_array_converted = convert_list_of_frame_cont_to_array(real) # output is array
assert (real_array == real_array_converted).all()
# Train the OneClassGMM machine and get normalizers:
machine, features_mean, features_std = algorithm.train_gmm(
real=real_array_converted,
n_components=algorithm.n_components,
random_state=algorithm.random_state)
algorithm.machine = machine
algorithm.features_mean = features_mean
algorithm.features_std = features_std
scores_real = algorithm.project(real_array_converted)
scores_attack = algorithm.project(attack_array)
assert (np.min(scores_real) + 7.9423798970985917) < 0.000001
assert (np.max(scores_real) + 1.8380480068281055) < 0.000001
assert (np.min(scores_attack) + 38.831260843070098) < 0.000001
assert (np.max(scores_attack) + 5.3633030621521272) < 0.000001
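The test above fits a one-class GMM on the bona-fide cluster only and checks that attack samples receive much lower log-likelihood scores. A minimal standalone sketch of the same mechanism using ``sklearn.mixture.GaussianMixture`` directly (an illustration; not necessarily the backend used by ``OneClassGMM``, and not reproducing the exact constants asserted above):

import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(7)
real = rng.normal(loc=1.0, scale=1.0, size=(1000, 2))    # bona-fide cluster
attack = rng.normal(loc=5.0, scale=1.0, size=(1000, 2))  # attack cluster

# Normalize with statistics of the bona-fide class only.
mean, std = real.mean(axis=0), real.std(axis=0)

gmm = GaussianMixture(n_components=1, random_state=3)
gmm.fit((real - mean) / std)

# Log-likelihoods: bona-fide samples score high, attack samples score low.
scores_real = gmm.score_samples((real - mean) / std)
scores_attack = gmm.score_samples((attack - mean) / std)
assert scores_real.mean() > scores_attack.mean()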
@@ -225,3 +225,57 @@ def convert_array_to_list_of_frame_cont(data):
frame_container) # add current frame to FrameContainer
return frame_container_list
def mean_std_normalize(features,
features_mean=None,
features_std=None):
"""
The features in the input 2D array are mean-std normalized.
The rows are samples, the columns are features. If ``features_mean``
and ``features_std`` are provided, then these vectors will be used for
normalization. Otherwise, the mean and std of the features are
computed on the fly.
**Parameters:**
``features`` : 2D :py:class:`numpy.ndarray`
Array of features to be normalized.
``features_mean`` : 1D :py:class:`numpy.ndarray`
Mean of the features. Default: None.
``features_std`` : 1D :py:class:`numpy.ndarray`
Standard deviation of the features. Default: None.
**Returns:**
``features_norm`` : 2D :py:class:`numpy.ndarray`
Normalized array of features.
``features_mean`` : 1D :py:class:`numpy.ndarray`
Mean of the features.
``features_std`` : 1D :py:class:`numpy.ndarray`
Standard deviation of the features.
"""
features = np.copy(features)
# Compute mean and std if not given:
if features_mean is None:
features_mean = np.mean(features, axis=0)
features_std = np.std(features, axis=0)
row_norm_list = []
for row in features: # row is a sample
row_norm = (row - features_mean) / features_std
row_norm_list.append(row_norm)
features_norm = np.vstack(row_norm_list)
return features_norm, features_mean, features_std
\ No newline at end of file
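The per-row loop in ``mean_std_normalize`` is equivalent to a single NumPy broadcast; a minimal standalone sketch of the same computation (illustration only, not part of this diff):

import numpy as np

features = np.array([[1.0, 2.0],
                     [3.0, 4.0],
                     [5.0, 6.0]])

# Same result as mean_std_normalize(features): zero mean, unit std per column.
features_mean = features.mean(axis=0)
features_std = features.std(axis=0)
features_norm = (features - features_mean) / features_std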
@@ -34,8 +34,8 @@ The implementation of (most of) the tools is separated into other packages in th
All these packages can be easily combined.
Here is a growing list of derived packages:
* `bob.pad.voice <http://pypi.python.org/pypi/bob.pad.voice>`__ Tools to run presentation attack detection experiments for speech, including several Cepstral-based features and LBP-based feature extraction, GMM-based and logistic regression based algorithms, as well as plot and score fusion scripts.
* `bob.pad.face <http://pypi.python.org/pypi/bob.pad.face>`__ Tools to run presentation attack detection experiments for face, including face-related feature extraction, GMM, SVM, and logistic regression based algorithms, as well as plotting scripts.
* `bob.pad.voice <http://pypi.python.org/pypi/bob.pad.voice>`__ Tools to run presentation attack detection experiments for speech, including several Cepstral-based features and LBP-based feature extraction, OneClassGMM-based and logistic regression based algorithms, as well as plot and score fusion scripts.
* `bob.pad.face <http://pypi.python.org/pypi/bob.pad.face>`__ Tools to run presentation attack detection experiments for face, including face-related feature extraction, OneClassGMM, SVM, and logistic regression based algorithms, as well as plotting scripts.
If you are interested, please continue reading: