Commit 91857aa2 authored by Pavel KORSHUNOV

Merge branch 'harmonize_algorithms' into 'master'

Harmonizing algorithms from bob.pad.face and bob.pad.voice, fixes issue #16

See merge request !28
parents d9fcf2da f1a35ae3
Pipeline #16820 passed with stages in 11 minutes and 52 seconds
from .utils import *
from . import database
from . import algorithm
from . import tools
from .Algorithm import Algorithm
from .SVM import SVM
from .OneClassGMM import OneClassGMM
from .LogRegr import LogRegr
from .SVMCascadePCA import SVMCascadePCA
# to fix sphinx warnings of not able to find classes, when path is shortened
Algorithm.__module__ = "bob.pad.base.algorithm"
# gets sphinx autodoc done right - don't remove it
def __appropriate__(*args):
    """Says object was actually declared here, and not in the import module.
    Fixing sphinx warnings of not being able to find classes, when path is
    shortened.

    Parameters
    ----------
    *args
        The objects that you want sphinx to believe that are defined here.

    Resolves `Sphinx referencing issues
    <https://github.com/sphinx-doc/sphinx/issues/3048>`
    """
    for obj in args:
        obj.__module__ = __name__


__appropriate__(
    Algorithm,
    SVM,
    OneClassGMM,
    LogRegr,
    SVMCascadePCA,
)
__all__ = [_ for _ in dir() if not _.startswith('_')]
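
As a quick illustration of what the __appropriate__ trick buys (a minimal sketch, assuming bob.pad.base is installed), after the call above every re-exported class reports the shortened package path, which is the name Sphinx autodoc resolves:

from bob.pad.base.algorithm import SVM, OneClassGMM

# Both classes now claim bob.pad.base.algorithm as their module, even though
# they are implemented in the SVM and OneClassGMM submodules respectively.
assert SVM.__module__ == "bob.pad.base.algorithm"
assert OneClassGMM.__module__ == "bob.pad.base.algorithm"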
from .PadIsoMetrics import PadIsoMetrics
# to fix sphinx warnings of not able to find classes, when path is shortened
PadIsoMetrics.__module__ = "bob.pad.base.evaluation"
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
def __appropriate__(*args):
    """Says object was actually declared here, and not in the import module.
    Fixing sphinx warnings of not being able to find classes, when path is
    shortened.

    Parameters
    ----------
    *args
        The objects that you want sphinx to believe that are defined here.

    Resolves `Sphinx referencing issues
    <https://github.com/sphinx-doc/sphinx/issues/3048>`
    """
    for obj in args:
        obj.__module__ = __name__


__appropriate__(
    PadIsoMetrics,
)
__all__ = [_ for _ in dir() if not _.startswith('_')]
@@ -16,7 +16,7 @@ from sqlalchemy.ext.declarative import declarative_base
regenerate_database = False
dbfile = bob.io.base.test_utils.datafile("test_db.sql3", "bob.pad.base.test")
dbfile = bob.io.base.test_utils.datafile("test_db.sql3", "bob.pad.base.test", path="data")
Base = declarative_base()
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
from __future__ import print_function
import numpy as np
from bob.io.base.test_utils import datafile
from bob.io.base import load
import bob.io.image # for image loading functionality
import bob.bio.video
import bob.pad.base
from bob.pad.base.algorithm import SVM
from bob.pad.base.algorithm import OneClassGMM
import random
from bob.pad.base.utils import convert_array_to_list_of_frame_cont, convert_list_of_frame_cont_to_array, \
convert_frame_cont_to_array
def test_video_svm_pad_algorithm():
    """
    Test the SVM PAD algorithm.
    """
    random.seed(7)

    # Synthetic 2D Gaussian features: bona-fide samples around mu=1,
    # attack samples around mu=5.
    N = 20000
    mu = 1
    sigma = 1

    real_array = np.transpose(
        np.vstack([[random.gauss(mu, sigma) for _ in range(N)],
                   [random.gauss(mu, sigma) for _ in range(N)]]))

    mu = 5
    sigma = 1

    attack_array = np.transpose(
        np.vstack([[random.gauss(mu, sigma) for _ in range(N)],
                   [random.gauss(mu, sigma) for _ in range(N)]]))

    real = convert_array_to_list_of_frame_cont(real_array)
    attack = convert_array_to_list_of_frame_cont(attack_array)

    training_features = [real, attack]

    MACHINE_TYPE = 'C_SVC'
    KERNEL_TYPE = 'RBF'
    N_SAMPLES = 1000
    TRAINER_GRID_SEARCH_PARAMS = {'cost': [1], 'gamma': [0.5, 1]}
    MEAN_STD_NORM_FLAG = True  # enable mean-std normalization
    FRAME_LEVEL_SCORES_FLAG = True  # one score per frame(!) in this case

    algorithm = SVM(
        machine_type=MACHINE_TYPE,
        kernel_type=KERNEL_TYPE,
        n_samples=N_SAMPLES,
        trainer_grid_search_params=TRAINER_GRID_SEARCH_PARAMS,
        mean_std_norm_flag=MEAN_STD_NORM_FLAG,
        frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)

    machine = algorithm.train_svm(
        training_features=training_features,
        n_samples=algorithm.n_samples,
        machine_type=algorithm.machine_type,
        kernel_type=algorithm.kernel_type,
        trainer_grid_search_params=algorithm.trainer_grid_search_params,
        mean_std_norm_flag=algorithm.mean_std_norm_flag,
        projector_file="",
        save_debug_data_flag=False)

    assert machine.n_support_vectors == [148, 150]
    assert machine.gamma == 0.5

    # A bona-fide sample should get a higher probability for the real class.
    real_sample = convert_frame_cont_to_array(real[0])

    prob = machine.predict_class_and_probabilities(real_sample)[1]

    assert prob[0, 0] > prob[0, 1]

    precision = algorithm.comp_prediction_precision(machine, real_array,
                                                    attack_array)

    assert precision > 0.99
def test_video_gmm_pad_algorithm():
    """
    Test the OneClassGMM PAD algorithm.
    """
    random.seed(7)

    N = 1000
    mu = 1
    sigma = 1

    real_array = np.transpose(
        np.vstack([[random.gauss(mu, sigma) for _ in range(N)],
                   [random.gauss(mu, sigma) for _ in range(N)]]))

    mu = 5
    sigma = 1

    attack_array = np.transpose(
        np.vstack([[random.gauss(mu, sigma) for _ in range(N)],
                   [random.gauss(mu, sigma) for _ in range(N)]]))

    real = convert_array_to_list_of_frame_cont(real_array)

    N_COMPONENTS = 1
    RANDOM_STATE = 3
    FRAME_LEVEL_SCORES_FLAG = True

    algorithm = OneClassGMM(
        n_components=N_COMPONENTS,
        random_state=RANDOM_STATE,
        frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)

    # training_features[0] - training features for the REAL class.
    real_array_converted = convert_list_of_frame_cont_to_array(real)  # output is array
    assert (real_array == real_array_converted).all()

    # Train the OneClassGMM machine and get normalizers:
    machine, features_mean, features_std = algorithm.train_gmm(
        real=real_array_converted,
        n_components=algorithm.n_components,
        random_state=algorithm.random_state)

    algorithm.machine = machine
    algorithm.features_mean = features_mean
    algorithm.features_std = features_std

    scores_real = algorithm.project(real_array_converted)
    scores_attack = algorithm.project(attack_array)

    assert (np.min(scores_real) + 7.9423798970985917) < 0.000001
    assert (np.max(scores_real) + 1.8380480068281055) < 0.000001
    assert (np.min(scores_attack) + 38.831260843070098) < 0.000001
    assert (np.max(scores_attack) + 5.3633030621521272) < 0.000001
from .helper_functions import *
@@ -27,9 +27,13 @@ requirements:
    - bob.db.base
    - bob.io.base
    - bob.bio.base
    - bob.bio.video
    - bob.learn.linear
    - bob.learn.libsvm
  run:
    - python
    - setuptools
    - gridtk

test:
  imports:
@@ -44,10 +48,13 @@ test:
    - conda inspect objects -p $PREFIX {{ name }} # [osx]
  requires:
    - bob-devel {{ bob_devel }}.*
    - bob.learn.linear
    - nose
    - gridtk
    - coverage
    - sphinx
    - sphinx_rtd_theme
    - scikit-learn

about:
  home: https://www.idiap.ch/software/bob/
@@ -5,15 +5,16 @@
[buildout]
parts = scripts
eggs = bob.pad.base
       bob.extension
       bob.db.base
       bob.bio.base
       gridtk
extensions = bob.buildout
             mr.developer
auto-checkout = *
develop = src/bob.db.base
          src/bob.bio.base
          src/bob.extension
          .

; options for bob.buildout
@@ -22,6 +23,7 @@ verbose = true
newest = false
[sources]
bob.extension = git git@gitlab.idiap.ch:bob/bob.extension
bob.db.base = git git@gitlab.idiap.ch:bob/bob.db.base.git
bob.bio.base = git git@gitlab.idiap.ch:bob/bob.bio.base.git
@@ -9,4 +9,5 @@ bob.pad.voice
bob.db.voicepa
bob.bio.spear
bob.bio.face
bob.pad.face
\ No newline at end of file
bob.pad.face
bob.learn.linear
\ No newline at end of file
@@ -34,8 +34,8 @@ The implementation of (most of) the tools is separated into other packages in th
All these packages can be easily combined.
Here is a growing list of derived packages:
* `bob.pad.voice <http://pypi.python.org/pypi/bob.pad.voice>`__ Tools to run presentation attack detection experiments for speech, including several Cepstral-based features and LBP-based feature extraction, GMM-based and logistic regression based algorithms, as well as plot and score fusion scripts.
* `bob.pad.face <http://pypi.python.org/pypi/bob.pad.face>`__ Tools to run presentation attack detection experiments for face, including face-related feature extraction, GMM, SVM, and logistic regression based algorithms, as well as plotting scripts.
* `bob.pad.voice <http://pypi.python.org/pypi/bob.pad.voice>`__ Tools to run presentation attack detection experiments for speech, including several Cepstral-based features and LBP-based feature extraction, OneClassGMM-based and logistic regression based algorithms, as well as plot and score fusion scripts.
* `bob.pad.face <http://pypi.python.org/pypi/bob.pad.face>`__ Tools to run presentation attack detection experiments for face, including face-related feature extraction, OneClassGMM, SVM, and logistic regression based algorithms, as well as plotting scripts.
If you are interested, please continue reading:
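
As a rough sketch of the harmonized algorithm interface (mirroring test_video_gmm_pad_algorithm added in this merge request; the feature values are synthetic and the parameter values illustrative, not prescriptive), a OneClassGMM is trained on bona-fide data only and then scores new samples via project():

import numpy as np
from bob.pad.base.algorithm import OneClassGMM

# Illustrative bona-fide features: 1000 two-dimensional samples (made up).
real_features = np.random.normal(loc=1.0, scale=1.0, size=(1000, 2))

# Configure the algorithm as in the tests of this merge request.
algorithm = OneClassGMM(
    n_components=1,
    random_state=3,
    frame_level_scores_flag=True)

# Train the one-class GMM on bona-fide data only and keep the normalizers.
machine, features_mean, features_std = algorithm.train_gmm(
    real=real_features,
    n_components=algorithm.n_components,
    random_state=algorithm.random_state)

algorithm.machine = machine
algorithm.features_mean = features_mean
algorithm.features_std = features_std

# project() returns log-likelihood-style scores; bona-fide samples are
# expected to score higher (less negative) than attacks.
scores = algorithm.project(real_features)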
@@ -2,4 +2,7 @@ setuptools
bob.extension
bob.db.base
bob.io.base
bob.bio.base
\ No newline at end of file
bob.bio.base
bob.bio.video
bob.learn.libsvm
bob.learn.linear
\ No newline at end of file