Skip to content
Snippets Groups Projects
Commit 4f0b126a authored by Olegs NIKISINS's avatar Olegs NIKISINS
Browse files

Merge branch 'rebase_branch' into 'master'

Docs on Anomaly detection based PAD using Aggregated Db + Sparse coding stuff

See merge request !15
parents 5026e4cc 42800e57
No related branches found
No related tags found
1 merge request!15Docs on Anomaly detection based PAD using Aggregated Db + Sparse coding stuff
Pipeline #
Showing with 1625 additions and 1 deletion
#!/usr/bin/env python

"""Configuration defining instances of the VideoHistOfSparseCodes extractor."""

from bob.pad.face.extractor import VideoHistOfSparseCodes

# ======================================================================
# Define instances here:

# Histogram computed by averaging the sparse codes over blocks:
METHOD = "mean"
extractor_mean = VideoHistOfSparseCodes(method=METHOD)

# Histogram computed from the support (non-zero pattern) of the codes:
METHOD = "hist"
extractor_hist = VideoHistOfSparseCodes(method=METHOD)
#!/usr/bin/env python

"""Configuration defining instances of the VideoSparseCoding preprocessor.

Each instance differs only in the dictionaries used for sparse coding
(dictionaries of sizes 16, 32, 64 and 128 atoms) and in the frame step.

NOTE: fixed a keyword typo present in all five constructor calls below:
``gblock_size`` -> ``block_size``.  With the typo, instantiation would
raise ``TypeError: unexpected keyword argument``.
"""

from bob.pad.face.preprocessor import VideoSparseCoding

#=======================================================================================
# Define instances here:

BLOCK_SIZE = 5        # spatial size (pixels) of the 3D blocks
BLOCK_LENGTH = 10     # temporal length (frames) of the 3D blocks
MIN_FACE_SIZE = 50    # smaller faces are discarded
NORM_FACE_SIZE = 64   # faces are rescaled to this size before coding
DICTIONARY_FILE_NAMES = ["/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_front.hdf5",
                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_hor.hdf5",
                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_vert.hdf5"]
FRAME_STEP = 10       # process every 10-th frame only

preprocessor = VideoSparseCoding(block_size = BLOCK_SIZE,
                                 block_length = BLOCK_LENGTH,
                                 min_face_size = MIN_FACE_SIZE,
                                 norm_face_size = NORM_FACE_SIZE,
                                 dictionary_file_names = DICTIONARY_FILE_NAMES,
                                 frame_step = FRAME_STEP)

#=======================================================================================
# Same settings, but dictionaries of 16 atoms and a denser frame step:

BLOCK_SIZE = 5
BLOCK_LENGTH = 10
MIN_FACE_SIZE = 50
NORM_FACE_SIZE = 64
DICTIONARY_FILE_NAMES = ["/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_front_10_5_16.hdf5",
                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_hor_10_5_16.hdf5",
                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_vert_10_5_16.hdf5"]
FRAME_STEP = 2
EXTRACT_HISTOGRAMS_FLAG = True  # return histograms instead of raw codes

preprocessor_10_5_16 = VideoSparseCoding(block_size = BLOCK_SIZE,
                                         block_length = BLOCK_LENGTH,
                                         min_face_size = MIN_FACE_SIZE,
                                         norm_face_size = NORM_FACE_SIZE,
                                         dictionary_file_names = DICTIONARY_FILE_NAMES,
                                         frame_step = FRAME_STEP,
                                         extract_histograms_flag = EXTRACT_HISTOGRAMS_FLAG)

#=======================================================================================
# Dictionaries of 32 atoms:

BLOCK_SIZE = 5
BLOCK_LENGTH = 10
MIN_FACE_SIZE = 50
NORM_FACE_SIZE = 64
DICTIONARY_FILE_NAMES = ["/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_front_10_5_32.hdf5",
                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_hor_10_5_32.hdf5",
                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_vert_10_5_32.hdf5"]
FRAME_STEP = 2
EXTRACT_HISTOGRAMS_FLAG = True

preprocessor_10_5_32 = VideoSparseCoding(block_size = BLOCK_SIZE,
                                         block_length = BLOCK_LENGTH,
                                         min_face_size = MIN_FACE_SIZE,
                                         norm_face_size = NORM_FACE_SIZE,
                                         dictionary_file_names = DICTIONARY_FILE_NAMES,
                                         frame_step = FRAME_STEP,
                                         extract_histograms_flag = EXTRACT_HISTOGRAMS_FLAG)

#=======================================================================================
# Dictionaries of 64 atoms:

BLOCK_SIZE = 5
BLOCK_LENGTH = 10
MIN_FACE_SIZE = 50
NORM_FACE_SIZE = 64
DICTIONARY_FILE_NAMES = ["/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_front_10_5_64.hdf5",
                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_hor_10_5_64.hdf5",
                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_vert_10_5_64.hdf5"]
FRAME_STEP = 2
EXTRACT_HISTOGRAMS_FLAG = True

preprocessor_10_5_64 = VideoSparseCoding(block_size = BLOCK_SIZE,
                                         block_length = BLOCK_LENGTH,
                                         min_face_size = MIN_FACE_SIZE,
                                         norm_face_size = NORM_FACE_SIZE,
                                         dictionary_file_names = DICTIONARY_FILE_NAMES,
                                         frame_step = FRAME_STEP,
                                         extract_histograms_flag = EXTRACT_HISTOGRAMS_FLAG)

#=======================================================================================
# Dictionaries of 128 atoms:

BLOCK_SIZE = 5
BLOCK_LENGTH = 10
MIN_FACE_SIZE = 50
NORM_FACE_SIZE = 64
DICTIONARY_FILE_NAMES = ["/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_front_10_5_128.hdf5",
                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_hor_10_5_128.hdf5",
                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_vert_10_5_128.hdf5"]
FRAME_STEP = 2
EXTRACT_HISTOGRAMS_FLAG = True

preprocessor_10_5_128 = VideoSparseCoding(block_size = BLOCK_SIZE,
                                          block_length = BLOCK_LENGTH,
                                          min_face_size = MIN_FACE_SIZE,
                                          norm_face_size = NORM_FACE_SIZE,
                                          dictionary_file_names = DICTIONARY_FILE_NAMES,
                                          frame_step = FRAME_STEP,
                                          extract_histograms_flag = EXTRACT_HISTOGRAMS_FLAG)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 27 11:33:45 2017
@author: Olegs Nikisins
"""
#==============================================================================
# Import what is needed here:
from bob.bio.base.extractor import Extractor
import numpy as np
import bob.bio.video
from bob.pad.face.extractor import ImageQualityMeasure
#==============================================================================
# Main body:
class VideoHistOfSparseCodes(Extractor, object):
    """
    This class is designed to extract histograms of sparse codes.

    The input is a FrameContainer whose frames hold per-frame arrays of
    sparse codes (produced by the ``VideoSparseCoding`` preprocessor);
    the output is a FrameContainer with one flattened feature vector per
    frame.

    **Parameters:**

    ``method`` : :py:class:`str`
        A method to use in the histogram computation. Two options are available:
        "mean" and "hist". Default: "mean".
    """

    #==========================================================================
    def __init__(self,
                 method = "mean"):

        # Register ``method`` with the base class so it is stored in the
        # extractor's configuration:
        super(VideoHistOfSparseCodes, self).__init__(method = method)

        self.method = method

        # extractor to process a single image/frame:
        extractor = ImageQualityMeasure()

        # This extractor is used only to write and read the feature vectors.
        self.video_extractor = bob.bio.video.extractor.Wrapper(extractor)

    #==========================================================================
    def comp_hist_of_sparse_codes(self, frames, method):
        """
        Compute the histograms of sparse codes.

        **Parameters:**

        ``frames`` : FrameContainer
            Each frame entry is ``(index, data, quality)``; ``data`` holds the
            sparse codes of one frame.
            NOTE(review): the "hist" branch indexes ``frame_codes[idx, :]``,
            which assumes the per-frame code array has at least 2 dimensions
            after averaging over axis 1 (i.e. the input is >= 3-D, e.g. one
            slice per dictionary) — confirm against the preprocessor output.

        ``method`` : :py:class:`str`
            Either "mean" (average code values) or "hist" (normalized
            frequency of non-zero codes).

        **Returns:**

        ``histograms`` : [:py:class:`numpy.ndarray`]
            One flattened 1D feature vector per frame.
        """

        histograms = []

        for frame_data in frames:

            # frame_data is (index, data, ...); the codes are the second item:
            frame = frame_data[1]

            if method == "mean":

                # Average the code values over axis 1 (the blocks):
                frame_codes = np.mean(frame, axis=1)

            if method == "hist":

                # Fraction of blocks in which each code is active (non-zero):
                frame_codes = np.mean(frame!=0, axis=1)

                # L1-normalize each row so it forms a histogram.
                # NOTE(review): divides by the row sum with no zero guard —
                # an all-zero row would produce NaNs; presumably codes are
                # never entirely zero here.
                for idx, row in enumerate(frame_codes):

                    frame_codes[idx,:] = row/np.sum(row)

            # NOTE(review): if ``method`` is neither "mean" nor "hist",
            # ``frame_codes`` is undefined and the next line raises NameError.
            hist = frame_codes.flatten()

            histograms.append(hist)

        return histograms

    #==========================================================================
    def convert_sparse_codes_to_frame_container(self, list_of_arrays):
        """
        Convert an input list of arrays into Frame Container.

        **Parameters:**

        ``list_of_arrays`` : [:py:class:`numpy.ndarray`]
            A list of arrays.

        **Returns:**

        ``frame_container`` : FrameContainer
            FrameContainer containing the feature vectors.
        """

        frame_container = bob.bio.video.FrameContainer() # initialize the FrameContainer

        for idx, item in enumerate(list_of_arrays):

            frame_container.add(idx, item) # add frame to FrameContainer

        return frame_container

    #==========================================================================
    def __call__(self, frames):
        """
        Extract feature vectors.

        **Parameters:**

        ``frames`` : FrameContainer or string.
            Data stored in the FrameContainer,
            see ``bob.bio.video.utils.FrameContainer`` for further details.

        **Returns:**

        ``frame_container`` : FrameContainer
            Histograms of sparse codes stored in the FrameContainer.
        """

        # Compute one histogram per frame using the configured method:
        histograms = self.comp_hist_of_sparse_codes(frames, self.method)

        # Repackage the list of histograms as a FrameContainer:
        frame_container = self.convert_sparse_codes_to_frame_container(histograms)

        return frame_container

    #==========================================================================
    def write_feature(self, frames, file_name):
        """
        Writes the given data (that has been generated using the __call__ function of this class) to file.
        This method overwrites the write_data() method of the Extractor class.

        **Parameters:**

        ``frames`` :
            Data returned by the __call__ method of the class.

        ``file_name`` : :py:class:`str`
            Name of the file.
        """

        # Delegate serialization to the wrapped video extractor:
        self.video_extractor.write_feature(frames, file_name)

    #==========================================================================
    def read_feature(self, file_name):
        """
        Reads the preprocessed data from file.
        This method overwrites the read_data() method of the Extractor class.

        **Parameters:**

        ``file_name`` : :py:class:`str`
            Name of the file.

        **Returns:**

        ``frames`` : :py:class:`bob.bio.video.FrameContainer`
            Frames stored in the frame container.
        """

        # Delegate deserialization to the wrapped video extractor:
        frames = self.video_extractor.read_feature(file_name)

        return frames
......@@ -4,6 +4,7 @@ from .ImageQualityMeasure import ImageQualityMeasure
from .VideoDataLoader import VideoDataLoader
from .VideoQualityMeasure import VideoQualityMeasure
from .FrameDiffFeatures import FrameDiffFeatures
from .VideoHistOfSparseCodes import VideoHistOfSparseCodes
def __appropriate__(*args):
"""Says object was actually declared here, and not in the import module.
......@@ -30,5 +31,6 @@ __appropriate__(
VideoQualityMeasure,
VideoDataLoader,
FrameDiffFeatures,
VideoHistOfSparseCodes,
)
__all__ = [_ for _ in dir() if not _.startswith('_')]
This diff is collapsed.
from .VideoFaceCrop import VideoFaceCrop
from .ImageFaceCrop import ImageFaceCrop
from .FrameDifference import FrameDifference
from .VideoSparseCoding import VideoSparseCoding
def __appropriate__(*args):
......@@ -25,5 +26,7 @@ __appropriate__(
VideoFaceCrop,
ImageFaceCrop,
FrameDifference,
VideoSparseCoding,
)
__all__ = [_ for _ in dir() if not _.startswith('_')]
File added
File added
File added
......@@ -22,6 +22,7 @@ Users Guide
installation
baselines
other_pad_algorithms
references
resources
api
......
.. _bob.pad.face.other_pad_algorithms:
===============================
Executing Other Algorithms
===============================
This section explains how to execute face presentation attack detection (PAD) algorithms implemented
in ``bob.pad.face``.
.. warning::
Algorithms introduced in this section might be in the process of publishing. Therefore, it is not
allowed to publish results introduced in this section without permission of the owner of the package.
If you are planning to use the results from this section, please contact the owner of the package first.
Please check the ``setup.py`` for contact information.
Running face PAD Experiments
------------------------------
To run the PAD experiments, the ``spoof.py`` script located in ``bin`` directory is used.
To see the description of the script you can type in the console:
.. code-block:: sh
$ spoof.py --help
This script is explained in more detail in :ref:`bob.pad.base.experiments`.
Usually it is a good idea to have at least verbose level 2 (i.e., calling
``spoof.py --verbose --verbose``, or the short version ``spoof.py
-vv``).
.. note:: **Running in Parallel**
To run the experiments in parallel, you can define an SGE grid or local host
(multi-processing) configurations as explained in
:ref:`running_in_parallel`.
In short, to run in the Idiap SGE grid, you can simply add the ``--grid``
command line option, with grid configuration parameters. To run experiments in parallel on
the local machine, simply add a ``--parallel <N>`` option, where ``<N>``
specifies the number of parallel jobs you want to execute.
Database setups and face PAD algorithms are encoded using
:ref:`bob.bio.base.configuration-files`, all stored inside the package root, in
the directory ``bob/pad/face/config``. Documentation for each resource
is available on the section :ref:`bob.pad.face.resources`.
.. warning::
You **cannot** run experiments just by executing the command line
instructions described in this guide. You **need first** to procure yourself
the raw data files that correspond to *each* database used here in order to
correctly run experiments with those data. Biometric data is considered
private data and, under EU regulations, cannot be distributed without a
consent or license. You may consult our
:ref:`bob.pad.face.resources.databases` resources section for checking
currently supported databases and accessing download links for the raw data
files.
Once the raw data files have been downloaded, particular attention should be
given to the directory locations of those. Unpack the databases carefully
and annotate the root directory where they have been unpacked.
Then, carefully read the *Databases* section of
:ref:`bob.pad.base.installation` on how to correctly setup the
``~/.bob_bio_databases.txt`` file.
Use the following keywords on the left side of the assignment (see
:ref:`bob.pad.face.resources.databases`):
.. code-block:: text
[YOUR_REPLAY_ATTACK_DIRECTORY] = /complete/path/to/replayattack-database/
Notice it is rather important to use the strings as described above,
otherwise ``bob.pad.base`` will not be able to correctly load your images.
Once this step is done, you can proceed with the instructions below.
------------
.. _bob.pad.face.other_pad_algorithms.aggregated_db:
Anomaly detection based PAD on Aggregated Database
--------------------------------------------------------
This section summarizes the results of *anomaly detection* based face PAD experiments on the Aggregated Database.
The description of the database-related settings, which are used to run face PAD algorithms on the Aggregated Db, is given here: :ref:`bob.pad.face.resources.databases.aggregated_db`. To understand the settings in more detail you can check the corresponding configuration file: ``bob/pad/face/config/aggregated_db.py``.
------------
Results for *grandtest* protocol
========================================================================
This section summarizes the evaluation results on the **grandtest** protocol of the Aggregated database for the following face PAD algorithms (for more details click on the corresponding algorithm):
- :ref:`bob.pad.face.resources.face_pad.qm_one_class_gmm`,
- :ref:`bob.pad.face.resources.face_pad.qm_one_class_svm_aggregated_db`,
- :ref:`bob.pad.face.resources.face_pad.qm_lr`,
- :ref:`bob.pad.face.resources.face_pad.qm_svm_aggregated_db`.
For a more detailed understanding of above pipe-lines you can also check corresponding configuration files:
- ``bob/pad/face/config/qm_one_class_gmm.py``,
- ``bob/pad/face/config/qm_one_class_svm_aggregated_db.py``,
- ``bob/pad/face/config/qm_lr.py``,
- ``bob/pad/face/config/qm_svm_aggregated_db.py``.
To run above algorithms on the :ref:`bob.pad.face.resources.databases.aggregated_db` database, using the ``grandtest`` protocol, execute the following:
.. code-block:: sh
$ spoof.py aggregated-db qm-one-class-gmm \
--sub-directory <PATH_TO_STORE_THE_RESULTS_1>
$ spoof.py aggregated-db qm-one-class-svm-aggregated-db \
--sub-directory <PATH_TO_STORE_THE_RESULTS_2>
$ spoof.py aggregated-db qm-lr \
--sub-directory <PATH_TO_STORE_THE_RESULTS_3>
$ spoof.py aggregated-db qm-svm-aggregated-db \
--sub-directory <PATH_TO_STORE_THE_RESULTS_4>
.. tip::
If you are at `idiap`_ you can use the SGE grid to speed up the calculations.
Simply add the ``--grid idiap`` argument to the above commands.
To evaluate the results computing EER, HTER and plotting ROC you can use the
following command:
.. code-block:: sh
./bin/evaluate.py \
--dev-files \
<PATH_TO_STORE_THE_RESULTS_1>/grandtest/scores/scores-dev \
<PATH_TO_STORE_THE_RESULTS_2>/grandtest/scores/scores-dev \
<PATH_TO_STORE_THE_RESULTS_3>/grandtest/scores/scores-dev \
<PATH_TO_STORE_THE_RESULTS_4>/grandtest/scores/scores-dev \
--eval-files \
<PATH_TO_STORE_THE_RESULTS_1>/grandtest/scores/scores-eval \
<PATH_TO_STORE_THE_RESULTS_2>/grandtest/scores/scores-eval \
<PATH_TO_STORE_THE_RESULTS_3>/grandtest/scores/scores-eval \
<PATH_TO_STORE_THE_RESULTS_4>/grandtest/scores/scores-eval \
--legends \
"IQM + one-class GMM + Aggregated Db" \
"IQM + one-class SVM + Aggregated Db" \
"IQM + two-class LR + Aggregated Db" \
"IQM + two-class SVM + Aggregated Db" \
-F 7 \
--criterion EER \
--roc <PATH_TO_STORE_THE_RESULTS>/ROC.pdf
The EER/HTER errors for the :ref:`bob.pad.face.resources.databases.aggregated_db` database are summarized in the Table below:
+------------------------+----------+----------+
| Algorithm | EER,\% | HTER,\% |
+========================+==========+==========+
| IQM + one-class GMM | 19.336 | 20.769 |
+------------------------+----------+----------+
| IQM + one-class SVM | 28.137 | 34.776 |
+------------------------+----------+----------+
| IQM + two-class LR | 10.354 | 11.856 |
+------------------------+----------+----------+
| IQM + two-class SVM | 12.710 | 15.253 |
+------------------------+----------+----------+
The ROC curves for the particular experiment can be downloaded from here:
:download:`ROC curve <img/ROC_iqm_anomaly_detection_aggr_db_grandtest.pdf>`
------------
Results for *photo-photo-video* protocol
========================================================================
This section summarizes the evaluation results on the **photo-photo-video** protocol of the Aggregated database for the following face PAD algorithms (for more details click on the corresponding algorithm):
- :ref:`bob.pad.face.resources.face_pad.qm_one_class_gmm`,
- :ref:`bob.pad.face.resources.face_pad.qm_one_class_svm_aggregated_db`,
- :ref:`bob.pad.face.resources.face_pad.qm_lr`,
- :ref:`bob.pad.face.resources.face_pad.qm_svm_aggregated_db`.
For a more detailed understanding of above pipe-lines you can also check corresponding configuration files:
- ``bob/pad/face/config/qm_one_class_gmm.py``,
- ``bob/pad/face/config/qm_one_class_svm_aggregated_db.py``,
- ``bob/pad/face/config/qm_lr.py``,
- ``bob/pad/face/config/qm_svm_aggregated_db.py``.
To run above algorithms on the :ref:`bob.pad.face.resources.databases.aggregated_db` database, using the ``photo-photo-video`` protocol, execute the following:
.. code-block:: sh
$ spoof.py aggregated-db qm-one-class-gmm \
--protocol photo-photo-video \
--sub-directory <PATH_TO_STORE_THE_RESULTS_1>
$ spoof.py aggregated-db qm-one-class-svm-aggregated-db \
--protocol photo-photo-video \
--sub-directory <PATH_TO_STORE_THE_RESULTS_2>
$ spoof.py aggregated-db qm-lr \
--protocol photo-photo-video \
--sub-directory <PATH_TO_STORE_THE_RESULTS_3>
$ spoof.py aggregated-db qm-svm-aggregated-db \
--protocol photo-photo-video \
--sub-directory <PATH_TO_STORE_THE_RESULTS_4>
.. tip::
If you are at `idiap`_ you can use the SGE grid to speed up the calculations.
Simply add the ``--grid idiap`` argument to the above commands.
To evaluate the results computing EER, HTER and plotting ROC you can use the
following command:
.. code-block:: sh
./bin/evaluate.py \
--dev-files \
<PATH_TO_STORE_THE_RESULTS_1>/photo-photo-video/scores/scores-dev \
<PATH_TO_STORE_THE_RESULTS_2>/photo-photo-video/scores/scores-dev \
<PATH_TO_STORE_THE_RESULTS_3>/photo-photo-video/scores/scores-dev \
<PATH_TO_STORE_THE_RESULTS_4>/photo-photo-video/scores/scores-dev \
--eval-files \
<PATH_TO_STORE_THE_RESULTS_1>/photo-photo-video/scores/scores-eval \
<PATH_TO_STORE_THE_RESULTS_2>/photo-photo-video/scores/scores-eval \
<PATH_TO_STORE_THE_RESULTS_3>/photo-photo-video/scores/scores-eval \
<PATH_TO_STORE_THE_RESULTS_4>/photo-photo-video/scores/scores-eval \
--legends \
"IQM + one-class GMM + Aggregated Db" \
"IQM + one-class SVM + Aggregated Db" \
"IQM + two-class LR + Aggregated Db" \
"IQM + two-class SVM + Aggregated Db" \
-F 7 \
--criterion EER \
--roc <PATH_TO_STORE_THE_RESULTS>/ROC.pdf
The EER/HTER errors for the :ref:`bob.pad.face.resources.databases.aggregated_db` database are summarized in the Table below:
+------------------------+----------+----------+
| Algorithm | EER,\% | HTER,\% |
+========================+==========+==========+
| IQM + one-class GMM | 22.075 | 14.470 |
+------------------------+----------+----------+
| IQM + one-class SVM | 35.537 | 24.317 |
+------------------------+----------+----------+
| IQM + two-class LR | 10.184 | 30.132 |
+------------------------+----------+----------+
| IQM + two-class SVM | 10.527 | 21.926 |
+------------------------+----------+----------+
The ROC curves for the particular experiment can be downloaded from here:
:download:`ROC curve <img/ROC_iqm_anomaly_detection_aggr_db_ph_ph_vid.pdf>`
------------
Results for *video-video-photo* protocol
========================================================================
This section summarizes the evaluation results on the **video-video-photo** protocol of the Aggregated database for the following face PAD algorithms (for more details click on the corresponding algorithm):
- :ref:`bob.pad.face.resources.face_pad.qm_one_class_gmm`,
- :ref:`bob.pad.face.resources.face_pad.qm_one_class_svm_aggregated_db`,
- :ref:`bob.pad.face.resources.face_pad.qm_lr`,
- :ref:`bob.pad.face.resources.face_pad.qm_svm_aggregated_db`.
For a more detailed understanding of above pipe-lines you can also check corresponding configuration files:
- ``bob/pad/face/config/qm_one_class_gmm.py``,
- ``bob/pad/face/config/qm_one_class_svm_aggregated_db.py``,
- ``bob/pad/face/config/qm_lr.py``,
- ``bob/pad/face/config/qm_svm_aggregated_db.py``.
To run above algorithms on the :ref:`bob.pad.face.resources.databases.aggregated_db` database, using the ``video-video-photo`` protocol, execute the following:
.. code-block:: sh
$ spoof.py aggregated-db qm-one-class-gmm \
--protocol video-video-photo \
--sub-directory <PATH_TO_STORE_THE_RESULTS_1>
$ spoof.py aggregated-db qm-one-class-svm-aggregated-db \
--protocol video-video-photo \
--sub-directory <PATH_TO_STORE_THE_RESULTS_2>
$ spoof.py aggregated-db qm-lr \
--protocol video-video-photo \
--sub-directory <PATH_TO_STORE_THE_RESULTS_3>
$ spoof.py aggregated-db qm-svm-aggregated-db \
--protocol video-video-photo \
--sub-directory <PATH_TO_STORE_THE_RESULTS_4>
.. tip::
If you are at `idiap`_ you can use the SGE grid to speed up the calculations.
Simply add the ``--grid idiap`` argument to the above commands.
To evaluate the results computing EER, HTER and plotting ROC you can use the
following command:
.. code-block:: sh
./bin/evaluate.py \
--dev-files \
<PATH_TO_STORE_THE_RESULTS_1>/video-video-photo/scores/scores-dev \
<PATH_TO_STORE_THE_RESULTS_2>/video-video-photo/scores/scores-dev \
<PATH_TO_STORE_THE_RESULTS_3>/video-video-photo/scores/scores-dev \
<PATH_TO_STORE_THE_RESULTS_4>/video-video-photo/scores/scores-dev \
--eval-files \
<PATH_TO_STORE_THE_RESULTS_1>/video-video-photo/scores/scores-eval \
<PATH_TO_STORE_THE_RESULTS_2>/video-video-photo/scores/scores-eval \
<PATH_TO_STORE_THE_RESULTS_3>/video-video-photo/scores/scores-eval \
<PATH_TO_STORE_THE_RESULTS_4>/video-video-photo/scores/scores-eval \
--legends \
"IQM + one-class GMM + Aggregated Db" \
"IQM + one-class SVM + Aggregated Db" \
"IQM + two-class LR + Aggregated Db" \
"IQM + two-class SVM + Aggregated Db" \
-F 7 \
--criterion EER \
--roc <PATH_TO_STORE_THE_RESULTS>/ROC.pdf
The EER/HTER errors for the :ref:`bob.pad.face.resources.databases.aggregated_db` database are summarized in the Table below:
+------------------------+----------+----------+
| Algorithm | EER,\% | HTER,\% |
+========================+==========+==========+
| IQM + one-class GMM | 13.503 | 29.794 |
+------------------------+----------+----------+
| IQM + one-class SVM | 18.234 | 39.502 |
+------------------------+----------+----------+
| IQM + two-class LR | 1.499 | 30.268 |
+------------------------+----------+----------+
| IQM + two-class SVM | 1.422 | 24.901 |
+------------------------+----------+----------+
The ROC curves for the particular experiment can be downloaded from here:
:download:`ROC curve <img/ROC_iqm_anomaly_detection_aggr_db_vid_vid_ph.pdf>`
------------
.. include:: links.rst
......@@ -135,3 +135,32 @@ Frame differences based features (motion analysis) + SVM for Aggregated Database
.. automodule:: bob.pad.face.config.frame_diff_svm_aggregated_db
:members:
.. _bob.pad.face.resources.face_pad.qm_lr:
Image Quality Measures as features of facial region + Logistic Regression
============================================================================================================================
.. automodule:: bob.pad.face.config.qm_lr
:members:
.. _bob.pad.face.resources.face_pad.qm_one_class_gmm:
Image Quality Measures as features of facial region + GMM-based one-class classifier (anomaly detector)
============================================================================================================================
.. automodule:: bob.pad.face.config.qm_one_class_gmm
:members:
.. _bob.pad.face.resources.face_pad.qm_one_class_svm_aggregated_db:
Image Quality Measures as features of facial region + one-class SVM classifier (anomaly detector) for Aggregated Database
============================================================================================================================
.. automodule:: bob.pad.face.config.qm_one_class_svm_aggregated_db
:members:
......@@ -103,13 +103,24 @@ setup(
# registered preprocessors:
'bob.pad.preprocessor': [
'empty-preprocessor = bob.pad.face.config.preprocessor.filename:empty_preprocessor', # no preprocessing
# The sparse coding based preprocessors
'sparse-coding-preprocessor = bob.pad.face.config.preprocessor.video_sparse_coding:preprocessor',
'sparse-coding-preprocessor-10-5-16 = bob.pad.face.config.preprocessor.video_sparse_coding:preprocessor_10_5_16',
'sparse-coding-preprocessor-10-5-32 = bob.pad.face.config.preprocessor.video_sparse_coding:preprocessor_10_5_32',
'sparse-coding-preprocessor-10-5-64 = bob.pad.face.config.preprocessor.video_sparse_coding:preprocessor_10_5_64',
'sparse-coding-preprocessor-10-5-128 = bob.pad.face.config.preprocessor.video_sparse_coding:preprocessor_10_5_128',
],
# registered preprocessors:
# registered extractors:
'bob.pad.extractor': [
'video-lbp-histogram-extractor-n8r1-uniform = bob.pad.face.config.extractor.video_lbp_histogram:video_lbp_histogram_extractor_n8r1_uniform',
'video-quality-measure-galbally-msu = bob.pad.face.config.extractor.video_quality_measure:video_quality_measure_galbally_msu',
'frame-diff-feat-extr-w20-over0 = bob.pad.face.config.extractor.frame_diff_features:frame_diff_feat_extr_w20_over0',
# extractors for sparse coding:
'hist-of-sparse-codes-mean = bob.pad.face.config.extractor.video_hist_of_sparse_codes:extractor_mean',
'hist-of-sparse-codes-hist = bob.pad.face.config.extractor.video_hist_of_sparse_codes:extractor_hist',
],
# registered algorithms:
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment