Commit 85ce8670 authored by Amir MOHAMMADI

Remove bob.ip.base and LBP

parent ea96d321
Merge request !123: Drop bob.db.atnt and bob.ip.base
Pipeline #60592 failed
import bob.pipelines as mario
from bob.bio.face.utils import face_crop_solver, get_default_cropped_positions
from bob.bio.video.transformer import VideoWrapper
from bob.pad.face.extractor import LBPHistogram
# The `database` object may be set by a preceding configuration file in the chain.
database = globals().get("database")
if database is not None:
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
# Preprocessor #
cropped_image_size = (64, 64)
cropped_positions = get_default_cropped_positions(
"pad", cropped_image_size, annotation_type
)
cropper = face_crop_solver(
cropped_image_size=cropped_image_size,
cropped_positions=cropped_positions,
color_channel="gray",
fixed_positions=fixed_positions,
)
preprocessor = VideoWrapper(cropper)
preprocessor = mario.wrap(
["sample"],
preprocessor,
transform_extra_arguments=(("annotations", "annotations"),),
)
# Extractor #
extractor = VideoWrapper(
LBPHistogram(
lbp_type="uniform",
elbp_type="regular",
radius=1,
neighbors=8,
circular=False,
dtype=None,
)
)
extractor = mario.wrap(["sample"], extractor)
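
# Illustration added for clarity (not part of the original lbp_64 configuration):
# the two sample-wrapped transformers above form the front end of a PAD pipeline
# and can be chained with scikit-learn; a classifier resource (e.g. ``svm-frames``)
# is typically appended on the command line to complete the pipeline.
from sklearn.pipeline import Pipeline

frontend_pipeline = Pipeline(
    [("preprocessor", preprocessor), ("extractor", extractor)]
)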
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin

# LBP and histogram are provided by bob.ip.base, the dependency this commit removes.
from bob.ip.base import LBP, histogram
class LBPHistogram(TransformerMixin, BaseEstimator):
"""Calculates a normalized LBP histogram over an image.
These features are implemented based on [CAM12]_.
Parameters
----------
lbp_type : str
The type of the LBP operator (regular, uniform or riu2)
    elbp_type : str
        Which type of LBP codes should be computed; possible values: ('regular',
        'transitional', 'direction-coded'). For the old 'modified' method,
        specify ``elbp_type`` as ``'regular'`` and ``to_average`` as ``True``.
to_average : bool
Compare the neighbors to the average of the pixels instead of the central pixel?
radius : float
The radius of the circle on which the points are taken (for circular LBP)
neighbors : int
The number of points around the central point on which LBP is
computed (4, 8, 16)
circular : bool
True if circular LBP is needed, False otherwise
n_hor : int
Number of blocks horizontally for spatially-enhanced LBP/MCT
histograms. Default: 1
    n_vert : int
Number of blocks vertically for spatially-enhanced LBP/MCT
histograms. Default: 1
Attributes
----------
    dtype : numpy.dtype
        If a ``dtype`` is specified in the constructor, it is ensured that the
        resulting features have that dtype.
    lbp_ : bob.ip.base.LBP
        The LBP extractor object, created by ``fit``.
"""
def __init__(
self,
lbp_type="uniform",
elbp_type="regular",
to_average=False,
radius=1,
neighbors=8,
circular=False,
dtype=None,
n_hor=1,
n_vert=1,
**kwargs,
):
super().__init__(**kwargs)
self.lbp_type = lbp_type
self.elbp_type = elbp_type
self.to_average = to_average
self.radius = radius
self.neighbors = neighbors
self.circular = circular
self.dtype = dtype
self.n_hor = n_hor
self.n_vert = n_vert
self.fit()
def fit(self, X=None, y=None):
self.lbp_ = LBP(
neighbors=self.neighbors,
radius=self.radius,
circular=self.circular,
to_average=self.to_average,
uniform=self.lbp_type in ("uniform", "riu2"),
rotation_invariant=self.lbp_type == "riu2",
elbp_type=self.elbp_type,
)
return self
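    # The LBP object built in ``fit`` wraps bob.ip.base C++ state, so it is
    # excluded from the pickled state below and rebuilt via ``fit`` when the
    # estimator is unpickled.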
def __getstate__(self):
d = self.__dict__.copy()
d.pop("lbp_")
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.fit()
def comp_block_histogram(self, data):
"""
        Extracts an LBP/MCT histogram from a gray-scale image/block.

        Computes the LBP image of the block, then collapses it into a 1D
        normalized histogram, enforcing the data type, if desired.
Parameters
----------
data : numpy.ndarray
The preprocessed data to be transformed into one vector.
Returns
-------
1D :py:class:`numpy.ndarray`
The extracted feature vector, of the desired ``dtype`` (if
specified)
"""
assert isinstance(data, np.ndarray)
# allocating the image with lbp codes
lbpimage = np.ndarray(self.lbp_.lbp_shape(data), "uint16")
self.lbp_(data, lbpimage) # calculating the lbp image
hist = histogram(lbpimage, (0, self.lbp_.max_label - 1), self.lbp_.max_label)
hist = hist / np.sum(hist) # histogram normalization
if self.dtype is not None:
hist = hist.astype(self.dtype)
return hist
def transform_one_image(self, data):
"""
Extracts spatially-enhanced LBP/MCT histograms from a gray-scale image.
Parameters
----------
data : numpy.ndarray
The preprocessed data to be transformed into one vector.
Returns
-------
1D :py:class:`numpy.ndarray`
The extracted feature vector, of the desired ``dtype`` (if
specified)
"""
# Make sure the data can be split into equal blocks:
row_max = int(data.shape[0] / self.n_vert) * self.n_vert
col_max = int(data.shape[1] / self.n_hor) * self.n_hor
data = data[:row_max, :col_max]
blocks = [
sub_block
for block in np.hsplit(data, self.n_hor)
for sub_block in np.vsplit(block, self.n_vert)
]
hists = [self.comp_block_histogram(block) for block in blocks]
hist = np.hstack(hists)
hist = hist / len(blocks) # histogram normalization
return hist
def transform(self, images):
return [self.transform_one_image(img) for img in images]
def _more_tags(self):
return {"stateless": True, "requires_fit": False}
from .LBPHistogram import LBPHistogram
from .ImageQualityMeasure import ImageQualityMeasure
@@ -21,7 +20,6 @@ def __appropriate__(*args):
__appropriate__(
LBPHistogram,
ImageQualityMeasure,
)
__all__ = [_ for _ in dir() if not _.startswith('_')]
@@ -2,7 +2,7 @@ import bob.io.base
import os
from bob.pipelines import DelayedSample
from bob.pad.base.pipelines.vanilla_pad.abstract_classes import Database
from bob.db.base.utils import check_parameters_for_validity, convert_names_to_lowlevel
from bob.bio.base.database.legacy import check_parameters_for_validity, convert_names_to_lowlevel
from bob.bio.video import VideoLikeContainer
from bob.bio.base.database import AtntBioDatabase
......
@@ -14,79 +14,9 @@ import bob.bio.video
from bob.bio.face.color import rgb_to_gray
from bob.pad.face.extractor import LBPHistogram, ImageQualityMeasure
from bob.pad.face.extractor import ImageQualityMeasure
def test_lbp_histogram():
lbp = LBPHistogram()
img = load(datafile("testimage.jpg", "bob.bio.face.test"))
img = rgb_to_gray(img)
features = lbp.transform([img])[0]
reference = load(datafile("lbp.hdf5", "bob.pad.face.test"))
assert np.allclose(features, reference)
def notest_video_lbp_histogram():
"""
Test LBPHistogram with Wrapper extractor.
"""
from ..preprocessor import FaceCropAlign
from bob.bio.video.preprocessor import Wrapper
FACE_SIZE = 64 # The size of the resulting face
RGB_OUTPUT_FLAG = False # Gray-scale output
USE_FACE_ALIGNMENT = False # use annotations
MAX_IMAGE_SIZE = None # no limiting here
FACE_DETECTION_METHOD = None # use annotations
MIN_FACE_SIZE = 50 # skip small faces
image_preprocessor = FaceCropAlign(
face_size=FACE_SIZE,
rgb_output_flag=RGB_OUTPUT_FLAG,
use_face_alignment=USE_FACE_ALIGNMENT,
max_image_size=MAX_IMAGE_SIZE,
face_detection_method=FACE_DETECTION_METHOD,
min_face_size=MIN_FACE_SIZE,
)
preprocessor = Wrapper(image_preprocessor)
image = load(datafile("test_image.png", "bob.pad.face.test"))
annotations = {"topleft": (95, 155), "bottomright": (215, 265)}
video, annotations = convert_image_to_video_data(image, annotations, 20)
faces = preprocessor(frames=video, annotations=annotations)
LBPTYPE = "uniform"
ELBPTYPE = "regular"
RAD = 1
NEIGHBORS = 8
CIRC = False
DTYPE = None
extractor = bob.bio.video.extractor.Wrapper(
LBPHistogram(
lbptype=LBPTYPE,
elbptype=ELBPTYPE,
rad=RAD,
neighbors=NEIGHBORS,
circ=CIRC,
dtype=DTYPE,
)
)
lbp_histograms = extractor(faces)
assert len(lbp_histograms) == 20
assert len(lbp_histograms[0][1]) == 59
assert (lbp_histograms[0][1] == lbp_histograms[-1][1]).all()
assert (lbp_histograms[0][1][0] - 0.12695109261186263) < 0.000001
assert (lbp_histograms[0][1][-1] - 0.031737773152965658) < 0.000001
# ==============================================================================
def notest_video_quality_measure():
"""
Test ImageQualityMeasure with Wrapper extractor.
......
@@ -24,7 +24,6 @@ requirements:
- pip {{ pip }}
- bob.extension
- bob.io.base
- bob.ip.base
- bob.ip.color
- bob.ip.qualitymeasure
- bob.bio.base
......
@@ -89,79 +89,6 @@ more detail you can check the corresponding configuration file:
``bob/pad/face/config/replay_attack.py``.
LBP features of facial region + SVM classifier
===================================================
A detailed description of this PAD pipeline is given at
:ref:`bob.pad.face.resources.face_pad.lbp_svm_replayattack`.
To run this baseline on the `replayattack`_ database, using the ``grandtest``
protocol, execute the following:
.. code-block:: sh
$ bob pad vanilla-pad replay-attack lbp svm-frames \
--output <PATH_TO_STORE_THE_RESULTS>
.. tip::
If you are at `idiap`_, you can use the SGE grid to speed up the calculations.
Simply add the ``--dask-client sge`` (or ``-l sge``) argument to the above
command. For example:
.. code-block:: sh
$ bob pad vanilla-pad replay-attack lbp svm-frames \
--output <PATH_TO_STORE_THE_RESULTS> \
--dask-client sge
To understand the settings of this baseline PAD experiment, you can check the
corresponding configuration file: ``bob/pad/face/config/lbp_svm.py``.

To evaluate the results, computing the EER and HTER and plotting the ROC curve,
you can use the following command:
.. code-block:: sh
$ bob pad evaluate \
<PATH_TO_STORE_THE_RESULTS>/scores-dev.csv \
<PATH_TO_STORE_THE_RESULTS>/scores-eval.csv \
--legends "LBP features of facial region + SVM classifier + REPLAY-ATTACK database" \
--eval \
--criterion eer \
--output <PATH_TO_STORE_THE_RESULTS>/evaluation_report.pdf
The error rates for the `replayattack`_ database are summarized in the table below:
==============  =================  =================
..              Development        Evaluation
==============  =================  =================
APCER (attack)  17.4%              14.2%
APCER_AP        17.4%              14.2%
BPCER           17.4%              16.4%
ACER            17.4%              15.3%
FTA             0.0%               0.0%
FPR             17.4% (1045/6000)  14.2% (1134/7999)
FNR             17.4% (209/1200)   16.4% (262/1600)
HTER            17.4%              15.3%
FAR             17.4%              14.2%
FRR             17.4%              16.4%
PRECISION       0.5                0.5
RECALL          0.8                0.8
F1_SCORE        0.6                0.7
AUC             0.9                0.9
AUC-LOG-SCALE   2.0                2.1
==============  =================  =================
The ROC curves for this particular experiment can be downloaded from here:
:download:`ROC curve <img/ROC_lbp_svm_replay_attack.pdf>`
------------
Image Quality Measures as features of facial region + SVM classifier
========================================================================
@@ -235,70 +162,6 @@ This section summarizes the results of baseline face PAD experiments on the `Rep
The description of the database-related settings, which are used to run face PAD baselines on the Replay-Mobile database, is given here: :ref:`bob.pad.face.resources.databases.replay_mobile`. To understand the settings in more detail, you can check the corresponding configuration file: ``bob/pad/face/config/replay_mobile.py``.
LBP features of facial region + SVM classifier
========================================================================
A detailed description of this PAD pipeline is given at :ref:`bob.pad.face.resources.face_pad.lbp_svm_replayattack`.
Note that the same PAD pipeline was used to run experiments on the Replay-Attack database.
To run this baseline on the `Replay-Mobile`_ database, using the ``grandtest`` protocol, execute the following:
.. code-block:: sh
$ bob pad vanilla-pad replay-mobile lbp svm-frames \
--output <PATH_TO_STORE_THE_RESULTS>
.. tip::
Similarly to the tip above, you can run this baseline in parallel with the
``--dask-client`` option.
To understand the settings of this baseline PAD experiment, you can check the
corresponding configuration file: ``bob/pad/face/config/lbp_svm.py``.

To evaluate the results, computing the EER and HTER and plotting the ROC curve,
you can use the following command:
.. code-block:: sh
$ bob pad evaluate \
<PATH_TO_STORE_THE_RESULTS>/scores-dev.csv \
<PATH_TO_STORE_THE_RESULTS>/scores-eval.csv \
--legends "LBP features of facial region + SVM classifier + Replay-Mobile database" \
--eval \
--criterion eer \
--output <PATH_TO_STORE_THE_RESULTS>/evaluation_report.pdf
The EER/HTER errors for the `Replay-Mobile`_ database are summarized in the table below:
===================  =================  =================
..                   Development        Evaluation
===================  =================  =================
APCER (mattescreen)  15.60%             25.77%
APCER (print)        12.97%             8.44%
APCER_AP             15.60%             25.77%
BPCER                14.29%             20.03%
ACER                 14.94%             22.90%
FTA                  0.00%              0.00%
FPR                  14.28% (728/5098)  17.02% (647/3802)
FNR                  14.29% (457/3199)  20.03% (439/2192)
HTER                 14.28%             18.52%
FAR                  14.28%             17.02%
FRR                  14.29%             20.03%
PRECISION            0.79               0.73
RECALL               0.86               0.80
F1_SCORE             0.82               0.76
AUC                  0.93               0.88
AUC-LOG-SCALE        1.83               1.76
===================  =================  =================
The ROC curves for this particular experiment can be downloaded from here:
:download:`ROC curve <img/ROC_lbp_svm_replay_mobile.pdf>`
------------
Image Quality Measures as features of facial region + SVM classifier
========================================================================
......
@@ -59,11 +59,6 @@ The configuration files contain at least the following arguments of the
* ``pipeline`` containing zero, one, or more Transformers and one Classifier (see the sketch below)
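
A minimal configuration file therefore only needs to expose a ``pipeline``
variable. The sketch below is purely illustrative; the placeholder estimators
are not the transformers or classifier shipped with the package:

.. code-block:: python

   # Illustrative sketch of a minimal PAD configuration file.
   import numpy as np
   import bob.pipelines as mario
   from sklearn.pipeline import Pipeline
   from sklearn.preprocessing import FunctionTransformer
   from sklearn.svm import SVC

   # Toy "extractor": flatten every input array into a 1D feature vector.
   flatten = FunctionTransformer(
       lambda samples: [np.asarray(data).ravel() for data in samples]
   )

   pipeline = Pipeline(
       [
           ("extractor", mario.wrap(["sample"], flatten)),
           ("classifier", mario.wrap(["sample"], SVC())),
       ]
   )
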
.. _bob.pad.face.resources.face_pad.lbp_svm_replayattack:
LBP features of facial region + SVM for REPLAY-ATTACK
================================================================================
.. _bob.pad.face.resources.face_pad.qm_svm_replayattack:
......
@@ -76,8 +76,6 @@ setup(
"casiasurf = bob.pad.face.config.casiasurf",
"swan = bob.pad.face.config.swan",
"oulunpu = bob.pad.face.config.oulunpu",
# LBPs
"lbp = bob.pad.face.config.lbp_64",
# quality measure
"qm = bob.pad.face.config.qm_64",
# classifiers
......