Commit 69a14c63 authored by Pavel KORSHUNOV's avatar Pavel KORSHUNOV

moved SVM algorithm from bob.pad.face

parent d9fcf2da
Pipeline #16585 canceled with stage
in 2 minutes and 39 seconds
This diff is collapsed.
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Manuel Guenther <Manuel.Guenther@idiap.ch>
# @author: Pavel Korshunov <pavel.korshunov@idiap.ch>
# @date: Tue May 17 12:09:22 CET 2016
#
import numpy as np
from bob.io.base.test_utils import datafile
from bob.io.base import load
import bob.io.image # for image loading functionality
import bob.bio.video
from bob.pad.base.algorithm import SVM
import random
from bob.pad.base.utils import convert_array_to_list_of_frame_cont, convert_frame_cont_to_array
def test_video_svm_pad_algorithm():
    """
    Test the VideoSvmPadAlgorithm algorithm.

    Trains an RBF C-SVC on two synthetic, well-separated Gaussian clouds
    (real vs. attack) and checks the learned machine's support vectors,
    selected gamma, per-sample class probabilities and overall precision.
    """
    random.seed(7)

    n_points = 20000

    def _gaussian_cloud(mean, std):
        # Two feature columns of n_points draws each; draw order matches the
        # seeded RNG sequence, so results are deterministic.
        columns = [[random.gauss(mean, std) for _ in range(n_points)]
                   for _ in range(2)]
        return np.transpose(np.vstack(columns))

    real_array = _gaussian_cloud(1, 1)
    attack_array = _gaussian_cloud(5, 1)

    # Training data is expected as lists of FrameContainers, one per class.
    real = convert_array_to_list_of_frame_cont(real_array)
    attack = convert_array_to_list_of_frame_cont(attack_array)
    training_features = [real, attack]

    algorithm = SVM(
        machine_type='C_SVC',
        kernel_type='RBF',
        n_samples=1000,
        trainer_grid_search_params={'cost': [1], 'gamma': [0.5, 1]},
        mean_std_norm_flag=True,  # enable mean-std normalization
        frame_level_scores_flag=True)  # one score per frame(!) in this case

    machine = algorithm.train_svm(
        training_features=training_features,
        n_samples=algorithm.n_samples,
        machine_type=algorithm.machine_type,
        kernel_type=algorithm.kernel_type,
        trainer_grid_search_params=algorithm.trainer_grid_search_params,
        mean_std_norm_flag=algorithm.mean_std_norm_flag,
        projector_file="",
        save_debug_data_flag=False)

    # Expected values for the fixed seed and grid-search settings above.
    assert machine.n_support_vectors == [148, 150]
    assert machine.gamma == 0.5

    # A real sample must be assigned a higher "real" probability (column 0).
    real_sample = convert_frame_cont_to_array(real[0])
    prob = machine.predict_class_and_probabilities(real_sample)[1]
    assert prob[0, 0] > prob[0, 1]

    precision = algorithm.comp_prediction_precision(machine, real_array,
                                                    attack_array)
    assert precision > 0.99
\ No newline at end of file
from .helper_functions import *
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
import itertools

import bob.bio.video
import numpy as np
def convert_frame_cont_to_array(frame_container):
    """
    Convert a single Frame Container into a 2D array of features.
    The rows are samples, the columns are features.

    **Parameters:**

    ``frame_container`` : object
        A Frame Container containing the features of an individual,
        see ``bob.bio.video.utils.FrameContainer``.

    **Returns:**

    ``features_array`` : 2D :py:class:`numpy.ndarray`
        An array containing features for all frames.
        The rows are samples, the columns are features.
    """
    # Frames may be stored in arbitrary order; index them by their string id
    # first, then emit them in increasing frame order ("0", "1", ...).
    by_index = {}
    for frame in frame_container:
        by_index[frame[0]] = frame[1]

    ordered_vectors = [by_index[str(pos)] for pos in range(len(by_index))]
    return np.vstack(ordered_vectors)
def convert_and_prepare_features(features):
    """
    Convert a list of feature arrays or Frame Containers into one 2D array.

    If the input is a list of frame containers, features from different frame
    containers (individuals) are concatenated into the same array. The rows
    are samples, the columns are features.

    **Parameters:**

    ``features`` : [2D :py:class:`numpy.ndarray`] or [FrameContainer]
        A list of 2D feature arrays or a list of Frame Containers, see
        ``bob.bio.video.utils.FrameContainer``. Each Frame Container contains
        feature vectors for a particular individual/person.

    **Returns:**

    ``features_array`` : 2D :py:class:`numpy.ndarray`
        An array containing features for all samples and frames.
    """
    # Bug fix: the original tested ``isinstance(features[0], FrameContainer)``
    # (a name not imported here) and then called
    # ``self.convert_list_of_frame_cont_to_array`` — but this is a
    # module-level function, so ``self`` is undefined. Test for the ndarray
    # case instead and call the module-level helper directly; anything that
    # is not already an array is treated as a FrameContainer, which matches
    # the original intent.
    if isinstance(features[0], np.ndarray):
        return np.vstack(features)
    return convert_list_of_frame_cont_to_array(features)
def convert_list_of_frame_cont_to_array(frame_containers):
    """
    Convert a list of Frame Containers into one 2D array of features.

    Features from different frame containers (individuals) are concatenated
    into the same array. The rows are samples, the columns are features.

    **Parameters:**

    ``frame_containers`` : [FrameContainer]
        A list of Frame Containers, see ``bob.bio.video.utils.FrameContainer``.
        Each Frame Container contains feature vectors for a particular
        individual/person.

    **Returns:**

    ``features_array`` : 2D :py:class:`numpy.ndarray`
        An array containing features for all frames of all individuals.
    """
    # Bug fix: the original called ``self.convert_frame_cont_to_array`` — but
    # this is a module-level function, so ``self`` is undefined. Call the
    # module-level helper directly.
    per_video_arrays = [
        convert_frame_cont_to_array(frame_container)
        for frame_container in frame_containers
    ]
    return np.vstack(per_video_arrays)
def combinations(input_dict):
    """
    Obtain all possible key-value combinations in the input dictionary
    containing list values.

    **Parameters:**

    ``input_dict`` : :py:class:`dict`
        Input dictionary with list values.

    **Returns:**

    ``combinations`` : [:py:class:`dict`]
        A list of dictionaries containing the combinations (the Cartesian
        product of the value lists, keys processed in sorted order).
    """
    # Bug fix: the original used ``it.product`` but ``it`` (itertools) was
    # never imported in this module; ``itertools`` is now imported at the top
    # of the file.
    var_names = sorted(input_dict)
    value_products = itertools.product(
        *(input_dict[name] for name in var_names))
    return [dict(zip(var_names, values)) for values in value_products]
def select_uniform_data_subset(features, n_samples):
    """
    Uniformly select N samples/feature vectors from the input array of samples.
    The rows in the input array are samples. The columns are features.

    **Parameters:**

    ``features`` : 2D :py:class:`numpy.ndarray`
        Input array with feature vectors. The rows are samples, columns are
        features.

    ``n_samples`` : :py:class:`int`
        The number of samples to be selected uniformly from the input array
        of features.

    **Returns:**

    ``features_subset`` : 2D :py:class:`numpy.ndarray`
        Selected subset of features. The input is returned unchanged when it
        already has ``n_samples`` rows or fewer.
    """
    if features.shape[0] <= n_samples:
        return features

    # Bug fix: ``np.int`` was removed in NumPy 1.24; floor division on plain
    # ints computes the same stride.
    uniform_step = features.shape[0] // n_samples
    return features[0:uniform_step * n_samples:uniform_step, :]
def select_quasi_uniform_data_subset(features, n_samples):
    """
    Select quasi-uniformly N samples/feature vectors from the input array of
    samples. The rows in the input array are samples. The columns are
    features. Use this function if ``n_samples`` is close to the number of
    samples.

    **Parameters:**

    ``features`` : 2D :py:class:`numpy.ndarray`
        Input array with feature vectors. The rows are samples, columns are
        features.

    ``n_samples`` : :py:class:`int`
        The number of samples to be selected from the input array of features.

    **Returns:**

    ``features_subset`` : 2D :py:class:`numpy.ndarray`
        Selected subset of features. The input is returned unchanged when it
        already has ``n_samples`` rows or fewer.
    """
    if features.shape[0] <= n_samples:
        return features

    # A fractional stride lets the selection cover the whole array even when
    # n_samples does not divide the number of rows evenly.
    uniform_step = (1.0 * features.shape[0]) / n_samples
    # Bug fix: ``np.int`` was removed in NumPy 1.24; the builtin int()
    # truncates the same way.
    row_indices = [int(uniform_step * k) for k in range(n_samples)]
    return features[row_indices, :]
def convert_array_to_list_of_frame_cont(data):
    """
    Convert an input 2D array to a list of FrameContainers.

    **Parameters:**

    ``data`` : 2D :py:class:`numpy.ndarray`
        Input data array of the dimensionality (N_samples X N_features).

    **Returns:**

    ``frame_container_list`` : [FrameContainer]
        A list of FrameContainers, see ``bob.bio.video.utils.FrameContainer``
        for further details. Each frame container contains one feature vector.
    """
    def _wrap_in_container(vec):
        # One container per sample, holding a single frame at index 0.
        container = bob.bio.video.FrameContainer()
        container.add(0, vec)
        return container

    return [_wrap_in_container(sample) for sample in data]
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment