Commit af778bc0 authored by Amir MOHAMMADI

Merge branch 'small-fixes' into 'master'

Small fixes

See merge request !69
parents e19c925c 1e1185c5
Pipeline #36786 passed with stages in 9 minutes and 38 seconds
@@ -54,6 +54,7 @@ class OneClassGMM(Algorithm):
         frame_level_scores_flag=False,
         covariance_type='full',
         reg_covar=1e-06,
+        normalize_features=False,
     ):
 
         Algorithm.__init__(
@@ -69,6 +70,7 @@ class OneClassGMM(Algorithm):
         self.frame_level_scores_flag = frame_level_scores_flag
         self.covariance_type = covariance_type
         self.reg_covar = reg_covar
+        self.normalize_features = normalize_features
 
         self.machine = None  # this argument will be updated with pretrained OneClassGMM machine
         self.features_mean = None  # this argument will be updated with features mean
@@ -105,7 +107,12 @@ class OneClassGMM(Algorithm):
         """
 
         # real is now mean-std normalized
-        features_norm, features_mean, features_std = mean_std_normalize(real, copy=False)
+        if self.normalize_features:
+            features_norm, features_mean, features_std = mean_std_normalize(real, copy=False)
+        else:
+            features_norm = real
+            features_mean = np.zeros(real.shape[1:], dtype=real.dtype)
+            features_std = np.ones(real.shape[1:], dtype=real.dtype)
 
         if isinstance(self.n_components, (tuple, list)) or isinstance(self.covariance_type, (tuple, list)):
             # perform grid search on covariance_type and n_components
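For context, the mean_std_normalize call that this hunk makes optional is a plain per-column z-score normalization of the training features. A rough, self-contained stand-in (not the library's actual implementation; the name and return signature only mirror the call above):

```python
import numpy as np

def mean_std_normalize_sketch(features, features_mean=None, features_std=None, copy=True):
    """Per-column z-score normalization; reuses the given statistics when provided."""
    features = np.asarray(features, dtype="float64")
    if copy:
        features = features.copy()
    if features_mean is None or features_std is None:
        features_mean = features.mean(axis=0)
        features_std = features.std(axis=0)
        features_std = np.where(features_std == 0, 1.0, features_std)  # avoid division by zero
    features -= features_mean
    features /= features_std
    return features, features_mean, features_std

real = np.random.RandomState(0).randn(100, 16) * 3.0 + 5.0  # made-up training features
features_norm, features_mean, features_std = mean_std_normalize_sketch(real)
```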
@@ -119,7 +126,8 @@ class OneClassGMM(Algorithm):
                 logger.info("Testing for n_components: %s, covariance_type: %s", nc, cv_type)
                 gmm = mixture.GaussianMixture(
                     n_components=nc, covariance_type=cv_type,
-                    reg_covar=self.reg_covar)
+                    reg_covar=self.reg_covar,
+                    verbose=logger.level)
                 try:
                     gmm.fit(features_norm)
                 except Exception:
@@ -136,7 +144,8 @@ class OneClassGMM(Algorithm):
                 n_components=self.n_components,
                 random_state=self.random_state,
                 covariance_type=self.covariance_type,
-                reg_covar=self.reg_covar)
+                reg_covar=self.reg_covar,
+                verbose=logger.level)
 
             machine.fit(features_norm)
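When n_components or covariance_type is given as a list or tuple, the hunks above fit one scikit-learn GaussianMixture per combination, now with the configurable reg_covar and verbose output. The selection criterion is not part of these hunks, so the sketch below uses BIC purely as an illustration, with made-up data and candidate values. Note that scikit-learn's verbose parameter expects a small integer (0, 1 or 2), so passing a numeric logging level such as 20 effectively enables the most verbose output.

```python
import numpy as np
from sklearn import mixture

rng = np.random.RandomState(0)
features_norm = rng.randn(500, 20)            # stand-in for the normalized bona fide features

best_gmm, best_bic = None, np.inf
for nc in (2, 4, 8):                          # candidate n_components values
    for cv_type in ("full", "diag"):          # candidate covariance_type values
        gmm = mixture.GaussianMixture(
            n_components=nc,
            covariance_type=cv_type,
            reg_covar=1e-6,                   # small ridge added to covariances for stability
            random_state=3,
        )
        gmm.fit(features_norm)
        bic = gmm.bic(features_norm)          # lower BIC = better fit/complexity trade-off
        if bic < best_bic:
            best_gmm, best_bic = gmm, bic

scores = best_gmm.score_samples(features_norm)  # per-sample log-likelihood under the GMM
```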
@@ -211,6 +220,7 @@ class OneClassGMM(Algorithm):
         # Save the GMM machine and normalizers:
         self.save_gmm_machine_and_mean_std(projector_file, machine,
                                            features_mean, features_std)
+        logger.info("Finished training the GMM.")
 
     # ==========================================================================
     def load_gmm_machine_and_mean_std(self, projector_file):
@@ -319,16 +329,17 @@ class OneClassGMM(Algorithm):
                 feature,
                 FrameContainer):  # if FrameContainer convert to 2D numpy array
-            features_array = convert_frame_cont_to_array(feature)
+            features = convert_frame_cont_to_array(feature)
         else:
-            features_array = feature
+            features = feature
 
-        features_array_norm, _, _ = mean_std_normalize(
-            features_array, self.features_mean, self.features_std, copy=False)
+        features_norm, _, _ = mean_std_normalize(
+            features, self.features_mean, self.features_std, copy=False)
+        del features
 
-        scores = self.machine.score_samples(features_array_norm)
+        scores = self.machine.score_samples(features_norm)
 
         return scores
......
@@ -8,6 +8,7 @@ import logging
 import numpy as np
 from collections.abc import Iterable
 from multiprocessing import cpu_count
+from sklearn.externals import joblib
 
 logger = logging.getLogger(__name__)
@@ -48,6 +49,7 @@ class OneClassGMM2(Algorithm):
         update_means=True,
         update_variances=True,
         n_threads=cpu_count(),
+        preprocessor=None,  # a scikit-learn preprocessor, can be PCA for example
         **kwargs
     ):
         kwargs.setdefault("performs_projection", True)
@@ -65,12 +67,17 @@ class OneClassGMM2(Algorithm):
             n_threads=n_threads,
         )
         self.number_of_gaussians = number_of_gaussians
+        self.preprocessor = preprocessor
 
     def train_projector(self, training_features, projector_file):
         del training_features[1]
         real = convert_and_prepare_features(training_features[0], dtype="float64")
         del training_features[0]
 
+        if self.preprocessor is not None:
+            real = self.preprocessor.fit_transform(real)
+            joblib.dump(self.preprocessor, projector_file + ".pkl")
+
         if isinstance(self.number_of_gaussians, Iterable):
             logger.info(
                 "Performing grid search for GMM on number_of_gaussians: %s",
@@ -101,10 +108,15 @@ class OneClassGMM2(Algorithm):
     def load_projector(self, projector_file):
         self.gmm_alg.load_ubm(projector_file)
+        if self.preprocessor is not None:
+            self.preprocessor = joblib.load(projector_file + ".pkl")
 
     def project(self, feature):
         feature = convert_and_prepare_features([feature], dtype="float64")[0]
 
+        if self.preprocessor is not None:
+            feature = self.preprocessor.transform(feature)
+
         return self.gmm_alg.ubm(feature)
 
     def score(self, toscore):
......
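The new preprocessor option accepts a scikit-learn transformer (PCA is the suggested example): it is fit and serialized next to the projector during training, then reloaded and applied at projection time. A minimal sketch of that round-trip, with illustrative paths, data and PCA settings (newer scikit-learn releases ship joblib as a separate package instead of sklearn.externals.joblib):

```python
import joblib  # older scikit-learn: from sklearn.externals import joblib
import numpy as np
from sklearn.decomposition import PCA

projector_file = "Projector.hdf5"                  # illustrative path
real = np.random.RandomState(0).randn(300, 64)     # stand-in for the training features

# Training side: fit the preprocessor, transform the data, persist it next to the projector.
preprocessor = PCA(n_components=0.99)              # keep 99% of the variance
real = preprocessor.fit_transform(real)
joblib.dump(preprocessor, projector_file + ".pkl")

# Projection side: reload the fitted preprocessor and apply the same transform to new features.
preprocessor = joblib.load(projector_file + ".pkl")
feature = np.random.RandomState(1).randn(10, 64)
feature = preprocessor.transform(feature)
```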
@@ -29,6 +29,8 @@ class VideoPredictions(Algorithm):
     def score(self, predictions):
         # Assuming the predictions are the output of a softmax layer
 
+        if len(predictions) == 0:
+            return [float("nan")]
 
         predictions = predictions.as_array()[:, self.axis]
 
         if self.frame_level_scoring:
......
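The early return added above guards against videos for which no frame produced a prediction (for example, when no face was detected), where indexing the empty array would otherwise fail. A tiny illustration of the same guard and of filtering the resulting NaN scores downstream (names and data are illustrative):

```python
import numpy as np

def video_score(frame_predictions, axis=1):
    # No frame-level predictions for this video: report NaN instead of
    # failing on the empty-array indexing below.
    if len(frame_predictions) == 0:
        return float("nan")
    return float(np.asarray(frame_predictions)[:, axis].mean())

scores = [video_score(np.empty((0, 2))), video_score(np.array([[0.2, 0.8], [0.4, 0.6]]))]
valid_scores = [s for s in scores if not np.isnan(s)]  # NaN scores can be dropped before evaluation
```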
@@ -30,3 +30,6 @@ class PadFile(bob.bio.base.database.BioFile):
         # just copy the information
         # The attack type of the sample, None if it is a genuine sample.
         self.attack_type = attack_type
+
+    def __repr__(self):
+        return f"<File({self.id}: {self.path}, {self.client_id}, {self.attack_type})>"
@@ -3,6 +3,7 @@
 import numpy as np
 
 import bob.bio.video
+from bob.io.base import vstack_features
 
 import itertools
@@ -78,18 +79,12 @@ def convert_list_of_frame_cont_to_array(frame_containers):
         An array containing features for all frames of all individuals.
     """
 
+    def reader(x):
+        if isinstance(x, bob.bio.video.FrameContainer):
+            return x.as_array()
+        return x
 
-    if isinstance(frame_containers[0], bob.bio.video.FrameContainer):
-        feature_vectors = []
-
-        for frame_container in frame_containers:
-            video_features_array = convert_frame_cont_to_array(
-                frame_container)
-
-            feature_vectors.append(video_features_array)
-    else:
-        feature_vectors = frame_containers
-
-    features_array = np.vstack(feature_vectors)
+    features_array = vstack_features(reader, frame_containers)
 
     return features_array
......
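This refactor replaces the manual convert-collect-np.vstack loop with bob.io.base.vstack_features, which stacks the features through a reader callable and is intended to be more memory-friendly. A simplified NumPy-only sketch of the idea (the real vstack_features comes from bob.io.base; this stand-in only mirrors its call pattern):

```python
import numpy as np

def vstack_features_sketch(reader, samples):
    """Simplified stand-in for bob.io.base.vstack_features: apply `reader`
    to every sample and stack the resulting 2D feature arrays vertically."""
    return np.vstack([np.atleast_2d(reader(s)) for s in samples])

def reader(x):
    # Mirrors the reader in the hunk above: FrameContainers would be
    # converted with as_array(); plain arrays pass through unchanged.
    return np.asarray(x)

batches = [np.ones((3, 5)), np.zeros((2, 5))]
stacked = vstack_features_sketch(reader, batches)  # shape (5, 5)
```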