From 784f98e3d86a84fb8251c1fc02f8215d4e4fb9e7 Mon Sep 17 00:00:00 2001 From: Yannick DAYER <yannick.dayer@idiap.ch> Date: Thu, 17 Feb 2022 17:56:53 +0100 Subject: [PATCH] Set tests for the GMM BioAlgorithm --- bob/bio/gmm/algorithm/GMM.py | 175 ++--- bob/bio/gmm/test/__init__.py | 1 - .../{gmm_model.hdf5 => gmm_enrolled.hdf5} | Bin 12920 -> 12920 bytes bob/bio/gmm/test/data/gmm_projected.hdf5 | Bin 10608 -> 10608 bytes .../data/{gmm_projector.hdf5 => gmm_ubm.hdf5} | Bin 12920 -> 12920 bytes bob/bio/gmm/test/dummy/__init__.py | 1 - bob/bio/gmm/test/dummy/extractor.py | 30 - bob/bio/gmm/test/test_algorithms.py | 719 ------------------ bob/bio/gmm/test/test_gmm.py | 159 ++++ 9 files changed, 243 insertions(+), 842 deletions(-) rename bob/bio/gmm/test/data/{gmm_model.hdf5 => gmm_enrolled.hdf5} (69%) rename bob/bio/gmm/test/data/{gmm_projector.hdf5 => gmm_ubm.hdf5} (68%) delete mode 100644 bob/bio/gmm/test/dummy/__init__.py delete mode 100644 bob/bio/gmm/test/dummy/extractor.py delete mode 100644 bob/bio/gmm/test/test_algorithms.py create mode 100644 bob/bio/gmm/test/test_gmm.py diff --git a/bob/bio/gmm/algorithm/GMM.py b/bob/bio/gmm/algorithm/GMM.py index 7309c11..4b7310c 100644 --- a/bob/bio/gmm/algorithm/GMM.py +++ b/bob/bio/gmm/algorithm/GMM.py @@ -10,18 +10,16 @@ This adds the notions of models, probes, enrollment, and scores to GMM. """ +import copy import logging from typing import Callable -import os - +import dask import dask.array as da import numpy as np -import copy -import dask -from h5py import File as HDF5File +from h5py import File as HDF5File from sklearn.base import BaseEstimator from bob.bio.base.pipelines.vanilla_biometrics.abstract_classes import BioAlgorithm @@ -56,17 +54,20 @@ class GMM(BioAlgorithm, BaseEstimator): ubm_training_iterations: int = 25, # Maximum number of iterations for GMM Training training_threshold: float = 5e-4, # Threshold to end the ML training variance_threshold: float = 5e-4, # Minimum value that a variance can reach - update_weights: bool = True, update_means: bool = True, update_variances: bool = True, + update_weights: bool = True, # parameters of the GMM enrollment - relevance_factor: float = 4, # Relevance factor as described in Reynolds paper gmm_enroll_iterations: int = 1, # Number of iterations for the enrollment phase + enroll_update_means: bool = True, + enroll_update_variances: bool = False, + enroll_update_weights: bool = False, + relevance_factor: float = 4, # Relevance factor as described in Reynolds paper responsibility_threshold: float = 0, # If set, the weight of a particular Gaussian will at least be greater than this threshold. In the case the real weight is lower, the prior mean value will be used to estimate the current mean and variance. - init_seed: int = 5489, # scoring scoring_function: Callable = linear_scoring, - # n_threads=None, + # RNG + init_seed: int = 5489, ): """Initializes the local UBM-GMM tool chain. @@ -88,10 +89,16 @@ class GMM(BioAlgorithm, BaseEstimator): Decides wether the means of the Gaussians are updated while training. update_variances Decides wether the variancess of the Gaussians are updated while training. - relevance_factor - Relevance factor as described in Reynolds paper. gmm_enroll_iterations Number of iterations for the MAP GMM used for enrollment. + enroll_update_weights + Decides wether the weights of the Gaussians are updated while enrolling. + enroll_update_means + Decides wether the means of the Gaussians are updated while enrolling. 
+ enroll_update_variances + Decides wether the variancess of the Gaussians are updated while enrolling. + relevance_factor + Relevance factor as described in Reynolds paper. responsibility_threshold If set, the weight of a particular Gaussian will at least be greater than this threshold. In the case where the real weight is lower, the prior mean @@ -102,9 +109,6 @@ class GMM(BioAlgorithm, BaseEstimator): Function returning a score from a model, a UBM, and a probe. """ - # call base class constructor and register that this tool performs projection - # super().__init__(score_reduction_operation=??) - # copy parameters self.number_of_gaussians = number_of_gaussians self.kmeans_training_iterations = kmeans_training_iterations @@ -116,14 +120,14 @@ class GMM(BioAlgorithm, BaseEstimator): self.update_variances = update_variances self.relevance_factor = relevance_factor self.gmm_enroll_iterations = gmm_enroll_iterations + self.enroll_update_means = enroll_update_means + self.enroll_update_weights = enroll_update_weights + self.enroll_update_variances = enroll_update_variances self.init_seed = init_seed - self.rng = self.init_seed # TODO verify if rng object needed + self.rng = self.init_seed self.responsibility_threshold = responsibility_threshold - def scoring_function_wrapped(*args, **kwargs): - return scoring_function(*args, **kwargs) - - self.scoring_function = scoring_function_wrapped + self.scoring_function = scoring_function self.ubm = None @@ -143,29 +147,26 @@ class GMM(BioAlgorithm, BaseEstimator): % (self.ubm.shape[1], feature.shape[1]) ) - def save_ubm(self, ubm_file): - """Saves the projector to file""" + def save_model(self, ubm_file): + """Saves the projector to file.""" # Saves the UBM to file logger.debug("Saving model to file '%s'", ubm_file) - hdf5 = ( - ubm_file - if isinstance(ubm_file, HDF5File) - else HDF5File(ubm_file, "w") - ) + hdf5 = ubm_file if isinstance(ubm_file, HDF5File) else HDF5File(ubm_file, "w") self.ubm.save(hdf5) - def load_ubm(self, ubm_file): + def load_model(self, ubm_file): + """Loads the projector from a file.""" hdf5file = HDF5File(ubm_file, "r") logger.debug("Loading model from file '%s'", ubm_file) - # read UBM + # Read UBM self.ubm = GMMMachine.from_hdf5(hdf5file) self.ubm.variance_thresholds = self.variance_threshold def project(self, array): """Computes GMM statistics against a UBM, given a 2D array of feature vectors""" self._check_feature(array) - logger.warning(" .... Projecting %d feature vectors", array.shape[0]) + logger.debug("Projecting %d feature vectors", array.shape[0]) # Accumulates statistics gmm_stats = self.ubm.transform(array) gmm_stats.compute() @@ -182,7 +183,11 @@ class GMM(BioAlgorithm, BaseEstimator): return feature.save(feature_file) def enroll(self, data): - """Enrolls a GMM using MAP adaptation, given a list of 2D np.ndarray's of feature vectors""" + """Enrolls a GMM using MAP adaptation given a reference's feature vectors + + Returns a GMMMachine tweaked from the UBM with MAP + """ + [self._check_feature(feature) for feature in data] array = da.vstack(data) # Use the array to train a GMM and return it @@ -197,23 +202,24 @@ class GMM(BioAlgorithm, BaseEstimator): convergence_threshold=self.training_threshold, max_fitting_steps=self.gmm_enroll_iterations, random_state=self.rng, - update_means=True, - update_variances=True, # TODO default? - update_weights=True, # TODO default? 
+ update_means=self.enroll_update_means, + update_variances=self.enroll_update_variances, + update_weights=self.enroll_update_weights, ) gmm.variance_thresholds = self.variance_threshold gmm.fit(array) return gmm def read_biometric_reference(self, model_file): - """Reads the model, which is a GMM machine""" + """Reads an enrolled reference model, which is a MAP GMMMachine""" return GMMMachine.from_hdf5(HDF5File(model_file, "r"), ubm=self.ubm) - def write_biometric_reference(self, model, model_file): - """Write the features (GMM_Stats)""" + @classmethod + def write_biometric_reference(cls, model: GMMMachine, model_file): + """Write the enrolled reference (MAP GMMMachine)""" return model.save(model_file) - def score(self, biometric_reference: GMMMachine, data: GMMStats): + def score(self, biometric_reference: GMMMachine, probe): """Computes the score for the given model and the given probe. Uses the scoring function passed during initialization. @@ -222,22 +228,24 @@ class GMM(BioAlgorithm, BaseEstimator): ---------- biometric_reference: The model to score against. - data: + probe: The probe data to compare to the model. """ - logger.debug(f"scoring {biometric_reference}, {data}") - assert isinstance(biometric_reference, GMMMachine) - stats = self.project(data) + logger.debug(f"scoring {biometric_reference}, {probe}") + if not isinstance(probe, GMMStats): + probe = self.project( + probe + ) # Projection is done here instead of transform (or it would be applied to enrollment data too...) return self.scoring_function( models_means=[biometric_reference], ubm=self.ubm, - test_stats=stats, + test_stats=probe, frame_length_normalization=True, )[0, 0] def score_multiple_biometric_references( - self, biometric_references: "list[GMMMachine]", data: GMMStats + self, biometric_references: "list[GMMMachine]", probe: GMMStats ): """Computes the score between multiple models and one probe. @@ -247,15 +255,15 @@ class GMM(BioAlgorithm, BaseEstimator): ---------- biometric_references: The models to score against. - data: + probe: The probe data to compare to the models. 
""" - logger.debug(f"scoring {biometric_references}, {data}") + logger.debug(f"scoring {biometric_references}, {probe}") assert isinstance(biometric_references[0], GMMMachine), type( biometric_references[0] ) - stats = self.project(data) + stats = self.project(probe) if not isinstance(probe, GMMStats) else probe return self.scoring_function( models_means=biometric_references, ubm=self.ubm, @@ -267,15 +275,15 @@ class GMM(BioAlgorithm, BaseEstimator): """This function computes the score between the given model and several given probe files.""" logger.debug(f"scoring {model}, {probes}") assert isinstance(model, GMMMachine) - stats = [] - for probe in probes: - stats.append(self.project(probe)) - # logger.warn("Please verify that this function is correct") + stats = [ + self.project(probe) if not isinstance(probe, GMMStats) else probe + for probe in probes + ] return ( self.scoring_function( models_means=model.means, ubm=self.ubm, - test_stats=probes, + test_stats=stats, frame_length_normalization=True, ) .mean() @@ -284,50 +292,35 @@ class GMM(BioAlgorithm, BaseEstimator): def fit(self, X, y=None, **kwargs): """Trains the UBM.""" - ubm_filename = "UBM_mobio_001.hdf5" # Manually set "projector" file TODO remove - if not os.path.exists(ubm_filename): - - # Stack all the samples in a 2D array of features - array = np.vstack(X).persist() - - logger.debug("UBM with %d feature vectors", array.shape[0]) - - logger.debug(f"Creating UBM machine with {self.number_of_gaussians} gaussians") - - self.ubm = GMMMachine( - n_gaussians=self.number_of_gaussians, - trainer="ml", - max_fitting_steps=self.ubm_training_iterations, + # Stack all the samples in a 2D array of features + array = da.vstack(X).persist() + + logger.debug("UBM with %d feature vectors", array.shape[0]) + + logger.debug(f"Creating UBM machine with {self.number_of_gaussians} gaussians") + + self.ubm = GMMMachine( + n_gaussians=self.number_of_gaussians, + trainer="ml", + max_fitting_steps=self.ubm_training_iterations, + convergence_threshold=self.training_threshold, + update_means=self.update_means, + update_variances=self.update_variances, + update_weights=self.update_weights, + k_means_trainer=KMeansMachine( + self.number_of_gaussians, convergence_threshold=self.training_threshold, - update_means=self.update_means, - update_variances=self.update_variances, - update_weights=self.update_weights, - k_means_trainer=KMeansMachine( - self.number_of_gaussians, - convergence_threshold=self.training_threshold, # TODO Have a separate threshold for kmeans instead of re-using the one for GMM... - max_iter=self.kmeans_training_iterations, # TODO pass this param through GMMMachine instead of the full KMeansMachine? - init_method="k-means||", - init_max_iter=5, - ) - # TODO more params? - ) - - # Trains the GMM - logger.info("Training UBM GMM") - # Resetting the pseudo random number generator so we can have the same initialization for serial and parallel execution. - # self.rng = bob.core.random.mt19937(self.init_seed) + max_iter=self.kmeans_training_iterations, + init_method="k-means||", + init_max_iter=5, + ), + ) - self.ubm = self.ubm.fit(array) + # Train the GMM + logger.info("Training UBM GMM") - logger.warning(f"Saving trained ubm to {ubm_filename}") - self.save_ubm(ubm_filename) + self.ubm.fit(array, ubm_train=True) - if not np.all(self.ubm.weights): - logger.error("zero weights after gmm training") - raise ValueError("!! 
zero weights after gmm training...") - else: - logger.warning(f"Loading trained ubm from {ubm_filename}") - self.load_ubm(ubm_filename) return self def transform(self, X, **kwargs): diff --git a/bob/bio/gmm/test/__init__.py b/bob/bio/gmm/test/__init__.py index 20aa1aa..e69de29 100644 --- a/bob/bio/gmm/test/__init__.py +++ b/bob/bio/gmm/test/__init__.py @@ -1 +0,0 @@ -from . import dummy # noqa: F401 diff --git a/bob/bio/gmm/test/data/gmm_model.hdf5 b/bob/bio/gmm/test/data/gmm_enrolled.hdf5 similarity index 69% rename from bob/bio/gmm/test/data/gmm_model.hdf5 rename to bob/bio/gmm/test/data/gmm_enrolled.hdf5 index 07c92210b263582fb47143438de7fe31564bf091..4dba2c87521088f9ae6a3a99ba1170768ebacdc2 100644 GIT binary patch delta 1698 zcmey7@*`!#9A=i&wR}mN7chq~fmxFmFi)7Qz^pWR6<ZKkU=zCoBctMEM-CYQcVA!M z#N>?3yi|2N1_lO($)z01Odb527joqDPrSg-s5n_rVlr40$dt_%k~)lV9@tDNX^5Gd z3#3&bQj@eACKqUCO>UA^0xN~_C&|J19TP9Sn8d*|*+fC4ex2vrK*q=S?X{vg!|VAT z*fTORG!>`qnjU^^fAiYlm8yH+*h_q<IMb<FU_Zs{wpz~g3;X|;wFmEe`*6SM=MCCF zbZ+n8JY|ZaVASpXD^AUOw)OvI`{yq{d96-7Zm)A%!dJuX;(pyi+n)}ehxe~&I;_%T zTDyPV_ZjE*E_=9NEMh8m{iTH$?cZ2>nD~j`uxF9JQ#Zlsv3=8xnH713C+$-Lo0t8V zw|2i!&4aI!=MV3<&*R(68uGxt@J`q%pL3t=buKs`^fkP-|HTEPX;m*y?RRAFEiw6d z!M<n8L7fckEB2f}cz;ckdtiUM#6e0ZYvca65pMQdcHFh^5puk<y>O%b=4-3!XNst- z*`F<^;T2y0VE+d;4}C|+3-)eu(<iW1AKHIINN#n0$&3AekL={Fa$K{YdC#L_`}3FW zuQ!Ch4>7x8uUQ!JsPOg&dnYsdlS<z&?q9w=uX91ooBhn}xpxh;57=+zo)mgU=HC8K zPvpg|CcWAJ$dzr$ukM!pN7+qzebm?3&p%rKENWWpbNek4Zf>lTj_;ou^x|Q|`jhq> zTlSc}?!3J}Fk-nLWAgR=m$)Y06?rpde?0SM+dP%)_J40UX6Wy{Zoh6Ro9ObLC+w3a z{ps!db<h4o$L&jd&+XmM5jSZ$`+|k`8+~7$_g(yK|8|SR^?^3m>|IlT{b4bAvR}p} z^Ih?q`}@z;drrOTlY4A`M2t!4>;=#FpFH_(z3=Vo`@2GqidG%lZ_h2$+WPj>3;UPV zUh!`&7wr#tXS4C+%@y`mudnzP_@CY1ySO^)t?=djfA%l`T=f3f{%=<*CL3Qmv488j zY%%w$E&EGOsXq0Rzi!_!g=NVJo%8!I-HNk%#BkPL@BJrUKFRI->z6&-*4Z|D+5V_Q zQ&hQrth9f@V=rYbSGV6aHlmAV!eM*8?YG%@)8^ZM>$<;3^v6Scg9i!clib(XZ|P^5 zR9p99|BtYzimN-G>_2&_*ZSI&_xm$6PrR))cyAx|kAtbR|FM0jYz9li+)eh~*(Wx! 
z+9*5dwe37&{Y=qeUVet_tNjY~4qaca>@qWwbx3lb;`sH0g2SdB^%(0N3J$F4t)hME z3J$MMNq$mcQgV3v$iOxHx}w7Y>p6RTWaS-h{^2lP)}-i=c;LvdQVkggvj=*Irs*p< zsHe&tZnso$SZH!c)!?XtLxqS_*OL|nhqB6B{|uMOJIr`Aul3dnc?Z6OQPb+h>g60Z z9jRfL(vxz~m|xvcv`4|A-~P;d=k*E>YdR~V*e@$Obm#h|uKK9p@P6N><x2t;9JU== zx!doAjKj}@SLVr&l^tB()c*VRUeTds(%*lkf$|Pn8x3FWkCAs+)BEbjH%%FbsoQ&I zzqlyxaCWy#p7K|Dhj!~zi)*wM9U^qI<!9ILR&+@A(r)?QE$Og)&ei8f?c^P1%ye#M zJSOX4K0`VoKupQuwTYU<wdFDnoYwzW9seuq&~WMVRTC*ihs3FcQ>H(ab=dN#f6+U3 z1&3drA%4pY6&(&}z5H~;SJ^>5_2osWFNzL#4jj2YM?%42^%R@sPckGOR?YvpEpoH6 zL$>wKUG=*<6&xmMdS=bsF6Hp#YVzbCmz5miH0Gt0wJJJHeS1}br%}OSs^>C|f-^D> zPkk!>&0H++aP7|P6--8o4juwK=CK(oIRu#ebU)Ff;P9dMLrYG&lEcNukllG-<sA%* z!#ij5Dmh%Pdy*!nD(}!06Jw}aC+on@x2No{sG@`Fa_@Rce`iI9GZz*~vMf<_@G5k> zqcKt5fpg}j*-xw$9CV{{7wecyI~=|wcIV_nC5QbH_jtP>DmYl56k4=cR?)$5Nn3mO zSw)8|!HGx1Cn`F8S|^k6h)dDIX|Lz^$MLcbA8cR!eHke45G@lOK6Ra<!`nlz*<&l^ z9r{dn{dapT>rf_Rdqz=nqP)YNnw+rt8x<TT9XwdRMnu%%xyLqTUpHBYAGSg5Qi3uL zUMlxyypLCKuoREmy)91Op<SVYg>$vy=3lBzEZ{<ZlhzyvWAg$%c(tQfFj;|FWAX&U K5U?6h4FUjLa}(MC delta 1702 zcmey7@*`!#9A=hs-&Kj57chq~fmxFmFi)7Qz^pWR6<ZKkU=zCoBctwQM-CZ@+{}uS z(xOzo^xRxMcVA!M#N>?3yi|3&$+;ZLj18M7a+LEk>P{Avm<-kkGHJ7gqz)sT2R2tq z8e;C|0%;Y9)FiEj$pxBOlbd9fz)E5KNpdiL$HWUSCUNjgHc=3%m%DP~=3eI~_D90~ zd|E25+cPpUu2A&QikzHk&s+0oqQS=p_EM`CM=xi&YCq@P=C}UNGxk6D##h$*<>CI6 zGuIaNa6Z|up?=nzvFh6X3H~0pp2R$`-;~R<g)!`&{a)rbr+++OV!!fB<@b#jHtjc@ zzkF^`;o1EGTYGEYUwOE{w4T$bUi0fM`z1V1)_nHY>_48d`fzjC9eca&S#SS7KW8ty zZMA~0;*R~#-v6;N=Do7tEvEj(lBg&4zBRMuW9>iN%SQKWtx9^lKgcxvS@4Q;`_DMu zQC>cKi+#{*K`y?P+w7ms3!ZV(@|b;ntJAzPyX*TWsDG(W)V*#0_jK$o7P~|Cp)=Rk zAIoUHx<BfXee<fx@AgX@JWptnK4!0WKV9(bgme2%CkgiTiagpsvoK?OS<`|2QzNf_ z@OyFB{(r@jQ^^J=?I&mDot|m%-rmK*zgVsD`2HBV0KXfB@Am7ecb{?>?%e-;T7au$ z!{hx{GviMc@4B#`DTsMOM%EGgwkPS!KJ47Sf1zc)P|8&E7xrowmQ0rrU1Ps7U|+Rh z@1_0oHYk|Sxcp{6+dB97;?hg|51Soz$y<ESKCGv>D`Ue0`+|Aehs(QG+WSwv81VbT z<^4-eD+_<mJ!3C^*=oIZ?z#O@lQ`6_pTBLt?eCq->QXQEAJHvd=>Bz|eeu4|s<Ug~ z?JrtvV{Sk7$^L!yUPq<w&AYL``NkV%o1VA(-M<ytneIBafA!&`D*aQ>?myK(N9fb0 zNA}9D{U?0I?$|%-o#o;acEWzE>GfTLVcYiizDr;$yK!K@ymdBTitIuA{S}i;cBNmm z&-;7jCfnJQ`#0-ne`Vcq)joTMTu5}?F?)`H%Iy50FZQ_~XDlmH+`PYjkF`MN!<57J zBI-_h9fl3|cfYGgCY(HIzeVPh#;LC3``MoM*{%C<%Ko3X{`Q}1AK16-y%6#JVZXh% zaITDi{rmkMa{e|9=brAjv7B9a;r`qGVeChJPuo7SKec4aUDqiO?PFa<Vva~&wGaPT z-x{8*?6BCD^Uv~HB?r5$9P9V3QK)wiJu!Xz)cZ0Hw$tRN^*AXx*hCdZ2N%ja#EI8t zC-0YaIK9sPYV-j`hhD37To$}a4xP!~JEa*F9O`y&o$1T0=+L?7&CC=l8HbCypWj<p z$U9hVnmYG!hJwRmjoO7@J}WpFTisY?a$3Q`^UJe{Wq0Hq4r%;<lMt%lP;$im)P#@q zG7i72eGmEg$T&>hq$$+qpy*&0@1Ni&qTs+a*>95mVg(1LqnkQEZc=dg9&g_<J4eCc z6r--B%se@V7mn+mJ=~$>P$n+fVZ2z;L23DB7n@J=4h{j60#fG4ImqtbT$H&^+Ce+> z>(aCL<s81OKXu}euDrv7+zSmWUnn}vH#+2>Q_rO2F!|7}+Yw(R9j-qsK2ho_=Wy^* z!Jhk*WgPz1UpFb|Q+6;pX}H8xUdkc6`D&Z4nViG1tA`6-T$Xou^yJK2A0Z_N`^h^P z)>tY!$SlvFroCOk!Nx<fU9eTzVVY~xx!2K34%%D(@cxyRcewHD_LT$k#2hwnF`E4< zPRT)K`(Dp_rP~S)M`k3NB_EV=*d@es=8%`N!^h^{yi=+Q4l7r#+H>%ef<xOk>H0of z83(~B`9j~8$~&;9ubZGXQOQB*?dAfpN+pM^N;7d|TRDe~XD6NP*rwzlF<-u|nMdB? 
zh1}w-ild4S+vXj6ZF5S&;Y{O9U(qf}2fyA#!Ii8E4jcc8*Dn(CQgm21x#HK>5@m;- zQd@+AMHC!1^k&}N(y8DuJ6*ByzN4f=*2VPJ75|kTetdWKJ^Mt#;odQ2(?V@Uhvi@T zx_7-$a`;{pyK*J3lmp+Y+Qg7hB?rzWw>cLV$~x$;3VPdeO~zqmgQ&8TgrY;P?TQDB zr^-8+Uhm;8Dw21IUTLfIZ-&02Ls`Gv^kreP4quLSrU#u7b13Ax^xj-o)?r8F`?u#3 z<Q)Fh96B&%s=PxXW9wGNv(gS~>#w_)W-4v|rOLztF61|9&4DmBFVKTmJ$eO`6__<9 MPcRGts{z#@02iAOp8x;= diff --git a/bob/bio/gmm/test/data/gmm_projected.hdf5 b/bob/bio/gmm/test/data/gmm_projected.hdf5 index 3866125f2b150cc3fb972a4317cef4343644f5e4..602a4184ae4fd4232a24db35aee2525e87558e80 100644 GIT binary patch delta 1677 zcmewm^dV@&9A=hNwR}mN7chq~fmxFmFi)7Qz^pWR6<ZKkU=zCoBcuFeM-CYgcVFM& zlEji?bvp(aSh2a0qo04`1p!9+$$}EyU>%zmNQf|kSs=qEUf|dqASDA4*jym3!pIJ? zk%6IN;=$U<3d};2wPd@XLKzbe=7NQ+<lsUZ55_Xq>q70T;j%aT;J5IA2$V0<>;COv zy@tc}rv<__4Dt@j`aAdQ98o+F!M=G>><@zj=W~S?K3*VwV21ITyT5JD?$`ZaFLn2~ zfkXU;_m|8PzwAGL&tXM&jPikF7Y$Pj6<81Sss2hTW)MAaCgF0@gDENpcK%qb&L}Ka z@1VBds#-c+#v$lgUBUX<8V>JHm35cDRyt61PwUM7oy-oc`_63lysPd|@#FvB1woPy z+c_)qB7IaG<iDL))GIJ{;JNs&crUl$fqJ`#a{~Q^4$Rd17yHIm^nm|$i({Ymi5-wh zby&4;hrYw1FuPQ3Ic*2#eBpfKA~A=U7iYCKnd&7RQh$ls+6fCdD16HmKB~;-;GCzz zo3F-rz-2GzGA)1B1AC@7IlPwCIPk;uV!iQwrUSgM4Gx}qsCeMtMX|-I9MA2In!Yph ze9&~5H`Uob&P@G)`%3Bg?{`16&wa18^L?iHfooGE{by`6JrEli8*=}djKgkGW^+pm zRfmt;;`LtEyK@~l&K%+|m7sh;Q^(*|V!E`$G+Wi<6_ce7sIEx0v}#s5@MD!*h=s1S z!<`>n*6i(KKj1uNdG~I`kNdaXl<@C}__}|=r$ybHinrM(zCX33D^=ydi6wg{w{bBZ zI3moop8u}!fpg#X_{}PjcSxFi)ACP|qQk2dH;g}3f3uJIan!rMy8pO+h-JWAmWKid zejmyICZ{WKz-rxUcZFDi153oWdwUy-9eBDmSp4=pRtJVx3&KQ%6dW2kH>gzQXdF24 zbY{%OWQGGZoXK+o{Dd5)Y3(_&DMHR+-fXj+AL`%jAIu8xZJ6-d{>+IgHcR(U`yb?9 zDy@BecmE};v*{mq3p+Sn{9nIt{runi3y$l!UTAq{&$8vCRPdY6`@4AJQ$Cq;9@w#J z+2Z*xUhki~c<uJ)1BwocTVE@zk`i`k6PG{lUM70L>zV5pm4osJ(m(vj2y*?k-=tOT zSI#8)1MN5R6%=Nw9Jn{_-uI2x><;EF4%5sNxg8G7p8X;F8?yt$mtC){6DQfL71w9Z z-P0fG&{MxwS*Sb0;Vt`@wBH*;9eAGxUz^h%>9F%|GRJDoD2JW<j9d?FNN@=3+3)as zLx_V;nfT=^{GkpXUZ?#xi}P?;z`8!Pbyl>4f`Xrza+AM<pr?EPnp=JjWqT%apOH>* zSp34{>Z5`Phrsi7H_nHIIdtq{I>D_F=J3B>lr?KfTA0ITn*;Avu0}c>J$f{h>qmk^ z?P_W94}H-NVPBiC*CvHI{F?XVgyn|_hp5vEEf)Ke97I3ddvd}!)?vxY4cXpV(GGHY z2e?#AgB@fY0z>v>1v&(t-R1wKBFTY!#+M}Tdl3%1t^~Jjcpu|1`_;987xz6KZpT?O zopBFwsAu?SX7znxphI=Qe8u?*;STi;uNFy8jCRlzzT~*xI=~^1N$c^}zmX2{S09}H z&=cS=VgI#tIW0jB9QzMkUgVJMP-<>>xSAosVbi1Lqry%J4zGATVmLXX9enprcVsCK zcR1zEIU{9Gm;=|>uA+^vyd11vx~S|`^>$d2)WmQ@HK5+%{k4bhj=ptw*uk>O)#jX= zLy6p<!?HWV90F}a3%8mDI9w|1cg$%Ga=5j=JLBRx2Zt8r(tnZ+0S*lB?q@tx_H{Vg zWN-FFD8OOzS<iJdU->#r7qU2AV;SghPuEsdYOS+FNVtQ9K)%0&&U1E-w{~t0eN_{+ zweJKuT$ol-{-L`*(7`mxdV0`zFNawPS9}@U0v&`HAA9XT?&lB^y5#nTpP>%onrt!m zRQ(;kIPq3bdmrGCbi$W^-tsVq^~Hh3Vo$sr_Bg8Mif#&Vh+p&j{S&SLhrb&ZvsF!W zcj(&q^?|IWmqXSz3z-K$Jsl23+dl2KcXHVGL;IrG-e89-i@!&%2|noKFg;%U?*>PA zhsg{ka(pe`4h7$(RGOV!9i$Xq2&w3MJD4g=iC~uqba?9FaY3y<#DS~q^b^A$!4A*< q1tzTe>*3%fA*8*b*xTXly2n$l{_}F!JdwkKX|e$e+h#otCq@9JB;)!3 delta 1690 zcmewm^dV@&9A=hs-&Kj57chq~fmxFmFi)7Qz^pWR6<ZKkU=zCoBct|YM-Ca4+{}uS z(xOzo^xRxMcVFM&lEji?bvp)RFk^EeM?XKK_GCecZm{;v3nWAs!7Px;6EARV4v><8 z2y8BpR$*iZS<k>wG4WvSWCdoS$y%~qP@#;82XnzfR&sEmjR#{H>vf?P?%rybSm(F! 
zfC!X-Jvq_3NKV~Bf6lxky$=-}s<jua*A9?5AhPc6w(=6g1Fz<vsONJ3YOmXHZ^tzc z5eHjQnMqkJ+71tj-ZsoXBYj}uMpG}bnTiMAUEccJjPcL@wkrqt%H$Xhyb5mW)fW~y zu*<&F?4X!Py@S)2`QM`?6dZco|3s<%Q*}^@G_INxta^a?NX(pxobnD#?+!had8_K6 zcOo_FU5==O$Zg&ukHpj*L{Gn6@xj&1AwKB2(_Tw~0~H%~O$~g(c%VkXOLn%Z#DQHu zM0Tg_U_Ma3WBR|>_thO(9b}Bv&gnQXCFy<1d&T8&?~|RguyXw$`$@a^EN@&P?yzjx zJ3aZI|LqSmdu3g@#&$r2<-<?LIGF?1sSjTCS!y3xW~i#Yr%U|6^K)0_846_%tez{J z7s1Kt5LtLQ=#;O9!_uBRtntUx4;W{;>+O-}Iv^|g_mO&w$bnKTzs8GUrU#~kD_z_1 zL&Sk?%7bZFKB_rfIH2=lR{gV6`}4OtOzZzCc3|5h2UC7`We4sZ)!vt%DjevG5)fqT zRy?p}`S(rRMZfP?T`#<yuSVg(uA94$CfG1LI0R(x(2o;z&?vI&zn}JUfA-1+at*p_ z2U2<~mR6UE9!PSSUVNca<-nfnujO-hC^!`VUHd<lN8X{~{I`7BEvycn#<BP7#f-n% zTOL0hc$-c6z#oqtE4pTK9N70q$>a7rjsqgQAAE~FCV7BCbG_uG&j<D!gjv;S1j##G z{Iju_PfGVdI3vrR=Ne!4&)9kClEb>M_V*653Fxq@JLD|r`{f+)*#2meAFsqc4u_ae z`;2tXb2z*?$KLYoIn#l2JttIq&WkyeFWXZ8{E6L%{i*vVtY>Q%aai#9(|ir*EA}!S z7bZ+*x?wNBgK=HZeAWX6L0jI$-<5P&buGAAH(t!aU-PY_TbRg!ygha=PgDvV5LdbJ zz~UX#f%u=Al1J(k4y+9iddRa#;XvHnj0%@f1_zPjGMf+BUb9ycOmo<y`+a}Yp0g{q zX|NrTtE&HAc>HgegKbLhy_;^44pVarlFAA~9TqRWc*I0G#Nicx$t;7K2!~XcHgC($ z1P8s|c?Tm8hd8+2oAKYOCCDLQgW~0lzdRlOAB|+YVif7HP%5+Z@h&e1#^TnMXXE`G z_I@p0V00+j!RW=emsPb94(be-Jd}?IIw*g8)8~IG*x^h4`jcE`CP5C{%k!=s5RY<r z&Ny$qnnt1nKTp5w(x?cB4SSV082$-yxa{y;DEVBt!=m$tYB(<^I_%n4m$Jn+-a$5` zF}v|dw8Ms$J_o^DVGdImyS$bx_ILOi^o+ONFTr8W|9e)GpGP=6;R@a0?Gxo7TK}Wu z*?C`w`+PramMe!j)E5^!OIU;kI4JyTJHa|N!r@nSgUT_MXosBFAI<Gd{2fl~7&FgF z2z8j$v9WORWiN;I^JGJ#g@YWVpRP>bw<ylx!j;anHDPfM@{LB}Z-2)+7(dkRvYip- zusK3sE0H<G!PmZPZ&g&VLtXLqC%)_f4oBo$*XgeFbolp&l{Gd!px)uH)@Kgy_&|ru z1)Yk9nqCg(!lB!>*~1;GZOT4`sRTG&=HwG)6AX3ub8VsK+7vqn$K+GKr_+NR&R0HS znfx`t;ZCl?%HV(ihku!ud^cqUI|vDPZF`~<=wNncno02q9|xXKrmil$K@OUm>yCLY z_H&qL|JbE}W{|@Kxf6%CYu5)l)CFkCepw&j5HiE0)b~x0!`gd!(c4-B9M1YAyyCkQ z?r^pHSPZ+6w?p|1o9cwA0S<Qx>u+v!3U+AulzH)$sGmcx*|`tz*&-eGxd=+kWejkb z<g##+^9>(|DA_A}<SpGCa>Mp7({yrkxDlqJ$#v7k!PVbIYePqn!;v+o!-9iv`8w3h zI_Yt-#lzvtRe|>k@j(tNCAIwyc6d8nPmDU|Cg|f}xHEf3-?s<{fyY)K3N^zVl6NhT w$Y}_32n*|Cak}g2aIoN`PPJ~J!_UVbmV_J(aM;|)VZk)nfQ4<do`w@60I&qyzW@LL diff --git a/bob/bio/gmm/test/data/gmm_projector.hdf5 b/bob/bio/gmm/test/data/gmm_ubm.hdf5 similarity index 68% rename from bob/bio/gmm/test/data/gmm_projector.hdf5 rename to bob/bio/gmm/test/data/gmm_ubm.hdf5 index a25006d8ede0f7e5055b75c1a4c828d09f5e7c3b..50b42a96d67e594f3f80d869759f8a135f7a937c 100644 GIT binary patch delta 1694 zcmey7@*`!#9A=hNwR}mN7chq~fmxFmFi)7Qz^pWR6<ZKkU=zCoBctMEM-CYQcVA!M z#N>?3yi|2N1_lO($)z01Odb527joqDPrSg-s5n_rVlr40$dt_%k~)lV9@tDNX^5Gd z3#3&bQj@eACKqUCO>UA^0xN~_C&|J19TP9Sn5?0oP`}ReZ6M?0`}SJVoZ<C+59}Ek z8Ey8JOs-mcbN>_h^0d?WckJW6)?M0n^v?dgWqJI`l8fz=|GrC>mfK;keXhEIS@r4u zt=H5=p3Jzr|I1|K?fto%_vcTN6ig~vXz#ntG2m(7<NYx!PD$~ZPqja*{Ke1u<P!Uz zmu@av-FnhKWbtG3`a2sA?T>p|)wTW6ef!PD;)U1Kp4+F&FFmv@VV!+XMVX}(*EM_5 z!!@i0#+U7t7am{4`)h{1XV8^LZpnxD2d(OCu=d(*Kh53P^ZmZV_8WJ%nAb$?vk$zr zRXldy9ec)&CucBdytBWiSoxoO(kc7PEfwcHldjkYyt&tzUVg)V_xpSGQ9r{s>_5k` zaqFVKOZ%_CnrGcUZK1vAlZP7X_8hZcl3O>g$MgFB8=f=EzMfpNe?j*8{TV{*?X_-8 zvhI6%+g{W?-eU2FBld?cPv_>Gb9w(P)52F%SM0M-a1%Q6OZA?;nvsgKV)hpMhkWx+ zR$u(GU+jauaf`>@{U;NBWJLOo?LYXVexdMU{-^foHy6&8Y<#xANcwK5+WhnOoVCx4 zu1vaUzrtls`1#rs`vv3QgiSNQX1{TV`{TTf?e^-<p;xz8EVO?TeS$S1W841U{S_Pk z{C#X6o!1l^x&QM1KgN%CeEz*=zu`6~j~V-}?=LvL)K1C!guTRMw*nE1m;0-~Uk>O| zdA@%`{og#9mU~C{Z!nCLm=}0!f1At0%9$Zs?1lJqSR;e#_V=6+OZh6X#s12dQ$9(t zi}&Bp%<M5?FSOT46>gsDd}e>xn;FXe>Ua0cen}9}_Ia~kv$(%mM(xu6lR=EjY)_rA zf5|#;`|BNB?cb(fT*>)*&;Er*@1k4QKeuP@5t;K-@#Oyc(vP=<PtMtDKl|9?gaEbY z_Hq2>wWTYT+kdP0ZaB64<bLa4*H<NWKiJ<jwQZhY*=G9~-al)%|Gsap8*(ln+<vz` z>mQ!Bil9sTCxz?ptqD40-zS|MR~7f#e%Xhg=jP78XJ5W^{<Zu|Gwm15>)_;Qmv<;x 
z&pcE0y`sahg(_ca5*6wl)R_anvm8-ysGWJE<7J+rgO{Z5Sr<D+he_o#yO>4f9g^Pe z^j9laa&Qup)Sa22=rB>m!17eSg2U&(PuH=h$~jy;eru)t1_g&r`e9R9*2_3NN?SBT zs#M;g?ZzhsO&)oNYYRm$70-}!cyTU*P3)F}L*z@=gI}5y92Q+?PujY&Ufy9<)MKBB z1UZM3i+<{8n<+ZnD9$M9(N=VrlsN4cuZEI?mZ#9piMJFSzD(M5y=bnS!<<fylNP5G z9GVV?8z?MMa!__?QaUB0;E?=${+e`C1&8IFy$kR5D>^KY_;br$UBThv!exEUHi{0@ zKO8-!`$@qeT!ZbwoplNhMq7)Se%H@ca8PsOezMC)!C~hXgYWS!iVpL_YA*{dQ*@BM z&vDXSOvXVea>E+&OY#mPHa<~%=gB)<t&wLnf1%*8g>%Ev#*fktO=l%MjaJG#l-nF; zZ0J*TxS73eYw0pMhxmZ+wR?pX9J0)6?lYWGaL^I_mA24V!Qrdo)W6p!Dmbul+mzL( z>&rVVKlMT~MoZ4&Q8x>JXN01IP|J^5>ZP&{cIG=K_Atphh^gJuH(ns;P?+~#Bi2>P z!D-)l<-JoC91K^RM%Kv5JAB;!%_DiPtV8IoxQ(T%@(y=(r9WI&QE*V3Co5%Us_1aI zmc{dbhN8pVxW{#YG4c*fJsXt;zAHF9dH%fq_WWb=4ilVYX3Qy6aF}*Sj_GTPoP)Ui zoehty6dY3fmmd-SFYi#iF;4iaznp_1)4mp$r*aO7wLwdBLX;f(zfIotr9jEy^7BNW zrz%Pg;wSkhXFXSNIQTJx?b-_khc~}>+|&Id=fE`i$@AXX3J%4wQ*zI&Rdf)RJo9qD ziM+!vPn%PgM`tNId@U+nw{fwILuBfcMZPQK9eyr~Osrod@8Dz>E;J`k(LqP0XM3EQ zyhF}OpAE|oDmrxj+qY8e`sSai%q-vnev{T52xIdCJ$RL)S1?(DS!419!w|3<Q0)N# DIg<aZ delta 1698 zcmey7@*`!#9A=hs-&Kj57chq~fmxFmFi)7Qz^pWR6<ZKkU=zCoBctwQM-CZ@+{}uS z(xOzo^xRxMcVA!M#N>?3yi|3&$+;ZLj18M7a+LEk>P{Avm<-kkGHJ7gqz)sT2R2tq z8e;C|0%;Y9)FiEj$pxBOlbd9fz)E5KNpdiL$HWUSCTl1t)W0be+h3gd&|XF4a-)Rq zU3*4GMy^li<&F29-oLyq(DP&9A$#p7t2MrIoU)gV=;Xh{`@sI#L)ErNGp_Hym?}3( z*8l1L$KP+*#``?k-)$SPP5#;{`!?4NO3`vR?R5iJT$R37wqH7}IgiI}^Zu_c+L>OT zPwy`cYn>$WW!L_H{>4-3WA+}iFT2IZC@lBfUg@FX<|oUa+8^A0>80Gwo%TsaN2TYN zp0J;;8M!~^&<=aC_Y*l)<{hwi+RZL@o9(K-t%Xkf8KblNndXX^zF551ew)j~EqsMX z_8%$T$9nU?Zu>g(xn;`_-Lfx9bi5-Ub#4FhS$zWgr%koLa@YR2{{Q3l{QK6`e^&Fj zzF%ye!g221ANR+qt{3lmzifZ`jjJh9H&59MXWdx*@Zz!k`*T*zV~IGtf1+^7)C)!T z?cKC*DbC8jWPe-6g(<Idw|(r^eZDf<XZP3ceI6Nkb+3I((Q-aRuCw-6yJtpz729Xe zvU+;6)y}8;moCXOKf3wy{zXpdkJw+#v0t>d-r;Ls?n8UE4S}UkQts|g_jVAUH0AF8 z<3@7A8u1tQ^9bD6UhQ_({+yoJ!)mF!_9x}mDb47)W^c|CRI0dW_kK&4B}e-nKG-i( zwEK|L(uMYu_3HEf3!dG-r|*<?yTEz-x&4V5-=}QYzx}HzXU3Hy`=e(}J=6E?)BZ14 z5C5F9@8$mB`aL_tLyXSvcah6jU0ii}f0h1?6)z1g+XuNn<&-WzxW8QE;UjO&d;4qR ze#*}`zh=Ma$Ej`oFBaNAKQ`@Y;J(fFQihD%Z|Wb~zrR|wK|S!!ej&*M&G!~p?JJLm z7Oz!*Xzwqbm>!UN$=>B-G@sM%bN1hw*2^wY_-o&<(z_|2`M~~q>77p^-kn@zU*Yp1 zByqxH`{~+0uCc#9Zm*}H#Obj7`TjfCBPI&HzP-P|Gry$e@d5koKmVzn*S&AQ{d}QO z|Kr{EE-xzhc?>u0FX`O8g?G;``x~JLHRecMwV$vupY4<RP5U6JOCLltPwxL|vpY<q zP2S<w_W$aBp^6Uj=R$?O+ZE~^c--RlJnffvSokzlCTW9$1K+|6Hx-{KIQ-19TgUlL z&Oum6e(Ls5iVg-f?-|O?6&>skK07bHRl(u?jr7u-XR;0wxt{N4T~cr;?p&2DTqNhP zX}Mmy?qPX{`wjb!bWW3V5SXg=tY)j6L);g>1Ma^R9IpQk<7cZ-aJY2ym!DQ$y@JCD zo6cO}i*gRTH5Y76^-yrwn7#MQj%NxEsxIt$flCw}4*n63-F{rb!O;Ee3CVN?hbJBq zMiwSY4qpRjHDCL#=pdB1u*J<x!Qsu7$It!y6dk0h4VhP4Dmomh3U2aZl5x1?dwR{X zHbsY9K^v9oEEF7;IR$aXtx|B1-RNM$Sbt2xq3HAHy`k(14#n9n2LqofI0)@|S9eKL z$w5kI(VV?kWE^g@u3sw5Bj-@h5Uk6ZrQpyYHS@3SDMbe>lWh))x8xmO?X+9hvsuBx zrTEmnzCuL@F*aisxg+up*Fsib&|0DB@a<qtTK+Z#hw9?<f$jGc99W(xU3mXS!9hPM z(XrmrK;GfU(erJ!M`Ro}g-;Xxb5+sd|F<i~9re-<s!l>)vL5mdKT{Ng{d46V_R6m) z$mdaV*icoh?kKC^VAPd9Gjh9}!@K_J`RThP9jr}Lm22P0Imo#B-oCS0-oYbFWU2fH zMTa||InoZFRB)(q`@X^Ogq%Zgu5N%zrlP~mlg#y9S~c<x4m_r27S9zN7&ZDQ|BID# zu(5D-TDDH!A?)~T=eeSa4tgIQm!8@q=V0(%;=0mrS%=ToLRyuqiVjsdYkp7dRC2f~ z|EYO-m88Q|%LQ6ZlNB5)^|rj9w_nkLX%3t5UnT_y{Y763uJ|fA>`Qm~YdcZF!CGL! z%tyWo4wc$Ae?P<tDmoZ!^<TF{PR4;FDBO|bp@PGaTL&kHImtVGVb3Tm$X9R>PF%KW zS*oIgUhm7mkD-bVyoR%~z8&2BQ<a$oT)=PAngd~MUZ4lBcJvA+D==$Jo?sXPRs*U% E03DV2$^ZZW diff --git a/bob/bio/gmm/test/dummy/__init__.py b/bob/bio/gmm/test/dummy/__init__.py deleted file mode 100644 index e19bac9..0000000 --- a/bob/bio/gmm/test/dummy/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from . 
import extractor # noqa: F401 diff --git a/bob/bio/gmm/test/dummy/extractor.py b/bob/bio/gmm/test/dummy/extractor.py deleted file mode 100644 index 9459dd2..0000000 --- a/bob/bio/gmm/test/dummy/extractor.py +++ /dev/null @@ -1,30 +0,0 @@ -import numpy - -import bob.io.base - -from bob.bio.base.extractor import Extractor - -_data = [10.0, 11.0, 12.0, 13.0, 14.0] - - -class DummyExtractor(Extractor): - def __init__(self): - Extractor.__init__(self, requires_training=True) - self.model = False - - def train(self, train_data, extractor_file): - assert isinstance(train_data, list) - bob.io.base.save(_data, extractor_file) - - def load(self, extractor_file): - data = bob.io.base.load(extractor_file) - assert (_data == data).all() - self.model = True - - def __call__(self, data): - """Does nothing, simply converts the data type of the data, ignoring any annotation.""" - assert self.model - return data.astype(numpy.float) - - -extractor = DummyExtractor() diff --git a/bob/bio/gmm/test/test_algorithms.py b/bob/bio/gmm/test/test_algorithms.py deleted file mode 100644 index ad4f79c..0000000 --- a/bob/bio/gmm/test/test_algorithms.py +++ /dev/null @@ -1,719 +0,0 @@ -#!/usr/bin/env python -# vim: set fileencoding=utf-8 : -# @author: Manuel Guenther <Manuel.Guenther@idiap.ch> -# @date: Thu May 24 10:41:42 CEST 2012 -# -# Copyright (C) 2011-2012 Idiap Research Institute, Martigny, Switzerland -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, version 3 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. - -import logging -import os -import shutil -import sys - -import numpy -import pkg_resources - -from bob.bio.gmm.algorithm import GMM - -from bob.learn.em.mixture import GMMMachine - -import bob.bio.gmm -import bob.io.base -import bob.io.base.test_utils -import bob.learn.linear - -from bob.bio.base.test import utils - -logger = logging.getLogger(__name__) - -regenerate_refs = False - -seed_value = 5489 - -_mac_os = sys.platform == "darwin" - - -def _compare( - data, reference, write_function=bob.bio.base.save, read_function=bob.bio.base.load -): - # write reference? - if regenerate_refs: - write_function(data, reference) - - # compare reference - reference = read_function(reference) - if hasattr(data, "is_similar_to"): - assert data.is_similar_to(reference) - else: - assert numpy.allclose(data, reference, atol=1e-5) - - -def _compare_complex( - data, reference, write_function=bob.bio.base.save, read_function=bob.bio.base.load -): - # write reference? 
- if regenerate_refs: - write_function(data, reference) - - # compare reference - reference = read_function(reference) - for d, r in zip(data, reference): - if hasattr(d, "is_similar_to"): - assert d.is_similar_to(r) - else: - assert numpy.allclose(d, r, atol=1e-5) - - -def test_gmm(): - temp_file = bob.io.base.test_utils.temporary_filename() - gmm1 = bob.bio.base.load_resource( - "gmm", "algorithm", preferred_package="bob.bio.gmm" - ) - assert isinstance(gmm1, GMM) - assert isinstance( - gmm1, bob.bio.base.pipelines.vanilla_biometrics.abstract_classes.BioAlgorithm - ) - - # Fix the number of gaussians for tests - gmm1.number_of_gaussians = 2 - - # create smaller GMM object - gmm2 = GMM( - number_of_gaussians=2, - kmeans_training_iterations=1, - ubm_training_iterations=1, - init_seed=seed_value, - ) - - train_data = utils.random_training_set( - (100, 45), count=5, minimum=-5.0, maximum=5.0 - ) - reference_file = pkg_resources.resource_filename( - "bob.bio.gmm.test", "data/gmm_projector.hdf5" - ) - try: - # train the projector - gmm2.fit(train_data).ubm.save(temp_file) - - assert os.path.exists(temp_file) - - if regenerate_refs: - shutil.copy(temp_file, reference_file) - - # check projection matrix - gmm1.ubm = GMMMachine.from_hdf5(reference_file) - gmm2.ubm = GMMMachine.from_hdf5(temp_file) - - assert gmm1.ubm.is_similar_to(gmm2.ubm) - finally: - if os.path.exists(temp_file): - os.remove(temp_file) - - # generate and project random feature - feature = utils.random_array((20, 45), -5.0, 5.0, seed=84) - projected = gmm1.project(feature) - assert isinstance(projected, bob.learn.em.mixture.GMMStats) - _compare( - projected, - pkg_resources.resource_filename("bob.bio.gmm.test", "data/gmm_projected.hdf5"), - gmm1.write_feature, - gmm1.read_feature, - ) - - # enroll model from random features - enroll = utils.random_training_set((20, 45), 5, -5.0, 5.0, seed=21) - model = gmm1.enroll(enroll) - assert isinstance(model, bob.learn.em.mixture.GMMMachine) - _compare( - model, - pkg_resources.resource_filename("bob.bio.gmm.test", "data/gmm_model.hdf5"), - gmm1.write_model, - gmm1.read_model, - ) - - # compare model with probe - # TODO YD 2021 - # probe = gmm1.read_feature( - # pkg_resources.resource_filename("bob.bio.gmm.test", "data/gmm_projected.hdf5") - # ) - # reference_score = -0.01992773 - # assert ( - # abs(gmm1.score(model, probe) - reference_score) < 1e-5 - # ), "The scores differ: %3.8f, %3.8f" % (gmm1.score(model, probe), reference_score) - # assert ( - # abs(gmm1.score_for_multiple_probes(model, [probe, probe]) - reference_score) - # < 1e-5 - # ) - - -# def test_gmm_regular(): - -# temp_file = bob.io.base.test_utils.temporary_filename() -# gmm1 = bob.bio.base.load_resource( -# "gmm-regular", "algorithm", preferred_package="bob.bio.gmm" -# ) -# assert isinstance(gmm1, bob.bio.gmm.algorithm.GMMRegular) -# assert isinstance(gmm1, bob.bio.gmm.algorithm.GMM) -# assert isinstance( -# gmm1, bob.bio.base.pipelines.vanilla_biometrics.abstract_classes.BioAlgorithm -# ) -# assert not gmm1.performs_projection -# assert not gmm1.requires_projector_training -# assert not gmm1.use_projected_features_for_enrollment -# assert gmm1.requires_enroller_training - -# # create smaller GMM object -# gmm2 = bob.bio.gmm.algorithm.GMMRegular( -# number_of_gaussians=2, -# kmeans_training_iterations=1, -# gmm_training_iterations=1, -# INIT_SEED=seed_value, -# ) - -# train_data = utils.random_training_set( -# (100, 45), count=5, minimum=-5.0, maximum=5.0 -# ) -# reference_file = pkg_resources.resource_filename( -# 
"bob.bio.gmm.test", "data/gmm_projector.hdf5" -# ) -# try: -# # train the enroler -# gmm2.train_enroller([train_data], temp_file) - -# assert os.path.exists(temp_file) - -# if regenerate_refs: -# shutil.copy(temp_file, reference_file) - -# # check projection matrix -# gmm1.load_enroller(reference_file) -# gmm2.load_enroller(temp_file) - -# assert gmm1.ubm.is_similar_to(gmm2.ubm) -# finally: -# if os.path.exists(temp_file): -# os.remove(temp_file) - -# # enroll model from random features -# enroll = utils.random_training_set((20, 45), 5, -5.0, 5.0, seed=21) -# model = gmm1.enroll(enroll) -# assert isinstance(model, bob.learn.em.mixture.GMMMachine) -# _compare( -# model, -# pkg_resources.resource_filename("bob.bio.gmm.test", "data/gmm_model.hdf5"), -# gmm1.write_model, -# gmm1.read_model, -# ) - -# # generate random probe feature -# probe = utils.random_array((20, 45), -5.0, 5.0, seed=84) - -# # compare model with probe -# reference_score = -0.40840148 -# assert ( -# abs(gmm1.score(model, probe) - reference_score) < 1e-5 -# ), "The scores differ: %3.8f, %3.8f" % (gmm1.score(model, probe), reference_score) -# # TODO: not implemented -# # assert abs(gmm1.score_for_multiple_probes(model, [probe, probe]) - reference_score) < 1e-5 - - -# def test_isv(): -# temp_file = bob.io.base.test_utils.temporary_filename() -# isv1 = bob.bio.base.load_resource( -# "isv", "algorithm", preferred_package="bob.bio.gmm" -# ) -# assert isinstance(isv1, bob.bio.gmm.algorithm.ISV) -# assert isinstance(isv1, bob.bio.gmm.algorithm.GMM) -# assert isinstance(isv1, bob.bio.base.algorithm.Algorithm) -# assert isv1.performs_projection -# assert isv1.requires_projector_training -# assert isv1.use_projected_features_for_enrollment -# assert isv1.split_training_features_by_client -# assert not isv1.requires_enroller_training - -# # create smaller GMM object -# isv2 = bob.bio.gmm.algorithm.ISV( -# number_of_gaussians=2, -# subspace_dimension_of_u=10, -# kmeans_training_iterations=1, -# gmm_training_iterations=1, -# isv_training_iterations=1, -# INIT_SEED=seed_value, -# ) - -# train_data = utils.random_training_set_by_id( -# (100, 45), count=5, minimum=-5.0, maximum=5.0 -# ) -# reference_file = pkg_resources.resource_filename( -# "bob.bio.gmm.test", "data/isv_projector.hdf5" -# ) -# try: -# # train the projector -# isv2.train_projector(train_data, temp_file) - -# assert os.path.exists(temp_file) - -# if regenerate_refs: -# shutil.copy(temp_file, reference_file) - -# # check projection matrix -# isv1.load_projector(reference_file) -# isv2.load_projector(temp_file) - -# assert isv1.ubm.is_similar_to(isv2.ubm) -# assert isv1.isvbase.is_similar_to(isv2.isvbase) -# finally: -# if os.path.exists(temp_file): -# os.remove(temp_file) - -# # generate and project random feature -# feature = utils.random_array((20, 45), -5.0, 5.0, seed=84) -# projected = isv1.project(feature) -# assert isinstance(projected, (list, tuple)) -# assert len(projected) == 2 -# assert isinstance(projected[0], bob.learn.em.GMMStats) -# assert isinstance(projected[1], numpy.ndarray) -# _compare_complex( -# projected, -# pkg_resources.resource_filename("bob.bio.gmm.test", "data/isv_projected.hdf5"), -# isv1.write_feature, -# isv1.read_feature, -# ) - -# # enroll model from random features -# random_features = utils.random_training_set( -# (20, 45), count=5, minimum=-5.0, maximum=5.0 -# ) -# enroll_features = [isv1.project(feature) for feature in random_features] -# model = isv1.enroll(enroll_features) -# assert isinstance(model, bob.learn.em.ISVMachine) -# _compare( 
-# model, -# pkg_resources.resource_filename("bob.bio.gmm.test", "data/isv_model.hdf5"), -# isv1.write_model, -# isv1.read_model, -# ) - -# # compare model with probe -# probe = isv1.read_feature( -# pkg_resources.resource_filename("bob.bio.gmm.test", "data/isv_projected.hdf5") -# ) -# reference_score = 0.02136784 -# assert ( -# abs(isv1.score(model, probe) - reference_score) < 1e-5 -# ), "The scores differ: %3.8f, %3.8f" % (isv1.score(model, probe), reference_score) -# # assert abs(isv1.score_for_multiple_probes(model, [probe]*4) - reference_score) < 1e-5, isv1.score_for_multiple_probes(model, [probe, probe]) -# # TODO: Why is the score not identical for multiple copies of the same probe? -# assert ( -# abs(isv1.score_for_multiple_probes(model, [probe, probe]) - reference_score) -# < 1e-4 -# ), isv1.score_for_multiple_probes(model, [probe, probe]) - - -# def test_jfa(): -# temp_file = bob.io.base.test_utils.temporary_filename() -# jfa1 = bob.bio.base.load_resource( -# "jfa", "algorithm", preferred_package="bob.bio.gmm" -# ) -# assert isinstance(jfa1, bob.bio.gmm.algorithm.JFA) -# assert isinstance(jfa1, bob.bio.gmm.algorithm.GMM) -# assert isinstance(jfa1, bob.bio.base.algorithm.Algorithm) -# assert jfa1.performs_projection -# assert jfa1.requires_projector_training -# assert jfa1.use_projected_features_for_enrollment -# assert not jfa1.split_training_features_by_client -# assert jfa1.requires_enroller_training - -# # create smaller JFA object -# jfa2 = bob.bio.gmm.algorithm.JFA( -# number_of_gaussians=2, -# subspace_dimension_of_u=2, -# subspace_dimension_of_v=2, -# kmeans_training_iterations=1, -# gmm_training_iterations=1, -# jfa_training_iterations=1, -# INIT_SEED=seed_value, -# ) - -# train_data = utils.random_training_set( -# (100, 45), count=5, minimum=-5.0, maximum=5.0 -# ) -# # reference is the same as for GMM projection -# reference_file = pkg_resources.resource_filename( -# "bob.bio.gmm.test", "data/gmm_projector.hdf5" -# ) -# try: -# # train the projector -# jfa2.train_projector(train_data, temp_file) - -# assert os.path.exists(temp_file) - -# if regenerate_refs: -# shutil.copy(temp_file, reference_file) - -# # check projection matrix -# jfa1.load_projector(reference_file) -# jfa2.load_projector(temp_file) - -# assert jfa1.ubm.is_similar_to(jfa2.ubm) -# finally: -# if os.path.exists(temp_file): -# os.remove(temp_file) - -# # generate and project random feature -# feature = utils.random_array((20, 45), -5.0, 5.0, seed=84) -# projected = jfa1.project(feature) -# assert isinstance(projected, bob.learn.em.GMMStats) -# _compare( -# projected, -# pkg_resources.resource_filename("bob.bio.gmm.test", "data/gmm_projected.hdf5"), -# jfa1.write_feature, -# jfa1.read_feature, -# ) - -# # enroll model from random features -# random_features = utils.random_training_set_by_id( -# (20, 45), count=5, minimum=-5.0, maximum=5.0 -# ) -# train_data = [ -# [jfa1.project(feature) for feature in client_features] -# for client_features in random_features -# ] -# reference_file = pkg_resources.resource_filename( -# "bob.bio.gmm.test", "data/jfa_enroller.hdf5" -# ) -# try: -# # train the projector -# jfa2.train_enroller(train_data, temp_file) - -# assert os.path.exists(temp_file) - -# if regenerate_refs: -# shutil.copy(temp_file, reference_file) - -# # check projection matrix -# jfa1.load_enroller(reference_file) -# jfa2.load_enroller(temp_file) - -# assert jfa1.jfa_base.is_similar_to(jfa2.jfa_base) -# finally: -# if os.path.exists(temp_file): -# os.remove(temp_file) - -# # enroll model from random 
features -# random_features = utils.random_training_set( -# (20, 45), count=5, minimum=-5.0, maximum=5.0 -# ) -# enroll_features = [jfa1.project(feature) for feature in random_features] -# model = jfa1.enroll(enroll_features) -# assert isinstance(model, bob.learn.em.JFAMachine) -# _compare( -# model, -# pkg_resources.resource_filename("bob.bio.gmm.test", "data/jfa_model.hdf5"), -# jfa1.write_model, -# jfa1.read_model, -# ) - -# # compare model with probe -# probe = jfa1.read_feature( -# pkg_resources.resource_filename("bob.bio.gmm.test", "data/gmm_projected.hdf5") -# ) -# reference_score = 0.02225812 -# assert ( -# abs(jfa1.score(model, probe) - reference_score) < 1e-5 -# ), "The scores differ: %3.8f, %3.8f" % (jfa1.score(model, probe), reference_score) -# # TODO: implement that -# # assert abs(jfa1.score_for_multiple_probes(model, [probe, probe]) - reference_score) < 1e-5, jfa1.score_for_multiple_probes(model, [probe, probe]) - - -# def test_ivector_cosine(): -# temp_file = bob.io.base.test_utils.temporary_filename() -# ivec1 = bob.bio.base.load_resource( -# "ivector-cosine", "algorithm", preferred_package="bob.bio.gmm" -# ) -# assert isinstance(ivec1, bob.bio.gmm.algorithm.IVector) -# assert isinstance(ivec1, bob.bio.gmm.algorithm.GMM) -# assert isinstance(ivec1, bob.bio.base.algorithm.Algorithm) -# assert ivec1.performs_projection -# assert ivec1.requires_projector_training -# assert ivec1.use_projected_features_for_enrollment -# assert ivec1.split_training_features_by_client -# assert not ivec1.requires_enroller_training - -# # create smaller IVector object -# ivec2 = bob.bio.gmm.algorithm.IVector( -# number_of_gaussians=2, -# subspace_dimension_of_t=2, -# kmeans_training_iterations=1, -# tv_training_iterations=1, -# INIT_SEED=seed_value, -# ) - -# train_data = utils.random_training_set( -# (100, 45), count=5, minimum=-5.0, maximum=5.0 -# ) -# train_data = [train_data] - -# # reference is the same as for GMM projection -# reference_file = pkg_resources.resource_filename( -# "bob.bio.gmm.test", "data/ivector_projector.hdf5" -# ) -# try: -# # train the projector - -# ivec2.train_projector(train_data, temp_file) - -# assert os.path.exists(temp_file) - -# if regenerate_refs: -# shutil.copy(temp_file, reference_file) - -# # check projection matrix -# ivec1.load_projector(reference_file) -# ivec2.load_projector(temp_file) - -# assert ivec1.ubm.is_similar_to(ivec2.ubm) -# assert ivec1.tv.is_similar_to(ivec2.tv) -# assert ivec1.whitener.is_similar_to(ivec2.whitener) -# finally: -# if os.path.exists(temp_file): -# os.remove(temp_file) - -# # generate and project random feature -# feature = utils.random_array((20, 45), -5.0, 5.0, seed=84) -# projected = ivec1.project(feature) -# _compare( -# projected, -# pkg_resources.resource_filename( -# "bob.bio.gmm.test", "data/ivector_projected.hdf5" -# ), -# ivec1.write_feature, -# ivec1.read_feature, -# ) - -# # enroll model from random features -# random_features = utils.random_training_set( -# (20, 45), count=5, minimum=-5.0, maximum=5.0 -# ) -# enroll_features = [ivec1.project(feature) for feature in random_features] -# model = ivec1.enroll(enroll_features) -# _compare( -# model, -# pkg_resources.resource_filename("bob.bio.gmm.test", "data/ivector_model.hdf5"), -# ivec1.write_model, -# ivec1.read_model, -# ) - -# # compare model with probe -# probe = ivec1.read_feature( -# pkg_resources.resource_filename( -# "bob.bio.gmm.test", "data/ivector_projected.hdf5" -# ) -# ) -# reference_score = -0.00187151 -# assert ( -# abs(ivec1.score(model, probe) - 
reference_score) < 1e-5 -# ), "The scores differ: %3.8f, %3.8f" % (ivec1.score(model, probe), reference_score) -# # TODO: implement that -# assert ( -# abs(ivec1.score_for_multiple_probes(model, [probe, probe]) - reference_score) -# < 1e-5 -# ) - - -# def test_ivector_plda(): -# temp_file = bob.io.base.test_utils.temporary_filename() -# ivec1 = bob.bio.base.load_resource( -# "ivector-plda", "algorithm", preferred_package="bob.bio.gmm" -# ) -# ivec1.use_plda = True - -# # create smaller IVector object -# ivec2 = bob.bio.gmm.algorithm.IVector( -# number_of_gaussians=2, -# subspace_dimension_of_t=10, -# kmeans_training_iterations=1, -# tv_training_iterations=1, -# INIT_SEED=seed_value, -# use_plda=True, -# plda_dim_F=2, -# plda_dim_G=2, -# plda_training_iterations=2, -# ) - -# train_data = utils.random_training_set_by_id( -# (100, 45), count=5, minimum=-5.0, maximum=5.0 -# ) - -# # reference is the same as for GMM projection -# reference_file = pkg_resources.resource_filename( -# "bob.bio.gmm.test", "data/ivector2_projector.hdf5" -# ) -# try: -# # train the projector - -# ivec2.train_projector(train_data, temp_file) - -# assert os.path.exists(temp_file) - -# if regenerate_refs: -# shutil.copy(temp_file, reference_file) - -# # check projection matrix -# ivec1.load_projector(reference_file) -# ivec2.load_projector(temp_file) - -# assert ivec1.ubm.is_similar_to(ivec2.ubm) -# assert ivec1.tv.is_similar_to(ivec2.tv) -# assert ivec1.whitener.is_similar_to(ivec2.whitener) -# finally: -# if os.path.exists(temp_file): -# os.remove(temp_file) - -# # generate and project random feature -# feature = utils.random_array((20, 45), -5.0, 5.0, seed=84) -# projected = ivec1.project(feature) -# _compare( -# projected, -# pkg_resources.resource_filename( -# "bob.bio.gmm.test", "data/ivector2_projected.hdf5" -# ), -# ivec1.write_feature, -# ivec1.read_feature, -# ) - -# # enroll model from random features -# random_features = utils.random_training_set( -# (20, 45), count=5, minimum=-5.0, maximum=5.0 -# ) -# enroll_features = [ivec1.project(feature) for feature in random_features] - -# model = ivec1.enroll(enroll_features) -# _compare( -# model, -# pkg_resources.resource_filename("bob.bio.gmm.test", "data/ivector2_model.hdf5"), -# ivec1.write_model, -# ivec1.read_model, -# ) - -# # compare model with probe -# probe = ivec1.read_feature( -# pkg_resources.resource_filename( -# "bob.bio.gmm.test", "data/ivector2_projected.hdf5" -# ) -# ) -# logger.info("%f" % ivec1.score(model, probe)) -# reference_score = 1.21879822 -# assert ( -# abs(ivec1.score(model, probe) - reference_score) < 1e-5 -# ), "The scores differ: %3.8f, %3.8f" % (ivec1.score(model, probe), reference_score) -# assert ( -# abs(ivec1.score_for_multiple_probes(model, [probe, probe]) - reference_score) -# < 1e-5 -# ) - - -# def test_ivector_lda_wccn_plda(): -# temp_file = bob.io.base.test_utils.temporary_filename() -# ivec1 = bob.bio.base.load_resource( -# "ivector-lda-wccn-plda", "algorithm", preferred_package="bob.bio.gmm" -# ) -# ivec1.use_lda = True -# ivec1.use_wccn = True -# ivec1.use_plda = True -# # create smaller IVector object -# ivec2 = bob.bio.gmm.algorithm.IVector( -# number_of_gaussians=2, -# subspace_dimension_of_t=10, -# kmeans_training_iterations=1, -# tv_training_iterations=1, -# INIT_SEED=seed_value, -# use_lda=True, -# lda_dim=3, -# use_wccn=True, -# use_plda=True, -# plda_dim_F=2, -# plda_dim_G=2, -# plda_training_iterations=2, -# ) - -# train_data = utils.random_training_set_by_id( -# (100, 45), count=5, minimum=-5.0, maximum=5.0 -# ) 
- -# # reference is the same as for GMM projection -# reference_file = pkg_resources.resource_filename( -# "bob.bio.gmm.test", "data/ivector3_projector.hdf5" -# ) -# try: -# # train the projector - -# ivec2.train_projector(train_data, temp_file) - -# assert os.path.exists(temp_file) - -# if regenerate_refs: -# shutil.copy(temp_file, reference_file) - -# # check projection matrix -# ivec1.load_projector(reference_file) -# ivec2.load_projector(temp_file) - -# assert ivec1.ubm.is_similar_to(ivec2.ubm) -# assert ivec1.tv.is_similar_to(ivec2.tv) -# assert ivec1.whitener.is_similar_to(ivec2.whitener) -# finally: -# if os.path.exists(temp_file): -# os.remove(temp_file) - -# # generate and project random feature -# feature = utils.random_array((20, 45), -5.0, 5.0, seed=84) -# projected = ivec1.project(feature) -# _compare( -# projected, -# pkg_resources.resource_filename( -# "bob.bio.gmm.test", "data/ivector3_projected.hdf5" -# ), -# ivec1.write_feature, -# ivec1.read_feature, -# ) - -# # enroll model from random features -# random_features = utils.random_training_set( -# (20, 45), count=5, minimum=-5.0, maximum=5.0 -# ) -# enroll_features = [ivec1.project(feature) for feature in random_features] -# model = ivec1.enroll(enroll_features) -# _compare( -# model, -# pkg_resources.resource_filename("bob.bio.gmm.test", "data/ivector3_model.hdf5"), -# ivec1.write_model, -# ivec1.read_model, -# ) - -# # compare model with probe -# probe = ivec1.read_feature( -# pkg_resources.resource_filename( -# "bob.bio.gmm.test", "data/ivector3_projected.hdf5" -# ) -# ) -# reference_score = 0.2954148598 -# assert ( -# abs(ivec1.score(model, probe) - reference_score) < 1e-5 -# ), "The scores differ: %3.8f, %3.8f" % (ivec1.score(model, probe), reference_score) -# assert ( -# abs(ivec1.score_for_multiple_probes(model, [probe, probe]) - reference_score) -# < 1e-5 -# ) diff --git a/bob/bio/gmm/test/test_gmm.py b/bob/bio/gmm/test/test_gmm.py new file mode 100644 index 0000000..e60ec31 --- /dev/null +++ b/bob/bio/gmm/test/test_gmm.py @@ -0,0 +1,159 @@ +#!/usr/bin/env python +# vim: set fileencoding=utf-8 : +# @author: Manuel Guenther <Manuel.Guenther@idiap.ch> +# @date: Thu May 24 10:41:42 CEST 2012 +# +# Copyright (C) 2011-2012 Idiap Research Institute, Martigny, Switzerland +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +import logging +import os +import tempfile + +import pkg_resources + +import bob.bio.gmm + +from bob.bio.base.test import utils +from bob.bio.gmm.algorithm import GMM +from bob.learn.em.mixture import GMMMachine + +logger = logging.getLogger(__name__) + +regenerate_refs = False + +seed_value = 5489 + + +def test_class(): + """Tests the creation and initialization of the GMM class.""" + gmm1 = bob.bio.base.load_resource( + "gmm", "algorithm", preferred_package="bob.bio.gmm" + ) + assert isinstance(gmm1, GMM) + assert isinstance( + gmm1, bob.bio.base.pipelines.vanilla_biometrics.abstract_classes.BioAlgorithm + ) + assert gmm1.number_of_gaussians == 512 + + +def test_training(): + """Tests the generation of the UBM.""" + gmm1 = GMM( + number_of_gaussians=2, + kmeans_training_iterations=1, + ubm_training_iterations=1, + init_seed=seed_value, + ) + train_data = utils.random_training_set( + (100, 45), count=5, minimum=-5.0, maximum=5.0 + ) + reference_file = pkg_resources.resource_filename( + "bob.bio.gmm.test", "data/gmm_ubm.hdf5" + ) + + # Train the projector + gmm1.fit(train_data) + + with tempfile.NamedTemporaryFile(prefix="bob_", suffix="_model.hdf5") as fd: + temp_file = fd.name + gmm1.save_model(temp_file) + + assert os.path.exists(temp_file) + + if regenerate_refs: + gmm1.save_model(reference_file) + + gmm1.ubm = GMMMachine.from_hdf5(reference_file) + assert gmm1.ubm.is_similar_to(GMMMachine.from_hdf5(temp_file)) + + +def test_projector(): + """Tests the projector.""" + # Load the UBM + gmm1 = GMM(number_of_gaussians=2) + gmm1.ubm = GMMMachine.from_hdf5( + pkg_resources.resource_filename("bob.bio.gmm.test", "data/gmm_ubm.hdf5") + ) + + # Generate and project random feature + feature = utils.random_array((20, 45), -5.0, 5.0, seed=84) + projected = gmm1.project(feature) + assert isinstance(projected, bob.learn.em.mixture.GMMStats) + + reference_path = pkg_resources.resource_filename( + "bob.bio.gmm.test", "data/gmm_projected.hdf5" + ) + + if regenerate_refs: + projected.save(reference_path) + + reference = gmm1.read_feature(reference_path) + assert projected.is_similar_to(reference) + + +def test_enroll(): + # Load the UBM + ubm = GMMMachine.from_hdf5( + pkg_resources.resource_filename("bob.bio.gmm.test", "data/gmm_ubm.hdf5") + ) + # Create a GMM object with that UBM + gmm1 = GMM( + number_of_gaussians=2, enroll_update_means=True, enroll_update_variances=True + ) + gmm1.ubm = ubm + # Enroll the biometric reference from random features + enroll = utils.random_training_set((20, 45), 5, -5.0, 5.0, seed=21) + biometric_reference = gmm1.enroll(enroll) + assert not biometric_reference.is_similar_to(biometric_reference.ubm) + assert isinstance(biometric_reference, GMMMachine) + + reference_file = pkg_resources.resource_filename( + "bob.bio.gmm.test", "data/gmm_enrolled.hdf5" + ) + + if regenerate_refs: + biometric_reference.save(reference_file) + + gmm2 = GMMMachine.from_hdf5(reference_file, ubm=ubm) + assert biometric_reference.is_similar_to(gmm2) + + +def test_score(): + gmm1 = GMM(number_of_gaussians=2) + gmm1.ubm = GMMMachine.from_hdf5( + pkg_resources.resource_filename("bob.bio.gmm.test", "data/gmm_ubm.hdf5") + ) + biometric_reference = GMMMachine.from_hdf5( + pkg_resources.resource_filename("bob.bio.gmm.test", "data/gmm_enrolled.hdf5"), + ubm=gmm1.ubm, + ) + probe = gmm1.read_feature( + pkg_resources.resource_filename("bob.bio.gmm.test", "data/gmm_projected.hdf5") + ) + + reference_score = 0.045073 + assert ( + abs(gmm1.score(biometric_reference, probe) - reference_score) < 1e-5 + ), 
"The scores differ: %3.8f, %3.8f" % ( + gmm1.score(biometric_reference, probe), + reference_score, + ) + assert ( + abs( + gmm1.score_for_multiple_probes(biometric_reference, [probe, probe]) + - reference_score + ) + < 1e-5 + ) -- GitLab