Commit 10bec60c authored by Amir MOHAMMADI

pre-commit required changes

parent d62e376e
1 merge request: !13 Several changes
Pipeline #58625 failed
Showing 122 additions and 103 deletions
# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
-from . import algorithm
-from . import preprocessor
-from . import tools
-from . import config
-from . import script
+from . import algorithm, config, preprocessor, script, tools  # noqa: F401
def get_config():
"""Returns a string containing the configuration information.
"""
"""Returns a string containing the configuration information."""
import bob.extension
return bob.extension.get_config(__name__)
# gets sphinx autodoc done right - don't remove it
-__all__ = [_ for _ in dir() if not _.startswith('_')]
+__all__ = [_ for _ in dir() if not _.startswith("_")]
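Aside on the consolidated import above: the # noqa: F401 marker tells flake8 not to flag the line as "imported but unused"; those imports exist purely to re-export the submodules, so that, for example, the following keeps working:

    import bob.fusion.base
    algorithm = bob.fusion.base.algorithm.Weighted_Sum()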
#!/usr/bin/env python
-from __future__ import division
-from __future__ import absolute_import
+from __future__ import absolute_import, division
+import logging
import numpy
from .Algorithm import Algorithm
-import logging
logger = logging.getLogger(__name__)
@@ -18,14 +17,14 @@ class AND(Algorithm):
    def __init__(self, thresholds=None, **kwargs):
        super(AND, self).__init__(classifier=self, **kwargs)
        self.thresholds = thresholds
-        self.str['thresholds'] = thresholds
+        self.str["thresholds"] = thresholds

    def fit(self, X, y):
        pass

    def decision_function(self, scores):
        if self.thresholds is None:
-            ValueError('No threshold was specified.')
+            ValueError("No threshold was specified.")

        for i, th in enumerate(self.thresholds):
            mask = scores[:, i + 1] < th
@@ -33,5 +32,5 @@ class AND(Algorithm):
        mask = numpy.sum(numpy.isnan(scores[:, 1:]), axis=1, dtype=bool)
        new_scores = numpy.array(scores[0])
-        new_scores[mask] = numpy.finfo('float16').min
+        new_scores[mask] = numpy.finfo("float16").min
        return new_scores
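Review note, beyond the scope of this formatting commit: in decision_function above, the ValueError is constructed but never raised, so a missing thresholds list goes unreported and the code only fails later, inside the for loop. The intended guard is presumably:

        if self.thresholds is None:
            raise ValueError("No threshold was specified.")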
#!/usr/bin/env python
-from __future__ import division
-from __future__ import absolute_import
+from __future__ import absolute_import, division
-import numpy as np
+import logging
import pickle
-import logging
+import numpy as np
logger = logging.getLogger(__name__)
@@ -22,11 +21,7 @@ class Algorithm(object):
        A dictionary that its content will printed in the __str__ method.
    """
-    def __init__(self,
-                 preprocessors=None,
-                 classifier=None,
-                 **kwargs
-                 ):
+    def __init__(self, preprocessors=None, classifier=None, **kwargs):
"""
Parameters
----------
@@ -44,9 +39,9 @@ class Algorithm(object):
        super(Algorithm, self).__init__(**kwargs)
        self.classifier = classifier
        self.preprocessors = preprocessors
-        self.str = {'preprocessors': preprocessors}
+        self.str = {"preprocessors": preprocessors}
        if classifier is not self:
-            self.str['classifier'] = classifier
+            self.str["classifier"] = classifier

    def train_preprocessors(self, X, y=None):
        """Train preprocessors in order.
@@ -81,7 +76,7 @@ class Algorithm(object):
"""
train_scores = np.vstack((train_neg, train_pos))
neg_len = train_neg.shape[0]
y = np.zeros((train_scores.shape[0],), dtype='bool')
y = np.zeros((train_scores.shape[0],), dtype="bool")
y[neg_len:] = True
self.classifier.fit(train_scores, y)
@@ -107,9 +102,16 @@ class Algorithm(object):
            A string containing the full information of all parameters of this
            (and the derived) class.
        """
-        return "%s(%s)" % (str(self.__class__), ", ".join(
-            ["%s=%s" % (key, value) for key, value in
-             self.str.items() if value is not None]))
+        return "%s(%s)" % (
+            str(self.__class__),
+            ", ".join(
+                [
+                    "%s=%s" % (key, value)
+                    for key, value in self.str.items()
+                    if value is not None
+                ]
+            ),
+        )

    def save(self, model_file):
        """Save the instance of the algorithm.
@@ -140,6 +142,6 @@ class Algorithm(object):
        with open(model_file, "rb") as f:
            algo_class = pickle.load(f)
            algo = algo_class()
-            if not hasattr(algo, 'custom_save'):
+            if not hasattr(algo, "custom_save"):
                return pickle.load(f)
            return algo.load(model_file)
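For context, this loader pairs with save/custom_save: the pickle stream begins with the algorithm class, so load can instantiate it and then either unpickle the complete instance (the default branch above) or hand over to the subclass's own loader when a custom_save exists. A minimal round-trip sketch, assuming save writes the class followed by the instance, as load expects; the file name is illustrative:

    import bob.fusion.base

    algo = bob.fusion.base.algorithm.Weighted_Sum(weights=[0.5, 0.5])
    algo.save("fusion.pkl")  # pickles type(algo), then its state
    restored = bob.fusion.base.algorithm.Algorithm().load("fusion.pkl")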
#!/usr/bin/env python
-from __future__ import division
-from __future__ import absolute_import
+from __future__ import absolute_import, division
+import logging
import pickle
-from .Algorithm import Algorithm
from h5py import File as HDF5File
-import logging
+from .Algorithm import Algorithm
logger = logging.getLogger(__name__)
@@ -16,13 +16,13 @@ class AlgorithmBob(Algorithm):
"""A class to be used in score fusion using bob machines."""
def _get_hdf5_file(self, model_file):
return model_file[:-3] + 'hdf5'
return model_file[:-3] + "hdf5"
def custom_save(self, model_file):
# dump preprocessors in a pickle file because
# we don't know how they look like
# saves the class to create it later.
with open(model_file, 'wb') as f:
with open(model_file, "wb") as f:
pickle.dump(type(self), f)
pickle.dump(self.preprocessors, f)
# just for consistent string representation
#!/usr/bin/env python
-from __future__ import division
-from __future__ import absolute_import
+from __future__ import absolute_import, division
-from .Algorithm import Algorithm
import logging
+from .Algorithm import Algorithm
logger = logging.getLogger(__name__)
@@ -15,9 +15,7 @@ class Empty(Algorithm):
preprocessors."""
def __init__(self, **kwargs):
super(Empty, self).__init__(
classifier=self,
**kwargs)
super(Empty, self).__init__(classifier=self, **kwargs)
def fit(self, X, y):
pass
#!/usr/bin/env python
-from __future__ import division
-from __future__ import absolute_import
+from __future__ import absolute_import, division
+import logging
import numpy as np
from bob.learn.em.mixture import GMMMachine
-from .AlgorithmBob import AlgorithmBob
-import logging
+from .AlgorithmBob import AlgorithmBob
logger = logging.getLogger("bob.fusion.base")
@@ -64,12 +64,10 @@ class GMM(AlgorithmBob):
        if self.n_gaussians is None:
            self.n_gaussians = array.shape[1] + 1
            logger.warning(
-                "Number of Gaussians was None. " "Using {}.".format(self.n_gaussians)
+                "Number of Gaussians was None. "
+                "Using {}.".format(self.n_gaussians)
            )

-        # Computes input size
-        input_size = array.shape[1]
-        # Creates the machines (KMeans and GMM)
        logger.debug("Training GMM machine")
        self.machine = GMMMachine(
@@ -84,4 +82,8 @@ class GMM(AlgorithmBob):
        self.machine.fit(array)

    def decision_function(self, scores):
-        return np.fromiter((self.machine.log_likelihood(s) for s in scores), np.float, scores.shape[0])
+        return np.fromiter(
+            (self.machine.log_likelihood(s) for s in scores),
+            np.float,
+            scores.shape[0],
+        )
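Note, unrelated to the quoting changes in this commit: np.float was deprecated in NumPy 1.20 and removed in 1.24, so this decision_function will break on current NumPy. The builtin float (or np.float64) is the drop-in replacement:

        return np.fromiter(
            (self.machine.log_likelihood(s) for s in scores),
            float,
            scores.shape[0],
        )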
#!/usr/bin/env python
-from __future__ import division
-from __future__ import absolute_import
+from __future__ import absolute_import, division
+import logging
import numpy
from .Algorithm import Algorithm
-import logging
logger = logging.getLogger(__name__)
@@ -16,11 +15,9 @@ class Weighted_Sum(Algorithm):
"""weighted sum (default: mean)"""
def __init__(self, weights=None, **kwargs):
super(Weighted_Sum, self).__init__(
classifier=self,
**kwargs)
super(Weighted_Sum, self).__init__(classifier=self, **kwargs)
self.weights = weights
self.str['weights'] = weights
self.str["weights"] = weights
def fit(self, X, y):
pass
from .Algorithm import Algorithm
from .AlgorithmBob import AlgorithmBob
-from .Weighted_Sum import Weighted_Sum
-from .GMM import GMM
from .Empty import Empty
+from .GMM import GMM
+from .Weighted_Sum import Weighted_Sum
# gets sphinx autodoc done right - don't remove it
@@ -28,4 +28,4 @@ __appropriate__(
    GMM,
    Empty,
)
-__all__ = [_ for _ in dir() if not _.startswith('_')]
+__all__ = [_ for _ in dir() if not _.startswith("_")]
#!/usr/bin/env python
-import bob.fusion.base
from sklearn.preprocessing import StandardScaler
+import bob.fusion.base
algorithm = bob.fusion.base.algorithm.GMM(preprocessors=[StandardScaler()])
algorithm_tanh = bob.fusion.base.algorithm.GMM(
-    preprocessors=[bob.fusion.base.preprocessor.Tanh()])
+    preprocessors=[bob.fusion.base.preprocessor.Tanh()]
+)
#!/usr/bin/env python
-import bob.fusion.base
-from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
+from sklearn.preprocessing import StandardScaler
+import bob.fusion.base
algorithm = bob.fusion.base.algorithm.Algorithm(
-    preprocessors=[StandardScaler()],
-    classifier=LogisticRegression())
+    preprocessors=[StandardScaler()], classifier=LogisticRegression()
+)
algorithm_tanh = bob.fusion.base.algorithm.Algorithm(
    preprocessors=[bob.fusion.base.preprocessor.Tanh()],
-    classifier=LogisticRegression())
+    classifier=LogisticRegression(),
+)
#!/usr/bin/env python
-import bob.fusion.base
from sklearn.preprocessing import StandardScaler
+import bob.fusion.base
algorithm = bob.fusion.base.algorithm.Weighted_Sum(
-    preprocessors=[StandardScaler()])
+    preprocessors=[StandardScaler()]
+)
algorithm_tanh = bob.fusion.base.algorithm.Weighted_Sum(
-    preprocessors=[bob.fusion.base.preprocessor.Tanh()])
+    preprocessors=[bob.fusion.base.preprocessor.Tanh()]
+)
#!/usr/bin/env python
+from sklearn.preprocessing import PolynomialFeatures, StandardScaler
import bob.fusion.base
-from sklearn.preprocessing import StandardScaler, PolynomialFeatures
algorithm = bob.fusion.base.algorithm.LLR(
-    preprocessors=[StandardScaler(), PolynomialFeatures(degree=2)])
+    preprocessors=[StandardScaler(), PolynomialFeatures(degree=2)]
+)
algorithm_tanh = bob.fusion.base.algorithm.LLR(
-    preprocessors=[bob.fusion.base.preprocessor.Tanh(),
-                   PolynomialFeatures(degree=2)])
+    preprocessors=[
+        bob.fusion.base.preprocessor.Tanh(),
+        PolynomialFeatures(degree=2),
+    ]
+)
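For readers unfamiliar with these config files: LLR combined with PolynomialFeatures(degree=2) amounts to polynomial logistic regression over the scores being fused. A rough sklearn-only sketch of the same idea, for illustration only (bob's LLR wiring may differ in detail; scores and labels are placeholder names):

    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import PolynomialFeatures, StandardScaler

    # scores: (n_samples, n_systems) array; labels: genuine vs. impostor
    fuser = make_pipeline(
        StandardScaler(), PolynomialFeatures(degree=2), LogisticRegression()
    )
    # fuser.fit(scores, labels); fused = fuser.decision_function(scores)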
#!/usr/bin/env python
+from sklearn.preprocessing import PolynomialFeatures, StandardScaler
import bob.fusion.base
-from sklearn.preprocessing import StandardScaler, PolynomialFeatures
algorithm = bob.fusion.base.algorithm.LLR(
-    preprocessors=[StandardScaler(), PolynomialFeatures(degree=3)])
+    preprocessors=[StandardScaler(), PolynomialFeatures(degree=3)]
+)
algorithm_tanh = bob.fusion.base.algorithm.LLR(
-    preprocessors=[bob.fusion.base.preprocessor.Tanh(),
-                   PolynomialFeatures(degree=3)])
+    preprocessors=[
+        bob.fusion.base.preprocessor.Tanh(),
+        PolynomialFeatures(degree=3),
+    ]
+)
-from sklearn.preprocessing import StandardScaler
import numpy as np
+from sklearn.preprocessing import StandardScaler
# to fix the sphinx docs
-StandardScaler.__module__ = 'sklearn.preprocessing'
+StandardScaler.__module__ = "sklearn.preprocessing"
class Tanh(StandardScaler):
@@ -22,7 +23,8 @@ class Tanh(StandardScaler):
    def __init__(self, copy=True, **kwargs):
        """Initialize self. See help(type(self)) for accurate signature."""
        super(Tanh, self).__init__(
-            copy=copy, with_mean=True, with_std=True, **kwargs)
+            copy=copy, with_mean=True, with_std=True, **kwargs
+        )

    def fit(self, X, y=None):
        """Estimates the mean and standard deviation of samples.
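For context on what Tanh adds over plain z-scoring (its transform body is not shown in this diff): tanh score normalization squashes z-scored values into the (0, 1) range, commonly as 0.5 * (tanh(0.01 * (s - mu) / sigma) + 1). A minimal sketch of that formula, assuming this class applies it on top of the mean and standard deviation it inherits from StandardScaler:

    import numpy as np

    def tanh_normalize(scores, mean, std):
        # squash z-scored values into the (0, 1) range
        return 0.5 * (np.tanh(0.01 * (scores - mean) / std) + 1)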
-from sklearn.preprocessing import StandardScaler
import numpy as np
+from sklearn.preprocessing import StandardScaler
# to fix the sphinx docs
-StandardScaler.__module__ = 'sklearn.preprocessing'
+StandardScaler.__module__ = "sklearn.preprocessing"
class ZNorm(StandardScaler):
@@ -16,7 +17,8 @@ class ZNorm(StandardScaler):
    def __init__(self, copy=True, **kwargs):
        """Initialize self. See help(type(self)) for accurate signature."""
        super(ZNorm, self).__init__(
-            copy=copy, with_mean=True, with_std=True, **kwargs)
+            copy=copy, with_mean=True, with_std=True, **kwargs
+        )

    def fit(self, X, y=None):
        """Estimates the mean and standard deviation of samples.
@@ -22,4 +22,4 @@ __appropriate__(
    Tanh,
    ZNorm,
)
-__all__ = [_ for _ in dir() if not _.startswith('_')]
+__all__ = [_ for _ in dir() if not _.startswith("_")]
-from .fuse import routine_fusion
+from .fuse import routine_fusion  # noqa: F401
# gets sphinx autodoc done right - don't remove it
-__all__ = [_ for _ in dir() if not _.startswith('_')]
+__all__ = [_ for _ in dir() if not _.startswith("_")]