Commit a63074a1 authored by Pavel KORSHUNOV

Merge branch 'tensorflow' into 'master'

Some refactoring and tensorflow support

See merge request !10
parents 4ae4e05d eb03b70e
Pipeline #16563 failed in 5 minutes and 6 seconds
......@@ -17,7 +17,7 @@ import logging
logger = logging.getLogger("bob.pad.voice")
class GmmAlgorithm(Algorithm):
class GMM(Algorithm):
"""Trains Logistical Regression classifier and projects testing dat on it."""
def __init__(self,
......@@ -156,8 +156,8 @@ class GmmAlgorithm(Algorithm):
real_features = numpy.vstack(training_features[0])
attack_features = numpy.vstack(training_features[1])
print ("GmmAlgorithm:train_projector(), real_features shape:", real_features.shape)
print ("GmmAlgorithm:train_projector(), attack_features shape:", attack_features.shape)
print ("GMM:train_projector(), real_features shape:", real_features.shape)
print ("GMM:train_projector(), attack_features shape:", attack_features.shape)
print ("Min real ", numpy.min(real_features))
print ("Max real ", numpy.max(real_features))
print ("Min attack ", numpy.min(attack_features))
......@@ -244,4 +244,4 @@ class GmmAlgorithm(Algorithm):
self.score(toscore)
algorithm = GmmAlgorithm()
algorithm = GMM()
......@@ -18,7 +18,7 @@ import logging
logger = logging.getLogger("bob.pad.voice")
class HistDistanceAlgorithm(Algorithm):
class HistDistance(Algorithm):
"""This class is used to test all the possible functions of the tool chain, but it does basically nothing."""
def __init__(self, chi_square=False, hist_intersection=True, probab_dist=False, normalize_features=True, **kwargs):
......@@ -53,10 +53,10 @@ class HistDistanceAlgorithm(Algorithm):
raise ValueError("Training projector: features should contain two lists: real and attack!")
# the format is specified in FileSelector.py:training_list() of bob.spoof.base
# print ("HistDistanceAlgorithm:train_projector(), training_features", type(training_features[0][0]))
# print ("HistDistance:train_projector(), training_features", type(training_features[0][0]))
if isinstance(training_features[0][0][0], numpy.ndarray):
print ("HistDistanceAlgorithm:train_projector(), features are set of arrays of length: ",
print ("HistDistance:train_projector(), features are set of arrays of length: ",
len(training_features[0][0][0]))
real_features = numpy.array([row for feat in training_features[0] for row in feat], dtype=numpy.float64)
attack_features = numpy.array([row for feat in training_features[1] for row in feat], dtype=numpy.float64)
......@@ -64,10 +64,10 @@ class HistDistanceAlgorithm(Algorithm):
real_features = numpy.array(training_features[0], dtype=numpy.float64)
attack_features = numpy.array(training_features[1], dtype=numpy.float64)
# # print ("HistDistanceAlgorithm:train_projector(), real_features", real_features)
# # print ("HistDistanceAlgorithm:train_projector(), attack_features", attack_features)
# print ("HistDistanceAlgorithm:train_projector(), real_features shape:", real_features.shape)
# print ("HistDistanceAlgorithm:train_projector(), attack_features shape:", attack_features.shape)
# # print ("HistDistance:train_projector(), real_features", real_features)
# # print ("HistDistance:train_projector(), attack_features", attack_features)
# print ("HistDistance:train_projector(), real_features shape:", real_features.shape)
# print ("HistDistance:train_projector(), attack_features shape:", attack_features.shape)
# # real_features[real_features<-1024] = -1024
# # attack_features[attack_features<-1024] = -1024
# print ("Min real ", numpy.min(real_features))
......@@ -135,9 +135,9 @@ class HistDistanceAlgorithm(Algorithm):
dist_real = bob.math.histogram_intersection(self.real_mean, feature)
dist_attack = bob.math.histogram_intersection(self.attack_mean, feature)
else:
raise ValueError("HistDistanceAlgorithm: please specify the metric for histogram distance")
raise ValueError("HistDistance: please specify the metric for histogram distance")
# print ("HistDistanceAlgorithm:project(), projection: ", projection)
# print ("HistDistance:project(), projection: ", projection)
return numpy.array([dist_real, dist_attack], dtype=numpy.float64)
# return self.machine(feature)
return numpy.zeros(2, dtype=numpy.float64)
......@@ -158,7 +158,7 @@ class HistDistanceAlgorithm(Algorithm):
The distances of ``feature`` to the mean real and attack histograms.
"""
print ("HistDistanceAlgorithm:project(), feature shape: ", feature.shape)
print ("HistDistance:project(), feature shape: ", feature.shape)
if len(feature) > 0:
if isinstance(feature[0], numpy.ndarray) or isinstance(feature[0], list):
......@@ -170,7 +170,7 @@ class HistDistanceAlgorithm(Algorithm):
def score(self, toscore):
"""Returns the evarage value of the probe"""
print("HistDistanceAlgorithm:score() the score: ", toscore)
print("HistDistance:score() the score: ", toscore)
# projection is already the score in this case
if self.probab_dist:
......@@ -190,12 +190,12 @@ class HistDistanceAlgorithm(Algorithm):
return [dist_real - dist_attack]
else:
raise ValueError("HistDistanceAlgorithm:scoring() please specify the metric for histogram distance")
raise ValueError("HistDistance:scoring() please specify the metric for histogram distance")
def score_for_multiple_projections(self, toscore):
print("HistDistanceAlgorithm:score_for_multiple_projections() the score: ", len(toscore))
print("HistDistance:score_for_multiple_projections() the score: ", len(toscore))
return numpy.array([self.score(score) for score in toscore], dtype=numpy.float64)
algorithm = HistDistanceAlgorithm()
algorithm = HistDistance()
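For illustration, a hypothetical configuration of the renamed class could look as follows; the module path is an assumption (mirroring the ``from .GMM import GMM`` pattern in the package ``__init__`` further below), and the keyword names come from ``HistDistance.__init__`` above:

# hypothetical config sketch -- module path assumed, keywords from HistDistance.__init__
from bob.pad.voice.algorithm.HistDistance import HistDistance

algorithm = HistDistance(
    chi_square=False,        # chi-square distance between histograms
    hist_intersection=True,  # histogram intersection metric (default)
    normalize_features=True,
)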
......@@ -16,7 +16,7 @@ import logging
logger = logging.getLogger("bob.pad.voice")
class LogRegrAlgorithm(Algorithm):
class LogRegr(Algorithm):
"""Trains Logistical Regression classifier and projects testing dat on it."""
def __init__(self, use_PCA_training=False, normalize_features=False, **kwargs):
......@@ -69,8 +69,8 @@ class LogRegrAlgorithm(Algorithm):
[feat if self._check_feature(feat) else numpy.nan for feat in training_features[1]],
dtype=numpy.float64)
# print ("LogRegrAlgorithm:train_projector(), real_features shape:", real_features.shape)
# print ("LogRegrAlgorithm:train_projector(), attack_features shape:", attack_features.shape)
# print ("LogRegr:train_projector(), real_features shape:", real_features.shape)
# print ("LogRegr:train_projector(), attack_features shape:", attack_features.shape)
# print ("Min real ", numpy.min(real_features))
# print ("Max real ", numpy.max(real_features))
# print ("Min attack ", numpy.min(attack_features))
......@@ -130,8 +130,8 @@ class LogRegrAlgorithm(Algorithm):
self.machine.input_subtract = mean
self.machine.input_divide = std
# print ("LogRegrAlgorithm:train_projector(), machine shape: ", self.machine.shape)
# print ("LogRegrAlgorithm:train_projector(), machine weights: ", self.machine.weights)
# print ("LogRegr:train_projector(), machine shape: ", self.machine.shape)
# print ("LogRegr:train_projector(), machine weights: ", self.machine.weights)
hdf5file.cd('/')
hdf5file.create_group('LogRegProjector')
......@@ -203,4 +203,4 @@ class LogRegrAlgorithm(Algorithm):
return toscore
algorithm = LogRegrAlgorithm()
algorithm = LogRegr()
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Pavel Korshunov <pavel.korshunov@idiap.ch>
# @date: Wed 19 Oct 23:43:22 2016
from bob.pad.base.algorithm import Algorithm
import numpy
# import tensorflow as tf
import os
import logging
logger = logging.getLogger("bob.pad.voice")
class TensorflowEval(Algorithm):
"""This class is for evaluating data stored in tensorflow tfrecord format using a pre-trained LSTM model."""
def __init__(self,
architecture_name="mlp",
input_shape=[200, 81], # [temporal_length, feature_size]
network_size=60, # the output size of LSTM cell
normalization_file=None, # file with normalization parameters from train set
**kwargs):
"""Generates a test value that is read and written"""
# call base class constructor registering that this tool performs everything.
Algorithm.__init__(
self,
performs_projection=True,
requires_projector_training=False,
**kwargs
)
self.architecture_name = architecture_name
self.input_shape = input_shape
self.num_time_steps = input_shape[0]
self.network_size = network_size
self.data_std = None
# import ipdb
# ipdb.set_trace()
features_length = input_shape[1]
if normalization_file and os.path.exists(normalization_file):
logger.info("Loading normalization file '%s' " % normalization_file)
npzfile = numpy.load(normalization_file)
self.data_mean = npzfile['data_mean']
self.data_std = numpy.array(npzfile['data_std'])
if not self.data_std.shape: # if std was saved as scalar
self.data_std = numpy.ones(features_length)
# if self.data_mean.shape[0] > input_shape[0]:
# self.data_mean = self.data_mean[:input_shape[0]]
# self.data_mean = numpy.reshape(self.data_mean, input_shape)
# if self.data_std.shape[0] > input_shape[0]:
# self.data_std = self.data_std[:input_shape[0]]
# self.data_std = numpy.reshape(self.data_std, input_shape)
else:
logger.warn("Normalization file '%s' does not exist!" % normalization_file)
self.data_mean = 0
self.data_std = 1
self.data_reader = None
self.session = None
self.dnn_model = None
self.data_placeholder = None
# def simple_lstm_network(self, train_data_shuffler, batch_size=10, lstm_cell_size=64,
# num_time_steps=28, num_classes=10, seed=10, reuse=False):
# import tensorflow as tf
# from bob.learn.tensorflow.layers import lstm
# slim = tf.contrib.slim
#
# if isinstance(train_data_shuffler, tf.Tensor):
# inputs = train_data_shuffler
# else:
# inputs = train_data_shuffler("data", from_queue=False)
#
# initializer = tf.contrib.layers.xavier_initializer(seed=seed)
#
# # Creating an LSTM network
# graph = lstm(inputs, lstm_cell_size, num_time_steps=num_time_steps, batch_size=batch_size,
# output_activation_size=num_classes, scope='lstm', name='sync_cell',
# weights_initializer=initializer, activation=tf.nn.sigmoid, reuse=reuse)
#
# # fully connect the LSTM output to the classes
# graph = slim.fully_connected(graph, num_classes, activation_fn=None, scope='fc1',
# weights_initializer=initializer, reuse=reuse)
#
# return graph
def normalize_data(self, features):
mean = numpy.mean(features, axis=0)
std = numpy.std(features, axis=0)
return numpy.divide(features - mean, std)
def _check_feature(self, feature):
"""Checks that the features are appropriate."""
if not isinstance(feature, numpy.ndarray) or feature.ndim != 2 or feature.dtype != numpy.float32:
raise ValueError("The given feature is not appropriate", feature)
return True
def restore_trained_model(self, projector_file):
import tensorflow as tf
if self.session is None:
self.session = tf.Session()
# add extra dimension to the input, so that 2D convolution would work
data_pl = tf.placeholder(tf.float32, shape=(None,) + tuple(self.input_shape) + (1,), name="data")
# create an empty graph of the correct architecture but with needed batch_size==1
if self.architecture_name == 'lstm':
from bob.learn.tensorflow.network import simple_lstm_network
graph = simple_lstm_network(data_pl, batch_size=1,
lstm_cell_size=self.network_size, num_time_steps=self.num_time_steps,
num_classes=2, reuse=False)
elif self.architecture_name == 'mlp':
from bob.learn.tensorflow.network import mlp_network
graph = mlp_network(data_pl,
hidden_layer_size=self.network_size,
num_time_steps=self.num_time_steps,
num_classes=2, reuse=False)
elif self.architecture_name == 'simplecnn':
from bob.learn.tensorflow.network import simple2Dcnn_network
graph = simple2Dcnn_network(data_pl,
num_classes=2, reuse=False)
elif self.architecture_name == 'lightcnn':
from bob.learn.tensorflow.network import LightCNN9
network = LightCNN9(n_classes=2, device="/cpu:0")
graph = network(data_pl, reuse=False)
else:
return None
self.session.run(tf.global_variables_initializer())
saver = tf.train.Saver()
# saver = tf.train.import_meta_graph(projector_file + ".meta", clear_devices=True)
saver.restore(self.session, projector_file)
return tf.nn.softmax(graph, name="softmax"), data_pl
# return graph, data_pl
def load_projector(self, projector_file):
logger.info("Loading pretrained model from {0}".format(projector_file))
self.dnn_model, self.data_placeholder = self.restore_trained_model(projector_file)
def project_feature(self, feature):
logger.info(" .... Projecting %d features vector" % feature.shape[0])
from bob.learn.tensorflow.datashuffler import DiskAudio
if not self.data_reader:
self.data_reader = DiskAudio([0], [0], [1] + self.input_shape)
# normalize the feature using pre-loaded normalization parameters
if self.data_std is not None and self.data_std.all() > 0:
feature = numpy.divide(feature - self.data_mean, self.data_std)
# split the feature in the sliding window frames
frames, _ = self.data_reader.split_features_in_windows(features=feature, label=1,
win_size=self.num_time_steps,
sliding_step=1)
# logger.info(" .... And frames of shape {0} are extracted to pass into DNN model".format(frames.shape))
if frames is None:
return None
logger.info(" .... And frames of shape {0} are extracted to pass into DNN model".format(frames.shape))
projections = numpy.zeros((len(frames), 2), dtype=numpy.float32)
for i in range(frames.shape[0]):
frame = frames[i]
# reshape to 4D shape, so that all networks, including CNN-based
# would work properly
frame = numpy.reshape(frame, [1] + self.input_shape + [1])
#logger.info(" .... projecting frame of shape {0} onto DNN model".format(frame.shape))
if self.session is not None:
forward_output = self.session.run(self.dnn_model, feed_dict={self.data_placeholder: frame})
projections[i] = forward_output[0]
else:
raise ValueError("Tensorflow session was not initialized, so cannot project on DNN model!")
logger.info("Projected scores {0}".format(projections))
return numpy.asarray(projections, dtype=numpy.float32)
def project(self, feature):
"""project(feature) -> projected
This function will project the given feature.
It is assured that the :py:meth:`load_projector` was called once before the ``project`` function is executed.
**Parameters:**
feature : object
The feature to be projected.
**Returns:**
projected : object
The projected features.
Must be writable with the :py:meth:`write_feature` function and readable with the :py:meth:`read_feature` function.
"""
if len(feature) > 0:
# if we have a set of independent blocks to process
# collect all projections and flatten them in one output array
if isinstance(feature, list):
projections = []
for feat in feature:
feat = numpy.cast['float32'](feat)
self._check_feature(feat)
projection = self.project_feature(feat)
if projection is not None:
projections.extend(projection)
if len(projections) == 0:
return None
return numpy.asarray(projections, dtype=numpy.float32)
else:
feature = numpy.cast['float32'](feature)
self._check_feature(feature)
return self.project_feature(feature)
else:
return numpy.zeros(1, dtype=numpy.float64)
def score_for_multiple_projections(self, toscore):
"""scorescore_for_multiple_projections(toscore) -> score
**Returns:**
score : float
A score value for the object ``toscore``.
"""
scores = numpy.asarray(toscore, dtype=numpy.float32)
real_scores = scores[:, 1]
logger.debug("Mean score %f", numpy.mean(real_scores))
return [numpy.mean(real_scores)]
def score(self, toscore):
"""Returns the evarage value of the probe"""
logger.debug("score() score %f", toscore)
# return only real score
return [toscore[0]]
algorithm = TensorflowEval()
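For illustration, a hypothetical configuration of the new evaluator, written in the same style as the config files further below; the paths are placeholders and only the keywords shown in ``TensorflowEval.__init__`` are used:

import bob.pad.voice

# hypothetical config sketch -- model/normalization paths are placeholders
algorithm = bob.pad.voice.algorithm.TensorflowEval(
    architecture_name="lstm",   # one of "mlp", "lstm", "simplecnn", "lightcnn"
    input_shape=[200, 81],      # [temporal_length, feature_size]
    network_size=60,            # LSTM cell / hidden layer size
    normalization_file="/path/to/train_normalization.npz",  # placeholder path
)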
from .gmm_algorithm import GmmAlgorithm
from .logregr_algorithm import LogRegrAlgorithm
from .tensorflow_algorithm import TensorflowAlgo
from .GMM import GMM
from .LogRegr import LogRegr
from .TensorflowEval import TensorflowEval
# gets sphinx autodoc done right - don't remove it
def __appropriate__(*args):
......@@ -17,8 +17,8 @@ def __appropriate__(*args):
for obj in args: obj.__module__ = __name__
__appropriate__(
GmmAlgorithm,
LogRegrAlgorithm,
TensorflowAlgo,
GMM,
LogRegr,
TensorflowEval,
)
__all__ = [_ for _ in dir() if not _.startswith('_')]
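For orientation, a minimal sketch (not part of the diff) of how downstream imports change with the renamed classes exported here:

# before this merge:
#   from bob.pad.voice.algorithm import GmmAlgorithm, LogRegrAlgorithm, TensorflowAlgo
# after this merge, the same algorithms are exposed under shorter names:
from bob.pad.voice.algorithm import GMM, LogRegr, TensorflowEval

gmm = GMM()
logregr = LogRegr(use_PCA_training=False, normalize_features=True)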
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Pavel Korshunov <pavel.korshunov@idiap.ch>
# @date: Wed 19 Oct 23:43:22 2016
from bob.pad.base.algorithm import Algorithm
import logging
logger = logging.getLogger("bob.pad.voice")
class DummyAlgorithm(Algorithm):
"""This class is used to test all the possible functions of the tool chain,
but it does basically nothing."""
def __init__(self, **kwargs):
"""Generates a test value that is read and written"""
# call base class constructor registering that this tool performs everything.
Algorithm.__init__(
self,
performs_projection=False,
requires_projector_training=False,
**kwargs
)
def score(self, toscore):
"""Returns the evarage value of the probe"""
logger.info("score() score %f", toscore)
return toscore
algorithm = DummyAlgorithm()
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Pavel Korshunov <pavel.korshunov@idiap.ch>
# @date: Wed 19 Oct 23:43:22 2016
from bob.pad.base.algorithm import Algorithm
import logging
logger = logging.getLogger("bob.pad.voice")
class DummyAlgorithm (Algorithm):
"""This class is used to test all the possible functions of the tool chain, but it does basically nothing."""
def __init__(self, **kwargs):
"""Generates a test value that is read and written"""
# call base class constructor registering that this tool performs everything.
Algorithm.__init__(
self,
performs_projection = False,
requires_projector_training = False,
)
def score(self, toscore):
"""Returns the evarage value of the probe"""
logger.info("score() score %f", toscore)
return toscore
algorithm = DummyAlgorithm()
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Pavel Korshunov <pavel.korshunov@idiap.ch>
# @date: Wed 19 Oct 23:43:22 2016
from bob.pad.base.algorithm import Algorithm
import numpy
import bob.io.base
# import tensorflow as tf
import logging
logger = logging.getLogger("bob.pad.voice")
class TensorflowAlgo(Algorithm):
"""This class is used to test all the possible functions of the tool chain, but it does basically nothing."""
def __init__(self,
**kwargs):
"""Generates a test value that is read and written"""
# call base class constructor registering that this tool performs everything.
Algorithm.__init__(
self,
performs_projection=True,
requires_projector_training=False,
)
self.data_reader = None
# self.session = tf.Session()
self.dnn_model = None
# def __del__(self):
# self.session.close()
def _check_feature(self, feature):
"""Checks that the features are appropriate."""
if not isinstance(feature, numpy.ndarray) or feature.ndim != 1 or feature.dtype != numpy.float32:
raise ValueError("The given feature is not appropriate", feature)
return True
def load_projector(self, projector_file):
logger.info("Loading pretrained model from {0}".format(projector_file))
from bob.learn.tensorflow.network.SequenceNetwork import SequenceNetwork
self.dnn_model = SequenceNetwork()
# self.dnn_model.load_hdf5(bob.io.base.HDF5File(projector_file), shape=[1, 6560, 1])
self.dnn_model.load(projector_file, True)
def project_feature(self, feature):
logger.debug(" .... Projecting %d features vector" % feature.shape[0])
from bob.learn.tensorflow.datashuffler import DiskAudio
if not self.data_reader:
self.data_reader = DiskAudio([0], [0])
frames, labels = self.data_reader.extract_frames_from_wav(feature, 0)
frames = numpy.asarray(frames)
logger.debug(" .... And %d frames are extracted to pass into DNN model" % frames.shape[0])
frames = numpy.reshape(frames, (frames.shape[0], -1, 1))
forward_output = self.dnn_model(frames)
# return tf.nn.log_softmax(tf.nn.log_softmax(forward_output)).eval(session=self.session)
return forward_output
def project(self, feature):
"""project(feature) -> projected
This function will project the given feature.
It is assured that the :py:meth:`load_projector` was called once before the ``project`` function is executed.
**Parameters:**
feature : object
The feature to be projected.
**Returns:**
projected : object
The projected features.
Must be writable with the :py:meth:`write_feature` function and readable with the :py:meth:`read_feature` function.
"""
if len(feature) > 0:
feature = numpy.cast['float32'](feature)
self._check_feature(feature)
return self.project_feature(feature)
else:
return numpy.zeros(1, dtype=numpy.float64)
def score_for_multiple_projections(self, toscore):
"""scorescore_for_multiple_projections(toscore) -> score
**Returns:**
score : float
A score value for the object ``toscore``.
"""
scores = numpy.asarray(toscore, dtype=numpy.float32)
real_scores = scores[:, 0]
logger.debug("Mean score %f", numpy.mean(real_scores))
return [numpy.mean(real_scores)]
def score(self, toscore):
"""Returns the evarage value of the probe"""
logger.debug("score() score %f", toscore)
# return only real score
return [toscore[0]]
algorithm = TensorflowAlgo()
import bob.pad.voice
algorithm = bob.pad.voice.algorithm.LogRegrAlgorithm(
algorithm = bob.pad.voice.algorithm.LogRegr(
# use PCA to reduce dimension of features
use_PCA_training = False,
normalize_features = True,
......
import bob.pad.voice
algorithm = bob.pad.voice.algorithm.LogRegrAlgorithm(
algorithm = bob.pad.voice.algorithm.LogRegr(
# use PCA to reduce dimension of features
use_PCA_training = True,
)
......
import bob.pad.voice
algorithm = bob.pad.voice.algorithm.GmmAlgorithm(
algorithm = bob.pad.voice.algorithm.GMM(
number_of_gaussians = 512,
kmeans_training_iterations = 10, # Maximum number of iterations for K-Means
gmm_training_iterations = 10, # Maximum number of iterations for ML GMM Training
......
......@@ -90,7 +90,7 @@ def test_spoof_EnergyLBP2():
'-d', 'bob.pad.voice.test.dummy.database.DummyDatabaseSpeechSpoof()',
'-p', 'bob.bio.spear.preprocessor.Mod_4Hz()',
'-e', 'bob.pad.voice.extractor.LBPHistograms(features_processor=bob.pad.voice.extractor.SpectrogramExtended())',
'-a', 'bob.pad.voice.algorithm.LogRegrAlgorithm()',
'-a', 'bob.pad.voice.algorithm.LogRegr()',
'-vs', 'test_energylbp',
'--temp-directory', test_dir,
'--result-directory', test_dir
......
......@@ -6,13 +6,21 @@
parts = scripts
eggs = bob.pad.voice
bob.db.base
bob.learn.tensorflow
bob.bio.base
bob.bio.spear
bob.bio.gmm
bob.pad.base
bob.db.base
bob.measure
bob.db.asvspoof
bob.db.asvspoof2017
bob.db.avspoof
bob.db.voicepa
bob.extension
bob.pad.db
bob.bio.db
bob.db.cpqd_replay
gridtk
extensions = bob.buildout
......@@ -20,27 +28,41 @@ extensions = bob.buildout
auto-checkout = *
develop = src/bob.bio.spear
src/bob.db.base
src/bob.bio.gmm
src/bob.db.asvspoof2017
src/bob.db.asvspoof
src/bob.db.avspoof
src/bob.bio.base
src/bob.db.voicepa
src/bob.db.cpqd_replay
src/bob.pad.base
src/bob.bio.base
src/bob.db.base
src/bob.extension
src/bob.bio.db
src/bob.pad.db
bob.learn.tensorflow
/remote/idiap.svm/home.active/pkorshunov/src/bob.learn.tensorflow .
.
; options for bob.buildout
debug = true
debug = false
verbose = true
newest = false
[sources]
bob.db.base = git git@gitlab.idiap.ch:bob/bob.db.base.git
bob.bio.gmm = git git@gitlab.idiap.ch:bob/bob.bio.gmm.git
bob.db.asvspoof2017 = git git@gitlab.idiap.ch:bob/bob.db.asvspoof2017.git
bob.db.avspoof = git git@gitlab.idiap.ch:bob/bob.db.avspoof.git
bob.db.asvspoof = git git@gitlab.idiap.ch:bob/bob.db.asvspoof.git
bob.db.base = git git@gitlab.idiap.ch:bob/bob.db.base.git
bob.bio.base = git git@gitlab.idiap.ch:bob/bob.bio.base.git
bob.pad.base = git git@gitlab.idiap.ch:bob/bob.pad.base.git
bob.db.voicepa = git git@gitlab.idiap.ch:bob/bob.db.voicepa.git
bob.extension = git git@gitlab.idiap.ch:bob/bob.extension.git
bob.bio.db = git git@gitlab.idiap.ch:bob/bob.bio.db.git
bob.pad.db = git git@gitlab.idiap.ch:bob/bob.pad.db.git
bob.bio.spear = git git@gitlab.idiap.ch:bob/bob.bio.spear.git
bob.db.cpqd_replay = git git@gitlab.idiap.ch:bob/bob.db.cpqd_replay.git
bob.learn.tensorflow = fs bob.learn.tensorflow full-path=/remote/idiap.svm/home.active/pkorshunov/src/bob.learn.tensorflow
[scripts]
recipe = bob.buildout:scripts
......
......@@ -116,15 +116,15 @@ setup(
],
'bob.pad.algorithm': [
'tensorflow = bob.pad.voice.algorithm.tensorflow_algorithm:algorithm',
'dummy-algo = bob.pad.voice.algorithm.dummy_algorithm:algorithm',
'tensorflow = bob.pad.voice.algorithm.TensorflowEval:algorithm',
'dummy-algo = bob.pad.voice.algorithm.dummy:algorithm',
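For reference, a small sketch of how the updated 'bob.pad.algorithm' entries can be enumerated with standard setuptools tooling (assumed, not shown in this diff); each entry resolves to the module-level ``algorithm`` instance defined in the corresponding module:

import pkg_resources

# list the PAD algorithms registered by this package; after this change the
# 'tensorflow' entry points to bob.pad.voice.algorithm.TensorflowEval:algorithm
for entry_point in pkg_resources.iter_entry_points('bob.pad.algorithm'):
    algo = entry_point.load()
    print(entry_point.name, type(algo).__name__)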