bob.learn.tensorflow
Commit be59f56f
authored Oct 12, 2016 by Tiago de Freitas Pereira

Organizing the trainers

parent 9da6ef05

Showing 13 changed files with 384 additions and 291 deletions (+384 -291)
bob/learn/tensorflow/analyzers/ExperimentAnalizer.py   +12  -7
bob/learn/tensorflow/analyzers/SoftmaxAnalizer.py      +4   -50
bob/learn/tensorflow/data/BaseDataShuffler.py          +33  -2
bob/learn/tensorflow/loss/ContrastiveLoss.py           +0   -1
bob/learn/tensorflow/loss/TripletLoss.py               +1   -2
bob/learn/tensorflow/script/train_mnist.py             +6   -1
bob/learn/tensorflow/script/train_mnist_siamese.py     +2   -1
bob/learn/tensorflow/script/train_mnist_triplet.py     +17  -73
bob/learn/tensorflow/script/train_mobio.py             +99  -0
bob/learn/tensorflow/trainers/SiameseTrainer.py        +26  -23
bob/learn/tensorflow/trainers/Trainer.py               +6   -7
bob/learn/tensorflow/trainers/TripletTrainer.py        +177 -124
doc/extra-intersphinx.txt                              +1   -0
bob/learn/tensorflow/analyzers/ExperimentAnalizer.py

@@ -23,7 +23,7 @@ class ExperimentAnalizer:
     """
-    def __init__(self, data_shuffler, machine, session, convergence_threshold=0.01, convergence_reference='eer'):
+    def __init__(self, convergence_threshold=0.01, convergence_reference='eer'):
         """
         Use the CNN as feature extractor for a n-class classification
@@ -38,9 +38,9 @@ class ExperimentAnalizer:
         """
-        self.data_shuffler = data_shuffler
-        self.machine = machine
-        self.session = session
+        self.data_shuffler = None
+        self.network = None
+        self.session = None

         # Statistics
         self.eer = []
@@ -48,16 +48,21 @@ class ExperimentAnalizer:
         self.far100 = []
         self.far1000 = []

-    def __call__(self):
+    def __call__(self, data_shuffler, network, session):
+
+        if self.data_shuffler is None:
+            self.data_shuffler = data_shuffler
+            self.network = network
+            self.session = session

         # Extracting features for enrollment
         enroll_data, enroll_labels = self.data_shuffler.get_batch()
-        enroll_features = self.machine(enroll_data, session=self.session)
+        enroll_features = self.network(enroll_data, session=self.session)
         del enroll_data

         # Extracting features for probing
         probe_data, probe_labels = self.data_shuffler.get_batch()
-        probe_features = self.machine(probe_data, session=self.session)
+        probe_features = self.network(probe_data, session=self.session)
         del probe_data

         # Creating models
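The refactor above makes the analyzer stateless at construction time: its collaborators (data shuffler, network, session) are bound lazily on the first call. A minimal, self-contained sketch of that pattern, with illustrative names only (this is not the library's API):

# Bind collaborators once, on first use; later calls reuse them.
class LazyAnalyzer(object):
    def __init__(self, convergence_threshold=0.01):
        self.convergence_threshold = convergence_threshold
        self.data_shuffler = None
        self.network = None
        self.session = None

    def __call__(self, data_shuffler, network, session):
        if self.data_shuffler is None:  # first call: store the dependencies
            self.data_shuffler = data_shuffler
            self.network = network
            self.session = session
        return self.network(self.data_shuffler)

print(LazyAnalyzer()([1, 2, 3], sum, None))  # prints 6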
bob/learn/tensorflow/analyzers/SoftmaxAnalizer.py

@@ -31,66 +31,20 @@ class SoftmaxAnalizer(object):
         """
         self.data_shuffler = None
-        self.trainer = None
+        self.network = None
         self.session = None

-    def __call__(self, data_shuffler, trainer, session):
+    def __call__(self, data_shuffler, network, session):
         if self.data_shuffler is None:
             self.data_shuffler = data_shuffler
-            self.trainer = trainer
+            self.network = network
             self.session = session

         # Creating the graph
         feature_batch, label_batch = self.data_shuffler.get_placeholders(name="validation_accuracy")
         data, labels = self.data_shuffler.get_batch()
         graph = self.trainer.architecture.compute_graph(feature_batch)

         predictions = numpy.argmax(self.session.run(graph, feed_dict={feature_batch: data[:]}), 1)
         accuracy = 100. * numpy.sum(predictions == labels) / predictions.shape[0]

         summaries = []
         summaries.append(summary_pb2.Summary.Value(tag="accuracy_validation", simple_value=float(accuracy)))
         return summary_pb2.Summary(value=summaries)
-
-
-class SoftmaxSiameseAnalizer(object):
-    """
-    Analizer.
-    """
-
-    def __init__(self):
-        """
-        Softmax analizer
-
-        ** Parameters **
-
-          data_shuffler:
-          graph:
-          session:
-          convergence_threshold:
-          convergence_reference: References to analize the convergence. Possible values are `eer`, `far10`, `far10`
-        """
-        self.data_shuffler = None
-        self.trainer = None
-        self.session = None
-
-    def __call__(self, data_shuffler, machine, session):
-        if self.data_shuffler is None:
-            self.data_shuffler = data_shuffler
-            self.trainer = trainer
-            self.session = session
-
-        # Creating the graph
-        #feature_batch, label_batch = self.data_shuffler.get_placeholders(name="validation_accuracy")
-        feature_left_batch, feature_right_batch label_batch = self.data_shuffler.get_placeholders_pair(name="validation_accuracy")
-
-        batch_left, batch_right, labels = self.data_shuffler.get_batch()
-        left = self.machine.compute_graph(feature_batch)
-        graph = self.network.compute_graph(feature_batch)
-
-        predictions = numpy.argmax(self.session.run(graph, feed_dict={feature_batch: data[:]}), 1)
-        accuracy = 100. * numpy.sum(predictions == labels) / predictions.shape[0]
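The accuracy computation that survives this cleanup is plain numpy: take the argmax of the network outputs and count matches against the labels. A self-contained sketch of exactly that arithmetic:

import numpy

logits = numpy.array([[0.1, 0.9],   # -> class 1
                      [0.8, 0.2],   # -> class 0
                      [0.3, 0.7]])  # -> class 1
labels = numpy.array([1, 0, 0])

predictions = numpy.argmax(logits, 1)
accuracy = 100. * numpy.sum(predictions == labels) / predictions.shape[0]
print(accuracy)  # 66.66... (two of three predictions match)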
bob/learn/tensorflow/data/BaseDataShuffler.py

@@ -72,11 +72,26 @@ class BaseDataShuffler(object):
         if self.data2_placeholder is None:
             self.data2_placeholder = tf.placeholder(tf.float32, shape=tuple([None] + list(self.shape[1:])), name=name)

-        if self.label_placeholder:
+        if self.label_placeholder is None:
             self.label_placeholder = tf.placeholder(tf.int64, shape=[None, ])

         return self.data_placeholder, self.data2_placeholder, self.label_placeholder

+    def get_placeholders_triplet_forprefetch(self, name=""):
+        """
+        Returns a place holder with the size of your batch
+        """
+        if self.data_placeholder is None:
+            self.data_placeholder = tf.placeholder(tf.float32, shape=tuple([None] + list(self.shape[1:])), name=name)
+
+        if self.data2_placeholder is None:
+            self.data2_placeholder = tf.placeholder(tf.float32, shape=tuple([None] + list(self.shape[1:])), name=name)
+
+        if self.data3_placeholder is None:
+            self.data3_placeholder = tf.placeholder(tf.float32, shape=tuple([None] + list(self.shape[1:])), name=name)
+
+        return self.data_placeholder, self.data2_placeholder, self.data3_placeholder
+
     def get_placeholders(self, name=""):
         """
         Returns a place holder with the size of your batch
@@ -102,10 +117,26 @@ class BaseDataShuffler(object):
             self.data2_placeholder = tf.placeholder(tf.float32, shape=self.shape, name=name + "_left")

         if self.label_placeholder is None:
-            self.label_placeholder = tf.placeholder(tf.int64, shape=self.shape[0], name="label")
+            self.label_placeholder = tf.placeholder(tf.int64, shape=self.shape[0], name=name + "_label")

         return self.data_placeholder, self.data2_placeholder, self.label_placeholder

+    def get_placeholders_triplet(self, name=""):
+        """
+        Returns a place holder with the size of your batch
+        """
+        if self.data_placeholder is None:
+            self.data_placeholder = tf.placeholder(tf.float32, shape=self.shape, name=name + "_anchor")
+
+        if self.data2_placeholder is None:
+            self.data2_placeholder = tf.placeholder(tf.float32, shape=self.shape, name=name + "_positive")
+
+        if self.data3_placeholder is None:
+            self.data3_placeholder = tf.placeholder(tf.float32, shape=self.shape, name=name + "_negative")
+
+        return self.data_placeholder, self.data2_placeholder, self.data3_placeholder
+
     def get_genuine_or_not(self, input_data, input_labels, genuine=True):

         if genuine:
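The new triplet helpers build three image placeholders (anchor, positive, negative) instead of the pair used by the siamese trainer. A sketch of the equivalent calls, assuming the TF 0.x/1.x-era API this repository targets (tf.placeholder does not exist in TF 2.x):

import tensorflow as tf

# Batch dimension left open, as in the *_forprefetch variant above.
shape = [None, 28, 28, 1]
anchor = tf.placeholder(tf.float32, shape=shape, name="train_anchor")
positive = tf.placeholder(tf.float32, shape=shape, name="train_positive")
negative = tf.placeholder(tf.float32, shape=shape, name="train_negative")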
bob/learn/tensorflow/loss/ContrastiveLoss.py

@@ -44,4 +44,3 @@ class ContrastiveLoss(BaseLoss):
         loss = 0.5 * (within_class + between_class)

         return tf.reduce_mean(loss), tf.reduce_mean(between_class), tf.reduce_mean(within_class)
-        #return loss, between_class, within_class, label, left_feature, right_feature, d
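The hunk above only shows the final combination, loss = 0.5 * (within_class + between_class). A standalone numpy sketch, assuming the standard contrastive-loss definitions of the two terms (their exact definitions live outside this hunk):

import numpy

def contrastive_loss(d, labels, margin=1.0):
    # d: Euclidean distances between pairs; labels: 1 = genuine, 0 = impostor
    within_class = labels * numpy.square(d)
    between_class = (1 - labels) * numpy.square(numpy.maximum(margin - d, 0))
    loss = 0.5 * (within_class + between_class)
    return loss.mean(), between_class.mean(), within_class.mean()

print(contrastive_loss(numpy.array([0.2, 1.5]), numpy.array([1, 0])))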
bob/learn/tensorflow/loss/TripletLoss.py

@@ -41,5 +41,4 @@ class TripletLoss(BaseLoss):
         d_negative = tf.square(compute_euclidean_distance(anchor_feature, negative_feature))

         loss = tf.maximum(0., d_positive - d_negative + self.margin)

-        return tf.reduce_mean(loss), tf.reduce_mean(d_positive), tf.reduce_mean(d_negative)
-        #return loss, d_positive, d_negative
+        return tf.reduce_mean(loss), tf.reduce_mean(d_negative), tf.reduce_mean(d_positive)
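Note the behavioural change here: the commit swaps the order of the last two return values, so callers now receive (loss, d_negative, d_positive). A standalone numpy sketch of the computation with the new ordering:

import numpy

def triplet_loss(d_positive, d_negative, margin=4.0):
    # Hinge on the anchor-positive vs. anchor-negative distance gap.
    loss = numpy.maximum(0., d_positive - d_negative + margin)
    return loss.mean(), d_negative.mean(), d_positive.mean()

# The second triplet violates the margin and contributes a non-zero loss.
print(triplet_loss(numpy.array([1.0, 2.0]), numpy.array([5.0, 3.0])))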
bob/learn/tensorflow/script/train_mnist.py

@@ -26,6 +26,7 @@ from bob.learn.tensorflow.data import MemoryDataShuffler, TextDataShuffler
 from bob.learn.tensorflow.network import Lenet, MLP, Dummy, Chopra
 from bob.learn.tensorflow.trainers import Trainer
 from bob.learn.tensorflow.loss import BaseLoss
+from ..analyzers import ExperimentAnalizer, SoftmaxAnalizer
 import numpy
@@ -93,7 +94,11 @@ def main():
         #architecture = Lenet(seed=SEED)
         #architecture = Dummy(seed=SEED)
         loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)
-        trainer = Trainer(architecture=architecture, loss=loss, iterations=ITERATIONS, prefetch=False, temp_dir="./LOGS/cnn")
+        trainer = Trainer(architecture=architecture, loss=loss,
+                          iterations=ITERATIONS,
+                          analizer=ExperimentAnalizer(),
+                          prefetch=True,
+                          temp_dir="./LOGS/cnn")
         trainer.train(train_data_shuffler, validation_data_shuffler)
         #trainer.train(train_data_shuffler)
     else:
bob/learn/tensorflow/script/train_mnist_siamese.py

@@ -132,7 +132,8 @@ def main():
                                  iterations=ITERATIONS,
                                  snapshot=VALIDATION_TEST,
                                  optimizer=optimizer,
-                                 temp_dir="./LOGS/siamese-cnn")
+                                 prefetch=True,
+                                 temp_dir="./LOGS/siamese-cnn-prefetch")

         #import ipdb; ipdb.set_trace();
         trainer.train(train_data_shuffler, validation_data_shuffler)
bob/learn/tensorflow/script/train_mnist_triplet.py

@@ -39,89 +39,30 @@ def main():
     perc_train = 0.9

     # Loading data
-    mnist = False
-
-    if mnist:
-        train_data, train_labels, validation_data, validation_labels = \
-            util.load_mnist(data_dir="./src/bob.db.mnist/bob/db/mnist/")
-        train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
-        validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))
-
-        train_data_shuffler = MemoryDataShuffler(train_data, train_labels,
-                                                 input_shape=[28, 28, 1],
-                                                 scale=True,
-                                                 batch_size=BATCH_SIZE)
-
-        validation_data_shuffler = MemoryDataShuffler(validation_data, validation_labels,
-                                                      input_shape=[28, 28, 1],
-                                                      scale=True,
-                                                      batch_size=VALIDATION_BATCH_SIZE)
-    else:
-        import bob.db.mobio
-        db_mobio = bob.db.mobio.Database()
-        import bob.db.casia_webface
-        db_casia = bob.db.casia_webface.Database()
-
-        # Preparing train set
-        train_objects = db_casia.objects(groups="world")
-        #train_objects = db.objects(groups="world")
-        train_labels = [int(o.client_id) for o in train_objects]
-        directory = "/idiap/resource/database/CASIA-WebFace/CASIA-WebFace"
-        train_file_names = [o.make_path(
-            directory=directory,
-            extension="")
-            for o in train_objects]
-        #import ipdb;
-        #ipdb.set_trace();
-        #train_file_names = [o.make_path(
-        #    directory="/idiap/group/biometric/databases/orl",
-        #    extension=".pgm")
-        #    for o in train_objects]
-
-        train_data_shuffler = TextDataShuffler(train_file_names, train_labels,
-                                               input_shape=[250, 250, 3],
-                                               batch_size=BATCH_SIZE)
-        #train_data_shuffler = TextDataShuffler(train_file_names, train_labels,
-        #                                       input_shape=[56, 46, 1],
-        #                                       batch_size=BATCH_SIZE)
-
-        # Preparing train set
-        directory = "/idiap/temp/tpereira/DEEP_FACE/CASIA/preprocessed"
-        validation_objects = db_mobio.objects(protocol="male", groups="dev")
-        validation_labels = [o.client_id for o in validation_objects]
-        #validation_file_names = [o.make_path(
-        #    directory="/idiap/group/biometric/databases/orl",
-        #    extension=".pgm")
-        #    for o in validation_objects]
-        validation_file_names = [o.make_path(
-            directory=directory,
-            extension=".hdf5")
-            for o in validation_objects]
-
-        validation_data_shuffler = TextDataShuffler(validation_file_names, validation_labels,
-                                                    input_shape=[250, 250, 3],
-                                                    batch_size=VALIDATION_BATCH_SIZE)
-        #validation_data_shuffler = TextDataShuffler(validation_file_names, validation_labels,
-        #                                            input_shape=[56, 46, 1],
-        #                                            batch_size=VALIDATION_BATCH_SIZE)
+    train_data, train_labels, validation_data, validation_labels = \
+        util.load_mnist(data_dir="./src/bob.db.mnist/bob/db/mnist/")
+    train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
+    validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))
+
+    train_data_shuffler = MemoryDataShuffler(train_data, train_labels,
+                                             input_shape=[28, 28, 1],
+                                             scale=True,
+                                             batch_size=BATCH_SIZE)
+
+    validation_data_shuffler = MemoryDataShuffler(validation_data, validation_labels,
+                                                  input_shape=[28, 28, 1],
+                                                  scale=True,
+                                                  batch_size=VALIDATION_BATCH_SIZE)

     # Preparing the architecture
     n_classes = len(train_data_shuffler.possible_labels)
+    #n_classes = 200
     cnn = True
     if cnn:
-        #architecture = Chopra(default_feature_layer="fc7")
-        architecture = Lenet(default_feature_layer="fc2", n_classes=n_classes, conv1_output=8, conv2_output=16, use_gpu=USE_GPU)
+        architecture = Chopra(seed=SEED, fc1_output=n_classes)
+        #architecture = Lenet(default_feature_layer="fc2", n_classes=n_classes, conv1_output=8, conv2_output=16,use_gpu=USE_GPU)
         #architecture = VGG(n_classes=n_classes, use_gpu=USE_GPU)
         #architecture = Dummy(seed=SEED)
         #architecture = LenetDropout(default_feature_layer="fc2", n_classes=n_classes, conv1_output=4, conv2_output=8, use_gpu=USE_GPU)

         loss = TripletLoss()
@@ -130,6 +71,8 @@ def main():
                                  loss=loss,
                                  iterations=ITERATIONS,
                                  snapshot=VALIDATION_TEST,
+                                 temp_dir="cnn-triplet",
+                                 prefetch=True,
                                  optimizer=optimizer)

         trainer.train(train_data_shuffler, validation_data_shuffler)
@@ -140,6 +83,7 @@ def main():
         loss = TripletLoss()
         trainer = TripletTrainer(architecture=mlp,
                                  loss=loss,
+                                 temp_dir="dnn-triplet",
                                  iterations=ITERATIONS,
                                  snapshot=VALIDATION_TEST)

         trainer.train(train_data_shuffler, validation_data_shuffler)
bob/learn/tensorflow/script/train_mobio.py  (new file, 0 → 100644)

#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Wed 11 May 2016 09:39:36 CEST

"""
Simple script that trains CASIA WEBFACE

Usage:
  train_mobio.py [--batch-size=<arg> --validation-batch-size=<arg> --iterations=<arg> --validation-interval=<arg> --use-gpu]
  train_mobio.py -h | --help

Options:
  -h --help     Show this screen.
  --batch-size=<arg>   [default: 1]
  --validation-batch-size=<arg>   [default:128]
  --iterations=<arg>  [default: 30000]
  --validation-interval=<arg>  [default: 100]
"""

from docopt import docopt
import tensorflow as tf
from .. import util
SEED = 10
from bob.learn.tensorflow.data import MemoryDataShuffler, TextDataShuffler
from bob.learn.tensorflow.network import Lenet, MLP, LenetDropout, VGG, Chopra, Dummy
from bob.learn.tensorflow.trainers import SiameseTrainer, Trainer, TripletTrainer
from bob.learn.tensorflow.loss import ContrastiveLoss, BaseLoss, TripletLoss
import numpy


def main():
    args = docopt(__doc__, version='Mnist training with TensorFlow')

    BATCH_SIZE = int(args['--batch-size'])
    VALIDATION_BATCH_SIZE = int(args['--validation-batch-size'])
    ITERATIONS = int(args['--iterations'])
    VALIDATION_TEST = int(args['--validation-interval'])
    USE_GPU = args['--use-gpu']
    perc_train = 0.9

    import bob.db.mobio
    db_mobio = bob.db.mobio.Database()

    directory = "/idiap/temp/tpereira/DEEP_FACE/CASIA/preprocessed"

    # Preparing train set
    #train_objects = db_mobio.objects(protocol="male", groups="world")
    train_objects = db_mobio.objects(protocol="male", groups="dev")
    train_labels = [int(o.client_id) for o in train_objects]
    n_classes = len(set(train_labels))

    train_file_names = [o.make_path(
        directory=directory,
        extension=".hdf5")
        for o in train_objects]
    train_data_shuffler = TextDataShuffler(train_file_names, train_labels,
                                           input_shape=[125, 125, 3],
                                           batch_size=BATCH_SIZE)

    # Preparing train set
    validation_objects = db_mobio.objects(protocol="male", groups="dev")
    #validation_objects = db_mobio.objects(protocol="male", groups="world")
    validation_labels = [o.client_id for o in validation_objects]

    validation_file_names = [o.make_path(
        directory=directory,
        extension=".hdf5")
        for o in validation_objects]
    validation_data_shuffler = TextDataShuffler(validation_file_names, validation_labels,
                                                input_shape=[125, 125, 3],
                                                batch_size=VALIDATION_BATCH_SIZE)

    # Preparing the architecture
    #architecture = Chopra(seed=SEED, fc1_output=n_classes)
    architecture = Chopra(seed=SEED)
    optimizer = tf.train.GradientDescentOptimizer(0.00000001)

    #loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)
    #trainer = Trainer(architecture=architecture, loss=loss,
    #                  iterations=ITERATIONS,
    #                  prefetch=False,
    #                  optimizer=optimizer,
    #                  temp_dir="./LOGS/cnn")

    #loss = ContrastiveLoss(contrastive_margin=4.)
    #trainer = SiameseTrainer(architecture=architecture, loss=loss,
    #                         iterations=ITERATIONS,
    #                         prefetch=True,
    #                         optimizer=optimizer,
    #                         temp_dir="./LOGS_MOBIO/siamese-cnn-prefetch")

    loss = TripletLoss(margin=4.)
    trainer = TripletTrainer(architecture=architecture, loss=loss,
                             iterations=ITERATIONS,
                             prefetch=True,
                             optimizer=optimizer,
                             temp_dir="./LOGS_MOBIO/triplet-cnn-prefetch")

    trainer.train(train_data_shuffler, validation_data_shuffler)
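Two details worth flagging in the new script: docopt only honours defaults written as [default: value] with a space after the colon, so [default:128] for --validation-batch-size will not be picked up, and the second "# Preparing train set" comment actually introduces the validation set. A minimal, self-contained docopt sketch of the defaults behaviour, using a hypothetical demo script rather than anything from this commit:

from docopt import docopt

usage = """Usage:
  demo.py [--batch-size=<arg>]

Options:
  --batch-size=<arg>  Mini-batch size [default: 16]
"""

args = docopt(usage, argv=[])      # argv=[] -> parse defaults only
print(int(args['--batch-size']))   # prints 16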
bob/learn/tensorflow/trainers/SiameseTrainer.py

@@ -58,7 +58,7 @@ class SiameseTrainer(Trainer):
                  prefetch=False,

                  ## Analizer
-                 analizer=SoftmaxAnalizer(),
+                 analizer=ExperimentAnalizer(),

                  verbosity_level=2):
@@ -85,10 +85,13 @@ class SiameseTrainer(Trainer):
                          verbosity_level=verbosity_level)

-        self.between_class_graph = None
-        self.within_class_graph = None
+        self.between_class_graph_train = None
+        self.within_class_graph_train = None
+        self.between_class_graph_validation = None
+        self.within_class_graph_validation = None

-    def compute_graph(self, data_shuffler, prefetch=False, name=""):
+    def compute_graph(self, data_shuffler, prefetch=False, name="", train=True):
         """
         Computes the graph for the trainer.
@@ -102,13 +105,7 @@ class SiameseTrainer(Trainer):
         # Defining place holders
         if prefetch:
-            placeholder_left_data, placeholder_right_data, placeholder_labels = data_shuffler.get_placeholders_pair_forprefetch(name="train")
-
-            # Creating two graphs
-            #placeholder_left_data, placeholder_labels = data_shuffler. \
-            #    get_placeholders_forprefetch(name="train_left")
-            #placeholder_right_data, _ = data_shuffler.get_placeholders(name="train_right")
-            feature_left_batch, feature_right_batch, label_batch = data_shuffler.get_placeholders_pair(name="train_")
+            placeholder_left_data, placeholder_right_data, placeholder_labels = data_shuffler.get_placeholders_pair_forprefetch(name=name)

             # Defining a placeholder queue for prefetching
             queue = tf.FIFOQueue(capacity=100,
@@ -126,9 +123,7 @@ class SiameseTrainer(Trainer):
             raise ValueError("The variable `architecture` must be an instance of "
                              "`bob.learn.tensorflow.network.SequenceNetwork`")
         else:
-            feature_left_batch, feature_right_batch, label_batch = data_shuffler.get_placeholders_pair(name="train_")
-            #feature_left_batch, label_batch = data_shuffler.get_placeholders(name="train_left")
-            #feature_right_batch, _ = data_shuffler.get_placeholders(name="train_right")
+            feature_left_batch, feature_right_batch, label_batch = data_shuffler.get_placeholders_pair(name=name)

         # Creating the siamese graph
         train_left_graph = self.architecture.compute_graph(feature_left_batch)
@@ -138,8 +133,12 @@ class SiameseTrainer(Trainer):
                                          train_left_graph,
                                          train_right_graph)

-        self.between_class_graph = between_class_graph
-        self.within_class_graph = within_class_graph
+        if train:
+            self.between_class_graph_train = between_class_graph
+            self.within_class_graph_train = within_class_graph
+        else:
+            self.between_class_graph_validation = between_class_graph
+            self.within_class_graph_validation = within_class_graph

         return graph
@@ -153,7 +152,7 @@ class SiameseTrainer(Trainer):
         """
         batch_left, batch_right, labels = data_shuffler.get_pair()
-        placeholder_left_data, placeholder_right_data, placeholder_label = data_shuffler.get_placeholders_pair(name="train")
+        placeholder_left_data, placeholder_right_data, placeholder_label = data_shuffler.get_placeholders_pair()

         feed_dict = {placeholder_left_data: batch_left,
                      placeholder_right_data: batch_right,
@@ -172,12 +171,12 @@ class SiameseTrainer(Trainer):
         """
         if self.prefetch:
             _, l, bt_class, wt_class, lr, summary = session.run([self.optimizer,
-                                                                 self.training_graph, self.between_class_graph, self.within_class_graph,
+                                                                 self.training_graph, self.between_class_graph_train, self.within_class_graph_train,
                                                                  self.learning_rate, self.summaries_train])
         else:
             feed_dict = self.get_feed_dict(self.train_data_shuffler)
             _, l, bt_class, wt_class, lr, summary = session.run([self.optimizer,
-                                                                 self.training_graph, self.between_class_graph, self.within_class_graph,
+                                                                 self.training_graph, self.between_class_graph_train, self.within_class_graph_train,
                                                                  self.learning_rate, self.summaries_train],
                                                                 feed_dict=feed_dict)

         logger.info("Loss training set step={0} = {1}".format(step, l))
@@ -197,12 +196,16 @@ class SiameseTrainer(Trainer):
         if self.validation_summary_writter is None:
             self.validation_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), session.graph)

-        self.validation_graph = self.compute_graph(data_shuffler, name="validation")
+        self.validation_graph = self.compute_graph(data_shuffler, name="validation", train=False)
         feed_dict = self.get_feed_dict(data_shuffler)
-        l = session.run(self.validation_graph, feed_dict=feed_dict)
+        l, bt_class, wt_class = session.run([self.validation_graph,
+                                             self.between_class_graph_validation, self.within_class_graph_validation],
+                                            feed_dict=feed_dict)

         summaries = []
         summaries.append(summary_pb2.Summary.Value(tag="loss", simple_value=float(l)))
+        summaries.append(summary_pb2.Summary.Value(tag="between_class_loss", simple_value=float(bt_class)))
+        summaries.append(summary_pb2.Summary.Value(tag="within_class_loss", simple_value=float(wt_class)))
         self.validation_summary_writter.add_summary(summary_pb2.Summary(value=summaries), step)
         logger.info("Loss VALIDATION set step={0} = {1}".format(step, l))
@@ -213,8 +216,8 @@ class SiameseTrainer(Trainer):
         # Train summary
         tf.scalar_summary('loss', self.training_graph, name="train")
-        tf.scalar_summary('between_class_loss', self.between_class_graph, name="train")
-        tf.scalar_summary('within_class_loss', self.within_class_graph, name="train")
+        tf.scalar_summary('between_class_loss', self.between_class_graph_train, name="train")
+        tf.scalar_summary('within_class_loss', self.within_class_graph_train, name="train")
         tf.scalar_summary('lr', self.learning_rate, name="train")
         return tf.merge_all_summaries()
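The core of this change is bookkeeping: the trainer now keeps one pair of loss sub-graphs for training and a separate pair for validation, selected by the new train flag of compute_graph(). A self-contained sketch of that pattern (illustrative names, not the library's API):

class GraphStore(object):
    def __init__(self):
        self.between_class_graph_train = None
        self.within_class_graph_train = None
        self.between_class_graph_validation = None
        self.within_class_graph_validation = None

    def store(self, between, within, train=True):
        # Route the two loss terms to train- or validation-specific slots.
        if train:
            self.between_class_graph_train = between
            self.within_class_graph_train = within
        else:
            self.between_class_graph_validation = between
            self.within_class_graph_validation = within

s = GraphStore()
s.store("bt_train", "wt_train", train=True)
s.store("bt_val", "wt_val", train=False)
print(s.between_class_graph_validation)  # bt_val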
bob/learn/tensorflow/trainers/Trainer.py

@@ -175,7 +175,7 @@ class Trainer(object):
         logger.info("Loss training set step={0} = {1}".format(step, l))
         self.train_summary_writter.add_summary(summary, step)

-    def compute_validation(self, session, data_shuffler, step):
+    def compute_validation(self, session, data_shuffler, step):
         """
         Computes the loss in the validation set
@@ -185,14 +185,14 @@ class Trainer(object):
             step: Iteration number
         """
-        if self.validation_summary_writter is None:
-            self.validation_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), session.graph)
-
         # Opening a new session for validation
         self.validation_graph = self.compute_graph(data_shuffler, name="validation")
         feed_dict = self.get_feed_dict(data_shuffler)
         l = session.run(self.validation_graph, feed_dict=feed_dict)

+        if self.validation_summary_writter is None:
+            self.validation_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), session.graph)
+
         summaries = []
         summaries.append(summary_pb2.Summary.Value(tag="loss", simple_value=float(l)))
         self.validation_summary_writter.add_summary(summary_pb2.Summary(value=summaries), step)
@@ -283,7 +283,6 @@ class Trainer(object):
             # TENSOR BOARD SUMMARY
             self.train_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'train'), session.graph)
-
             for step in range(self.iterations):
                 start = time.time()
@@ -297,7 +296,7 @@ class Trainer(object):
                 if self.analizer is not None:
                     self.validation_summary_writter.add_summary(self.analizer(
-                        validation_data_shuffler, self, session), step)
+                        validation_data_shuffler, self.architecture, session), step)

         logger.info("Training finally finished")
bob/learn/tensorflow/trainers/TripletTrainer.py

This diff is collapsed.
doc/extra-intersphinx.txt  (new file, 0 → 100644)

tensorflow