bob / bob.learn.tensorflow · Commits

Commit 004fe191, authored Oct 09, 2016 by Tiago de Freitas Pereira
Still struggling with a general training
parent 14147b9a

Showing 3 changed files with 106 additions and 18 deletions (+106 −18)
bob/learn/tensorflow/analyzers/RegularAnalizer.py   +63 −0
bob/learn/tensorflow/data/MemoryDataShuffler.py     +1 −1
bob/learn/tensorflow/trainers/Trainer.py            +42 −17
bob/learn/tensorflow/analyzers/RegularAnalizer.py (new file, 0 → 100644)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Tue 09 Aug 2016 15:33 CEST

"""
Neural network error-rate analyzer
"""

import numpy
import bob.measure
import tensorflow as tf
from tensorflow.core.framework import summary_pb2
from scipy.spatial.distance import cosine


class ExperimentAnalizer:
    """
    Analyzer.
    """

    def __init__(self, data_shuffler, machine, session):
        """
        Use the CNN as feature extractor for an n-class classification

        **Parameters**

        data_shuffler:
        machine:
        session:
        convergence_threshold:
        convergence_reference: Reference used to analyze convergence. Possible values are `eer`, `far10`, `far100`
        """
        self.data_shuffler = data_shuffler
        self.machine = machine
        self.session = session

        # Build the validation graph, its loss and the merged summary op.
        # NOTE: `self.loss` is assumed to be attached to the analyzer elsewhere
        # (e.g. by the Trainer); it is not set in this constructor yet.
        self.placeholder_data, self.placeholder_labels = \
            self.data_shuffler.get_placeholders(name="validation")
        self.graph = self.machine.compute_graph(self.placeholder_data)
        self.loss_validation = self.loss(self.graph, self.placeholder_labels)
        tf.scalar_summary('loss', self.loss_validation, name="validation")
        self.merged_validation = tf.merge_all_summaries()

    def __call__(self, validation_writer, step):
        # Fetch a validation batch and feed it through the validation graph
        data, labels = self.data_shuffler.get_batch()
        feed_dict = {self.placeholder_data: data,
                     self.placeholder_labels: labels}

        # l, predictions = session.run([loss_validation, validation_prediction, ], feed_dict=feed_dict)
        # l, summary = session.run([loss_validation, merged_validation], feed_dict=feed_dict)
        l = self.session.run(self.loss_validation, feed_dict=feed_dict)

        # Write the validation loss as a hand-built protobuf summary
        summaries = []
        summaries.append(summary_pb2.Summary.Value(tag="loss", simple_value=float(l)))
        validation_writer.add_summary(summary_pb2.Summary(value=summaries), step)
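A hedged sketch of how this analyzer appears meant to be driven (the Trainer below still inlines the same logic; `validation_shuffler`, `machine`, `temp_dir`, `iterations`, and `snapshot` are stand-ins for the Trainer's own attributes, not names from this commit):

    import os
    import tensorflow as tf

    # Hypothetical wiring: build the analyzer once, call it every `snapshot` steps
    session = tf.Session()
    analizer = ExperimentAnalizer(validation_shuffler, machine, session)
    validation_writer = tf.train.SummaryWriter(os.path.join(temp_dir, 'validation'))

    for step in range(iterations):
        # ... run one training step here ...
        if step % snapshot == 0:
            analizer(validation_writer, step)  # logs the validation loss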
bob/learn/tensorflow/data/MemoryDataShuffler.py
...
@@ -56,7 +56,7 @@ class MemoryDataShuffler(BaseDataShuffler):
         selected_data = self.data[indexes[0:self.batch_size], :, :, :]
         selected_labels = self.labels[indexes[0:self.batch_size]]

-        return selected_data.astype("float32"), selected_labels
+        return selected_data, selected_labels.astype("int64")

     def get_pair(self, zero_one_labels=True):
         """
...
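The dtype swap above lines up with the prefetch queue Trainer.py declares with dtypes=[tf.float32, tf.int64]: labels now have to be int64 by the time they are enqueued. A minimal sketch of that constraint, with hypothetical batch shapes (and assuming the stored data is already float32):

    import numpy
    import tensorflow as tf

    # Hypothetical batch: 16 images of 28x28x1 plus integer class labels
    data = numpy.random.rand(16, 28, 28, 1).astype("float32")
    labels = numpy.random.randint(0, 10, size=16).astype("int64")

    # The queue only accepts tensors whose dtypes match its declaration
    queue = tf.FIFOQueue(capacity=10, dtypes=[tf.float32, tf.int64])
    enqueue_op = queue.enqueue([data, labels])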
bob/learn/tensorflow/trainers/Trainer.py
...
@@ -11,6 +11,7 @@ import threading
 import numpy
 import os
 import bob.io.base
+from tensorflow.core.framework import summary_pb2


 class Trainer(object):
...
@@ -20,7 +21,7 @@ class Trainer(object):
                  optimizer=tf.train.AdamOptimizer(),
                  use_gpu=False,
                  loss=None,
-                 temp_dir="",
+                 temp_dir="cnn",

                  # Learning rate
                  base_learning_rate=0.001,
...
@@ -96,11 +97,14 @@ class Trainer(object):
                                                   self.weight_decay  # Decay step
                                                  )

+        # Creating directory
+        bob.io.base.create_directories_safe(self.temp_dir)

         # Defining place holders
         train_placeholder_data, train_placeholder_labels = train_data_shuffler.get_placeholders_forprefetch(name="train")
+        if validation_data_shuffler is not None:
+            validation_placeholder_data, validation_placeholder_labels = \
+                validation_data_shuffler.get_placeholders(name="validation")
-        #if validation_data_shuffler is not None:
-        #    validation_placeholder_data, validation_placeholder_labels = \
-        #        validation_data_shuffler.get_placeholders(name="validation")

         # Defining a placeholder queue for prefetching
         queue = tf.FIFOQueue(capacity=10, dtypes=[tf.float32, tf.int64],
...
@@ -118,16 +122,23 @@ class Trainer(object):
         # Creating graphs and defining the loss
         train_graph = self.architecture.compute_graph(train_feature_batch)
         loss_train = self.loss(train_graph, train_label_batch)
         train_prediction = tf.nn.softmax(train_graph)
+        if validation_data_shuffler is not None:
+            validation_graph = self.architecture.compute_graph(validation_placeholder_data)
+            loss_validation = self.loss(validation_graph, validation_placeholder_labels)
+            validation_prediction = tf.nn.softmax(validation_graph)

         # Preparing the optimizer
         self.optimizer._learning_rate = learning_rate
         optimizer = self.optimizer.minimize(loss_train, global_step=tf.Variable(0))

         # Train summary
         tf.scalar_summary('loss', loss_train, name="train")
         tf.scalar_summary('lr', learning_rate, name="train")
         merged_train = tf.merge_all_summaries()

         # Validation
-        #if validation_data_shuffler is not None:
-        #    validation_graph = self.architecture.compute_graph(validation_placeholder_data)
-        #    loss_validation = self.loss(validation_graph, validation_placeholder_labels)
-        #    tf.scalar_summary('loss', loss_validation, name="validation")
-        #    merged_validation = tf.merge_all_summaries()

         print("Initializing !!")

         # Training
         hdf5 = bob.io.base.HDF5File(os.path.join(self.temp_dir, 'model.hdf5'), 'w')
...
@@ -142,12 +153,15 @@ class Trainer(object):
         threads = start_thread()

-        train_writer = tf.train.SummaryWriter('./LOGS/train', session.graph)
+        # TENSOR BOARD SUMMARY
+        train_writer = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'train'), session.graph)
+        validation_writer = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), session.graph)

         for step in range(self.iterations):

-            _, l, lr, _ = session.run([optimizer, loss_train, learning_rate, train_prediction])
+            _, l, lr, summary = session.run([optimizer, loss_train, learning_rate, merged_train])
+            train_writer.add_summary(summary, step)

             if validation_data_shuffler is not None and step % self.snapshot == 0:
                 validation_data, validation_labels = validation_data_shuffler.get_batch()
...
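Writing the train and validation summaries into sibling subdirectories of temp_dir means a single TensorBoard instance pointed at that directory (e.g. `tensorboard --logdir=cnn` with the new default temp_dir) can overlay both loss curves, which is presumably why the hard-coded './LOGS/train' path was dropped.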
@@ -155,16 +169,27 @@ class Trainer(object):
                 feed_dict = {validation_placeholder_data: validation_data,
                              validation_placeholder_labels: validation_labels}

-                l, predictions = session.run([loss_validation, validation_prediction], feed_dict=feed_dict)
-                accuracy = 100. * numpy.sum(numpy.argmax(predictions, 1) == validation_labels) / predictions.shape[0]
+                #l, predictions = session.run([loss_validation, validation_prediction, ], feed_dict=feed_dict)
+                #l, summary = session.run([loss_validation, merged_validation], feed_dict=feed_dict)
+                #import ipdb; ipdb.set_trace();
+                l = session.run(loss_validation, feed_dict=feed_dict)

+                summaries = []
+                summaries.append(summary_pb2.Summary.Value(tag="loss", simple_value=float(l)))
+                validation_writer.add_summary(summary_pb2.Summary(value=summaries), step)

-                print "Step {0}. Loss = {1}, Acc Validation={2}".format(step, l, accuracy)
+                #l = session.run([loss_validation], feed_dict=feed_dict)
+                #accuracy = 100. * numpy.sum(numpy.argmax(predictions, 1) == validation_labels) / predictions.shape[0]
+                #validation_writer.add_summary(summary, step)
+                #print "Step {0}. Loss = {1}, Acc Validation={2}".format(step, l, accuracy)
+                print "Step {0}. Loss = {1}".format(step, l)

         train_writer.close()

+        self.architecture.save(hdf5)
+        del hdf5

         # now they should definitely stop
         thread_pool.request_stop()
         thread_pool.join(threads)

-        self.architecture.save(hdf5)
-        del hdf5
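The manual summary_pb2 pattern used in both new code paths is worth isolating: it lets a plain Python float be logged to TensorBoard without adding a summary op to the graph. A standalone sketch under the same TF 0.x API this repo uses (the log directory and loss values are hypothetical):

    import tensorflow as tf
    from tensorflow.core.framework import summary_pb2

    writer = tf.train.SummaryWriter('/tmp/validation_logs')
    for step, loss in enumerate([0.9, 0.7, 0.5]):
        # Build the Summary protobuf by hand instead of running a summary op
        value = summary_pb2.Summary.Value(tag="loss", simple_value=float(loss))
        writer.add_summary(summary_pb2.Summary(value=[value]), step)
    writer.close()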