bob / bob.learn.tensorflow

Commit 3070af95, authored Feb 22, 2017 by Tiago de Freitas Pereira

Porting to tensorflow 1.0.0

Parent: 9f46e635
Pipeline #7461 failed with stages in 4 minutes and 17 seconds
12 changed files
bob/learn/tensorflow/initialization/Initialization.py

@@ -29,7 +29,7 @@ class Initialization(object):
         tf.set_random_seed(seed)

     def variable_exist(self, var):
-        return var in [v.name.split("/")[0] for v in tf.all_variables()]
+        return var in [v.name.split("/")[0] for v in tf.global_variables()]

     def __call__(self, shape, name, scope, init_value=None):
         NotImplementedError("Please implement this function in derived classes")
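For reference, TensorFlow 1.0 deprecated tf.all_variables() in favour of tf.global_variables(); the membership test above otherwise works unchanged. A minimal standalone sketch of the new call (the "conv1" scope and variable are hypothetical, not from this repository):

    import tensorflow as tf

    # Hypothetical variable living under a "conv1" scope.
    with tf.variable_scope("conv1"):
        w = tf.get_variable("w", shape=[3, 3], initializer=tf.zeros_initializer())

    # TF 1.0: tf.global_variables() replaces the deprecated tf.all_variables().
    top_scopes = [v.name.split("/")[0] for v in tf.global_variables()]
    print("conv1" in top_scopes)  # True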
bob/learn/tensorflow/layers/Layer.py

@@ -64,7 +64,7 @@ class Layer(object):
         NotImplementedError("Please implement this function in derived classes")

     def variable_exist(self, var):
-        return var in [v.name.split("/")[0] for v in tf.all_variables()]
+        return var in [v.name.split("/")[0] for v in tf.global_variables()]

     def batch_normalize(self, x, phase_train):
         """

@@ -124,7 +124,7 @@ class Layer(object):
         Doing this because of that https://github.com/tensorflow/tensorflow/issues/1325
         """
-        for v in tf.all_variables():
+        for v in tf.global_variables():
             if (len(v.name.split("/")) > 1) and (var in v.name.split("/")[1]):
                 return v
bob/learn/tensorflow/loss/BaseLoss.py

@@ -20,4 +20,4 @@ class BaseLoss(object):
         self.name = name

     def __call__(self, graph, label):
-        return self.operation(self.loss(graph, label), name=self.name)
+        return self.operation(self.loss(logits=graph, labels=label), name=self.name)
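The switch to keyword arguments mirrors TensorFlow 1.0's cross-entropy ops, which stopped accepting positional logits/labels. A standalone sketch with toy values, assuming self.loss wraps something like tf.nn.sparse_softmax_cross_entropy_with_logits:

    import tensorflow as tf

    logits = tf.constant([[2.0, 1.0, 0.1]])  # toy scores for 3 classes
    labels = tf.constant([0])                # toy ground-truth index

    # TF 1.0 requires named arguments here; positional calls raise an error.
    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
    loss = tf.reduce_mean(xent, name="loss")

    with tf.Session() as sess:
        print(sess.run(loss))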
bob/learn/tensorflow/loss/ContrastiveLoss.py

@@ -44,10 +44,10 @@ class ContrastiveLoss(BaseLoss):
             one = tf.constant(1.0)
             d = compute_euclidean_distance(left_feature, right_feature)

-            between_class = tf.mul(one - label, tf.square(d))  # (1-Y)*(d^2)
+            between_class = tf.multiply(one - label, tf.square(d))  # (1-Y)*(d^2)
             max_part = tf.square(tf.maximum(self.contrastive_margin - d, 0))

-            within_class = tf.mul(label, max_part)  # (Y) * max((margin - d)^2, 0)
+            within_class = tf.multiply(label, max_part)  # (Y) * max((margin - d)^2, 0)

             loss = 0.5 * (within_class + between_class)
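tf.mul was removed in TF 1.0; tf.multiply is the element-wise product. A self-contained sketch of the two contrastive terms with toy labels and distances (the label convention and margin value are illustrative only):

    import tensorflow as tf

    label = tf.constant([0.0, 1.0])  # toy labels for two pairs
    d = tf.constant([0.3, 1.2])      # toy pairwise distances
    margin = 1.0                     # stand-in for self.contrastive_margin

    between_class = tf.multiply(1.0 - label, tf.square(d))                     # (1-Y)*(d^2)
    within_class = tf.multiply(label, tf.square(tf.maximum(margin - d, 0.0)))  # (Y)*max((margin-d)^2, 0)
    loss = 0.5 * (within_class + between_class)

    with tf.Session() as sess:
        print(sess.run(loss))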
bob/learn/tensorflow/loss/NegLogLoss.py

@@ -25,9 +25,9 @@ class NegLogLoss(BaseLoss):
         rank = len(shape)
         flat_params = tf.reshape(params, [-1])
         if rank > 2:
-            indices_unpacked = tf.unpack(tf.transpose(indices, [rank - 1] + range(0, rank - 1), name))
+            indices_unpacked = tf.unstack(tf.transpose(indices, [rank - 1] + range(0, rank - 1), name))
         elif rank == 2:
-            indices_unpacked = tf.unpack(indices)
+            indices_unpacked = tf.unstack(indices)
         else:
             indices_unpacked = indices
         flat_indices = [i * rank + indices_unpacked[i] for i in range(0, len(indices_unpacked))]

@@ -38,6 +38,6 @@ class NegLogLoss(BaseLoss):
         log_probabilities = tf.nn.log_softmax(graph)
         # negative of the log-probability that correspond to the correct label
         correct_probabilities = self.gather_nd(log_probabilities, label)
-        neg_log_prob = tf.neg(correct_probabilities)
+        neg_log_prob = tf.negative(correct_probabilities)
         # use negative log likelihood as the loss
         return self.operation(neg_log_prob)
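Two renames are at work here: tf.unpack became tf.unstack and tf.neg became tf.negative. (Note also that `[rank - 1] + range(0, rank - 1)` in the first hunk only concatenates under Python 2, where range returns a list.) A minimal sketch of the renamed ops with toy data:

    import tensorflow as tf

    t = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])

    rows = tf.unstack(t)       # was tf.unpack; splits along axis 0 by default
    negated = tf.negative(t)   # was tf.neg

    with tf.Session() as sess:
        print(sess.run(rows[0]))   # [1. 2.]
        print(sess.run(negated))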
bob/learn/tensorflow/loss/TripletLoss.py

@@ -48,10 +48,10 @@ class TripletLoss(BaseLoss):
             positive_embedding = tf.nn.l2_normalize(positive_embedding, 1, 1e-10)
             negative_embedding = tf.nn.l2_normalize(negative_embedding, 1, 1e-10)

-            d_positive = tf.reduce_sum(tf.square(tf.sub(anchor_embedding, positive_embedding)), 1)
-            d_negative = tf.reduce_sum(tf.square(tf.sub(anchor_embedding, negative_embedding)), 1)
+            d_positive = tf.reduce_sum(tf.square(tf.subtract(anchor_embedding, positive_embedding)), 1)
+            d_negative = tf.reduce_sum(tf.square(tf.subtract(anchor_embedding, negative_embedding)), 1)

-            basic_loss = tf.add(tf.sub(d_positive, d_negative), self.margin)
+            basic_loss = tf.add(tf.subtract(d_positive, d_negative), self.margin)
             loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)

             return loss, tf.reduce_mean(d_negative), tf.reduce_mean(d_positive)
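tf.sub became tf.subtract in TF 1.0; the triplet-loss arithmetic is otherwise unchanged. A self-contained sketch with toy 2-D embeddings (the margin value is illustrative, standing in for self.margin):

    import tensorflow as tf

    anchor = tf.nn.l2_normalize(tf.constant([[1.0, 0.0]]), 1, 1e-10)
    positive = tf.nn.l2_normalize(tf.constant([[0.9, 0.1]]), 1, 1e-10)
    negative = tf.nn.l2_normalize(tf.constant([[0.0, 1.0]]), 1, 1e-10)
    margin = 0.2

    d_pos = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
    d_neg = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
    loss = tf.reduce_mean(tf.maximum(tf.add(tf.subtract(d_pos, d_neg), margin), 0.0), 0)

    with tf.Session() as sess:
        print(sess.run(loss))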
bob/learn/tensorflow/network/SequenceNetwork.py

@@ -160,13 +160,13 @@ class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
         """Attach a lot of summaries to a Tensor."""
         with tf.name_scope('summaries'):
             mean = tf.reduce_mean(var)
-            tf.scalar_summary('mean/' + name, mean)
+            tf.summary.scalar('mean/' + name, mean)
             with tf.name_scope('stddev'):
                 stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
-            tf.scalar_summary('sttdev/' + name, stddev)
-            tf.scalar_summary('max/' + name, tf.reduce_max(var))
-            tf.scalar_summary('min/' + name, tf.reduce_min(var))
-            tf.histogram_summary(name, var)
+            tf.summary.scalar('sttdev/' + name, stddev)
+            tf.summary.scalar('max/' + name, tf.reduce_max(var))
+            tf.summary.scalar('min/' + name, tf.reduce_min(var))
+            tf.summary.histogram(name, var)

     def generate_summaries(self):
         for k in self.sequence_net.keys():

@@ -310,7 +310,7 @@ class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
         # Loading variables
         place_holder = tf.placeholder(tf.float32, shape=shape, name="load")
         self.compute_graph(place_holder)
-        tf.initialize_all_variables().run(session=session)
+        tf.global_variables_initializer().run(session=session)
         self.load_variables_only(hdf5, session)

     def save(self, saver, path):
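TF 1.0 moved the summary ops into the tf.summary module and renamed the variable initializer. A minimal sketch combining both migrations from the hunks above (toy variable, not from this repository):

    import tensorflow as tf

    var = tf.Variable([1.0, 2.0, 3.0], name="weights")
    mean = tf.reduce_mean(var)

    tf.summary.scalar('mean/weights', mean)  # was tf.scalar_summary
    tf.summary.histogram('weights', var)     # was tf.histogram_summary
    merged = tf.summary.merge_all()          # was tf.merge_all_summaries

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())  # was tf.initialize_all_variables
        print(len(sess.run(merged)))  # length of the serialized Summary protobuf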
bob/learn/tensorflow/test/test_cnn.py

@@ -75,6 +75,7 @@ def dummy_experiment(data_s, architecture):
 def test_cnn_trainer():
     train_data, train_labels, validation_data, validation_labels = load_mnist()
     train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
+    validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))
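The added line gives the validation set the same NHWC layout as the training set. A toy illustration, assuming load_mnist returns the 28x28 images as flat 784-dimensional rows:

    import numpy

    flat = numpy.zeros((10, 784))  # hypothetical batch of flattened 28x28 images
    images = numpy.reshape(flat, (flat.shape[0], 28, 28, 1))
    print(images.shape)  # (10, 28, 28, 1): NHWC, as TensorFlow's conv layers expect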
bob/learn/tensorflow/trainers/SiameseTrainer.py

@@ -294,7 +294,7 @@ class SiameseTrainer(Trainer):
         """
         if self.validation_summary_writter is None:
-            self.validation_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), self.session.graph)
+            self.validation_summary_writter = tf.summary.FileWriter(os.path.join(self.temp_dir, 'validation'), self.session.graph)

         self.validation_graph = self.compute_graph(data_shuffler, name="validation", training=False)
         feed_dict = self.get_feed_dict(data_shuffler)

@@ -315,11 +315,11 @@ class SiameseTrainer(Trainer):
         """
         # Train summary
-        tf.scalar_summary('loss', self.training_graph, name="train")
-        tf.scalar_summary('between_class_loss', self.between_class_graph_train, name="train")
-        tf.scalar_summary('within_class_loss', self.within_class_graph_train, name="train")
-        tf.scalar_summary('lr', self.learning_rate, name="train")
-        return tf.merge_all_summaries()
+        tf.summary.scalar('loss', self.training_graph)
+        tf.summary.scalar('between_class_loss', self.between_class_graph_train)
+        tf.summary.scalar('within_class_loss', self.within_class_graph_train)
+        tf.summary.scalar('lr', self.learning_rate)
+        return tf.summary.merge_all()

     def load_and_enqueue(self):
         """
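tf.train.SummaryWriter became tf.summary.FileWriter in TF 1.0, and tf.summary.scalar takes no name= keyword, which is why the port also drops name="train". A runnable sketch of the new writer API (the log directory is a throwaway temp path):

    import os
    import tempfile
    import tensorflow as tf

    loss = tf.constant(0.42, name="loss")
    tf.summary.scalar('loss', loss)
    merged = tf.summary.merge_all()

    with tf.Session() as sess:
        log_dir = os.path.join(tempfile.mkdtemp(), 'validation')
        writer = tf.summary.FileWriter(log_dir, sess.graph)  # was tf.train.SummaryWriter
        writer.add_summary(sess.run(merged), global_step=0)
        writer.close()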
bob/learn/tensorflow/trainers/Trainer.py

@@ -229,7 +229,7 @@ class Trainer(object):
         l = self.session.run(self.validation_graph, feed_dict=feed_dict)

         if self.validation_summary_writter is None:
-            self.validation_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), self.session.graph)
+            self.validation_summary_writter = tf.summary.FileWriter(os.path.join(self.temp_dir, 'validation'), self.session.graph)

         summaries = [summary_pb2.Summary.Value(tag="loss", simple_value=float(l))]
         self.validation_summary_writter.add_summary(summary_pb2.Summary(value=summaries), step)

@@ -240,9 +240,9 @@ class Trainer(object):
         Creates a simple tensorboard summary with the value of the loss and learning rate
         """
         # Train summary
-        tf.scalar_summary('loss', self.training_graph, name="train")
-        tf.scalar_summary('lr', self.learning_rate, name="train")
-        return tf.merge_all_summaries()
+        tf.summary.scalar('loss', self.training_graph)
+        tf.summary.scalar('lr', self.learning_rate)
+        return tf.summary.merge_all()

     def start_thread(self):
         """
@@ -281,7 +281,6 @@ class Trainer(object):
         """
         Create all the necessary graphs for training, validation and inference graphs
         """
-        # Creating train graph
         self.training_graph = self.compute_graph(train_data_shuffler, prefetch=self.prefetch, name="train")
         tf.add_to_collection("training_graph", self.training_graph)
@@ -420,10 +419,10 @@ class Trainer(object):
         tf.add_to_collection("summaries_train", self.summaries_train)

-        tf.initialize_all_variables().run(session=self.session)
+        tf.global_variables_initializer().run(session=self.session)

         # Original tensorflow saver object
-        saver = tf.train.Saver(var_list=tf.all_variables())
+        saver = tf.train.Saver(var_list=tf.global_variables())

         if isinstance(train_data_shuffler, OnlineSampling):
             train_data_shuffler.set_feature_extractor(self.architecture, session=self.session)

@@ -435,7 +434,7 @@ class Trainer(object):
         threads = self.start_thread()

         # TENSOR BOARD SUMMARY
-        self.train_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'train'), self.session.graph)
+        self.train_summary_writter = tf.summary.FileWriter(os.path.join(self.temp_dir, 'train'), self.session.graph)

         for step in range(start_step, self.iterations):
             start = time.time()
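The initializer and saver changes above go together: both tf.initialize_all_variables() and tf.all_variables() were retired in TF 1.0. A minimal sketch of the replacements (toy variable only):

    import tensorflow as tf

    v = tf.Variable(1.0, name="v")

    init = tf.global_variables_initializer()               # was tf.initialize_all_variables()
    saver = tf.train.Saver(var_list=tf.global_variables())  # was tf.all_variables()

    with tf.Session() as sess:
        init.run(session=sess)
        print(sess.run(v))  # 1.0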
bob/learn/tensorflow/trainers/TripletTrainer.py

@@ -304,7 +304,7 @@ class TripletTrainer(Trainer):
         """
         if self.validation_summary_writter is None:
-            self.validation_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), self.session.graph)
+            self.validation_summary_writter = tf.summary.FileWriter(os.path.join(self.temp_dir, 'validation'), self.session.graph)

         self.validation_graph = self.compute_graph(data_shuffler, name="validation", training=False)

@@ -326,11 +326,11 @@ class TripletTrainer(Trainer):
         """
         # Train summary
-        tf.scalar_summary('loss', self.training_graph, name="train")
-        tf.scalar_summary('between_class_loss', self.between_class_graph_train, name="train")
-        tf.scalar_summary('within_class_loss', self.within_class_graph_train, name="train")
-        tf.scalar_summary('lr', self.learning_rate, name="train")
-        return tf.merge_all_summaries()
+        tf.summary.scalar('loss', self.training_graph)
+        tf.summary.scalar('between_class_loss', self.between_class_graph_train)
+        tf.summary.scalar('within_class_loss', self.within_class_graph_train)
+        tf.summary.scalar('lr', self.learning_rate)
+        return tf.summary.merge_all()

     def load_and_enqueue(self):
         """
bob/learn/tensorflow/utils/util.py

@@ -14,7 +14,7 @@ def compute_euclidean_distance(x, y):
     """
     with tf.name_scope('euclidean_distance') as scope:
-        d = tf.sqrt(tf.reduce_sum(tf.square(tf.sub(x, y)), 1))
+        d = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(x, y)), 1))
         return d
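The ported helper in isolation, as a runnable sketch with a toy 3-4-5 check:

    import tensorflow as tf

    def compute_euclidean_distance(x, y):
        # tf.sub became tf.subtract in TF 1.0; the row-wise distance is unchanged.
        with tf.name_scope('euclidean_distance'):
            return tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(x, y)), 1))

    with tf.Session() as sess:
        x = tf.constant([[0.0, 0.0]])
        y = tf.constant([[3.0, 4.0]])
        print(sess.run(compute_euclidean_distance(x, y)))  # [5.]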