bob / bob.learn.tensorflow / Commits

Commit ba9a7fe1, authored Aug 16, 2016 by Tiago de Freitas Pereira

    Debuging

parent 7da034f3

Changes: 6 files
bob/learn/tensorflow/DataShuffler.py

@@ -14,7 +14,7 @@ def scale_mean_norm(data, scale=0.00390625):
 class DataShuffler(object):

-    def __init__(self, data, labels, perc_train=0.9, scale=True, train_batch_size=1, validation_batch_size=100):
+    def __init__(self, data, labels, perc_train=0.9, scale=True, train_batch_size=1, validation_batch_size=300):
         """
         Some base functions for neural networks
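
The only substantive change here is the default validation batch size, raised from 100 to 300. For orientation, a minimal usage sketch; the constructor signature comes from this diff, while get_batch(train_dataset=False) is inferred from the Trainer hunk further down, so treat the exact API as an assumption:

# Minimal usage sketch (constructor from this diff; get_batch inferred
# from the Trainer changes below, so the exact API is an assumption).
import numpy
from bob.learn.tensorflow.DataShuffler import DataShuffler

data = numpy.random.rand(1000, 28, 28, 1).astype("float32")  # MNIST-shaped dummy data
labels = numpy.random.randint(0, 10, size=1000)

shuffler = DataShuffler(data, labels)  # validation_batch_size now defaults to 300
validation_data, validation_labels = shuffler.get_batch(train_dataset=False)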
bob/learn/tensorflow/layers/Conv2D.py

@@ -63,11 +63,10 @@ class Conv2D(Layer):
         with tf.name_scope(str(self.name)):
             conv2d = tf.nn.conv2d(self.input_layer, self.W, strides=[1, 1, 1, 1], padding='SAME')

-            if self.activation is not None:
-                with tf.name_scope(str(self.name) + 'activation'):
+            with tf.name_scope(str(self.name) + 'activation'):
+                if self.activation is not None:
                     non_linear_conv2d = tf.nn.tanh(tf.nn.bias_add(conv2d, self.b))
                     output = non_linear_conv2d
-            else:
-                output = conv2d
+                else:
+                    output = conv2d

-            return output
+                return output
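
The if/with swap means the 'activation' name scope is now created unconditionally, with both branches and the return inside it. A toy illustration of how nested name scopes prefix operator names (TensorFlow 0.x era API, matching this codebase; the scope names here are illustrative):

# Toy illustration of nested tf.name_scope prefixes (TF 0.x era API).
import tensorflow as tf

x = tf.constant([[1.0, -2.0]])
with tf.name_scope('conv1'):
    with tf.name_scope('conv1activation'):
        y = tf.nn.tanh(x)

print(y.name)  # something like 'conv1/conv1activation/Tanh:0'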
bob/learn/tensorflow/layers/FullyConnected.py

@@ -44,21 +44,23 @@ class FullyConnected(Layer):
         self.W = create_weight_variables([input_dim, self.output_dim], seed=self.seed,
                                          name=str(self.name), use_gpu=self.use_gpu)

-        if self.activation is not None:
-            self.b = create_bias_variables([self.output_dim], name=str(self.name) + "_bias",
-                                           use_gpu=self.use_gpu)
+        # if self.activation is not None:
+        self.b = create_bias_variables([self.output_dim], name=str(self.name) + "_bias",
+                                       use_gpu=self.use_gpu)

     def get_graph(self):
+        with tf.name_scope('fc'):
             with tf.name_scope(str(self.name)):
                 if len(self.input_layer.get_shape()) == 4:
                     shape = self.input_layer.get_shape().as_list()
                     fc = tf.reshape(self.input_layer, [shape[0], shape[1] * shape[2] * shape[3]])
                 else:
                     fc = self.input_layer

-                if self.activation is not None:
-                    with tf.name_scope('activation'):
+                with tf.name_scope('activation'):
+                    if self.activation is not None:
                         non_linear_fc = tf.nn.tanh(tf.matmul(fc, self.W) + self.b)
                         output = non_linear_fc
-                else:
-                    output = fc
+                    else:
+                        output = tf.matmul(fc, self.W) + self.b

-                return output
+                    return output
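
Two fixes travel together here: the bias self.b is now created unconditionally (the guard is commented out), and the no-activation path now actually applies the layer, returning tf.matmul(fc, self.W) + self.b instead of passing the flattened input through untouched. The 4-D branch flattens conv feature maps into a matrix; a numpy stand-in for that tf.reshape, with made-up dimensions:

# numpy stand-in for the 4-D flattening in get_graph (dimensions made up):
# [batch, height, width, channels] -> [batch, height * width * channels]
import numpy

x = numpy.zeros((64, 7, 7, 32))
shape = list(x.shape)
flat = x.reshape(shape[0], shape[1] * shape[2] * shape[3])
print(flat.shape)  # (64, 1568)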
bob/learn/tensorflow/layers/MaxPooling.py

@@ -24,4 +24,4 @@ class MaxPooling(Layer):
         with tf.name_scope(str(self.name)):
             output = tf.nn.max_pool(self.input_layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

-        return output
+            return output
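
With ksize and strides both [1, 2, 2, 1] and SAME padding, each non-overlapping 2x2 window keeps its maximum, so the spatial dimensions halve (rounding up) while the batch and channel dimensions pass through. The shape arithmetic, as a small sketch:

# Output-shape arithmetic for 2x2/stride-2 max pooling with SAME padding:
# out_dim = ceil(in_dim / stride), e.g. 28x28 feature maps become 14x14.
import math

def pooled_dim(in_dim, stride=2):
    return int(math.ceil(in_dim / float(stride)))

print(pooled_dim(28))  # 14
print(pooled_dim(7))   # 4 (SAME padding rounds up)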
bob/learn/tensorflow/script/train_mnist.py

@@ -44,7 +44,7 @@ def main():
     lenet = Lenet()
     loss = tf.nn.sparse_softmax_cross_entropy_with_logits

-    trainer = Trainer(architecture=lenet, loss=loss)
+    trainer = Trainer(architecture=lenet, loss=loss, iterations=ITERATIONS)

     trainer.train(data_shuffler)
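
The Trainer call now forwards the script's ITERATIONS constant instead of relying on the constructor default. Note that loss is the function object tf.nn.sparse_softmax_cross_entropy_with_logits itself, not a computed tensor; the Trainer applies it when it wires up the graphs. A minimal sketch of that callable-passing pattern (apply_loss is illustrative, not an actual Trainer method):

# Sketch of passing a loss *function* rather than a loss tensor.
# 'apply_loss' is illustrative, not the repo's actual internals.
def apply_loss(loss_fn, logits, labels):
    # TF 0.x argument order was (logits, labels)
    return loss_fn(logits, labels)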
bob/learn/tensorflow/trainers/Trainer.py

@@ -61,8 +61,8 @@ class Trainer(object):
         #input_layer = InputLayer(name="input", input_data=train_placeholder_data)

-        import ipdb; ipdb.set_trace();
+        #import ipdb; ipdb.set_trace();

         train_graph = self.architecture.compute_graph(train_placeholder_data)
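
For context: ipdb is the IPython-flavored debugger, and an inline ipdb.set_trace() suspends execution at that line and opens an interactive prompt. This commit toggles the breakpoint off by commenting it out rather than deleting it:

# Breakpoint toggle pattern used in this commit:
import ipdb; ipdb.set_trace()    # active: execution stops here
#import ipdb; ipdb.set_trace()   # disabled: training runs uninterrupted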
@@ -80,12 +80,18 @@ class Trainer(object):
         )

         optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_train, global_step=batch)

+        train_prediction = tf.nn.softmax(train_graph)
+        validation_prediction = tf.nn.softmax(validation_graph)
+
+        print("Initializing !!")
+
         # Training
         with tf.Session() as session:
+            train_writer = tf.train.SummaryWriter('./LOGS/train', session.graph)

             tf.initialize_all_variables().run()
             for step in range(self.iterations):
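
The monitoring additions (softmax prediction tensors, the startup print, and a SummaryWriter for TensorBoard logs) bracket the training session. A compact sketch of that pre-1.0 session bootstrap; tf.train.SummaryWriter and tf.initialize_all_variables are the TF 0.x names for what later became tf.summary.FileWriter and tf.global_variables_initializer:

# TF 0.x-era session bootstrap matching the pattern above.
import tensorflow as tf

w = tf.Variable(0.0, name='w')

with tf.Session() as session:
    train_writer = tf.train.SummaryWriter('./LOGS/train', session.graph)
    tf.initialize_all_variables().run()
    # ... run training steps here ...
    train_writer.close()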
@@ -95,15 +101,15 @@ class Trainer(object):
                              train_placeholder_labels: train_labels}

                 _, l, lr, _ = session.run([optimizer, loss_train,
                                            learning_rate, train_prediction], feed_dict=feed_dict)

                 if step % self.snapshot == 0:
                     validation_data, validation_labels = data_shuffler.get_batch(train_dataset=False)

                     feed_dict = {validation_placeholder_data: validation_data,
                                  validation_placeholder_labels: validation_labels}

-                    import ipdb; ipdb.set_trace();
+                    #import ipdb; ipdb.set_trace();

                     l, predictions = session.run([loss_validation, validation_prediction], feed_dict=feed_dict)
                     accuracy = 100. * numpy.sum(numpy.argmax(predictions, 1) == validation_labels) / predictions.shape[0]
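
The accuracy line is worth unpacking: numpy.argmax over the softmax outputs picks each predicted class, comparison against the integer labels gives a boolean vector, and the scaled sum is the percentage correct. A self-contained check of the same formula on made-up numbers:

# Self-contained check of the Trainer's accuracy formula (made-up data).
import numpy

predictions = numpy.array([[0.1, 0.9],    # predicted class 1
                           [0.8, 0.2],    # predicted class 0
                           [0.3, 0.7]])   # predicted class 1
validation_labels = numpy.array([1, 0, 0])

accuracy = 100. * numpy.sum(numpy.argmax(predictions, 1) == validation_labels) / predictions.shape[0]
print(accuracy)  # 66.66...: two of three correct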
@@ -114,3 +120,4 @@ class Trainer(object):
             #                                            validation_data_node)
             #print("Step {0}. Loss = {1}, Lr={2}, Accuracy validation = {3}".format(step, l, lr, accuracy))
             #sys.stdout.flush()
+        train_writer.close()
\ No newline at end of file