Debugging

parent 7da034f3
@@ -14,7 +14,7 @@ def scale_mean_norm(data, scale=0.00390625):
 class DataShuffler(object):
-    def __init__(self, data, labels, perc_train=0.9, scale=True, train_batch_size=1, validation_batch_size=100):
+    def __init__(self, data, labels, perc_train=0.9, scale=True, train_batch_size=1, validation_batch_size=300):
         """
         Some base functions for neural networks
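The only functional change in this hunk is the default validation batch size, raised from 100 to 300. For context, here is a minimal sketch of what a shuffler with this constructor typically does; the split and sampling logic below are my assumptions for illustration (the real class also handles `scale`, which I omit), not the project's actual implementation:

```python
import numpy

class MinimalDataShuffler(object):
    """Illustrative only: shuffle once, then split into train/validation."""

    def __init__(self, data, labels, perc_train=0.9,
                 train_batch_size=1, validation_batch_size=300):
        indexes = numpy.arange(data.shape[0])
        numpy.random.shuffle(indexes)

        n_train = int(perc_train * data.shape[0])
        self.train_data = data[indexes[:n_train]]
        self.train_labels = labels[indexes[:n_train]]
        self.validation_data = data[indexes[n_train:]]
        self.validation_labels = labels[indexes[n_train:]]

        self.train_batch_size = train_batch_size
        self.validation_batch_size = validation_batch_size

    def get_batch(self, train=True):
        # Draw a random batch from the requested split.
        if train:
            data, labels, size = self.train_data, self.train_labels, self.train_batch_size
        else:
            data, labels, size = self.validation_data, self.validation_labels, self.validation_batch_size
        picks = numpy.random.choice(data.shape[0], size, replace=False)
        return data[picks], labels[picks]
```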
@@ -64,7 +64,6 @@ class Conv2D(Layer):
             conv2d = tf.nn.conv2d(self.input_layer, self.W, strides=[1, 1, 1, 1], padding='SAME')

             if self.activation is not None:
                 with tf.name_scope(str(self.name) + 'activation'):
                     non_linear_conv2d = tf.nn.tanh(tf.nn.bias_add(conv2d, self.b))
                     output = non_linear_conv2d
             else:
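This hunk is context around a one-line deletion; the visible pattern (convolve, add bias, squash with tanh) is easy to reproduce standalone. A sketch with made-up shapes, written against a recent TensorFlow (the repo targets a much older release, so treat the exact API spelling as my assumption):

```python
import tensorflow as tf

# Hypothetical shapes: a batch of 4 grayscale 28x28 images, a 5x5 kernel
# producing 8 feature maps, and one bias per output map.
images = tf.random.normal([4, 28, 28, 1])
W = tf.random.normal([5, 5, 1, 8])
b = tf.zeros([8])

conv2d = tf.nn.conv2d(images, W, strides=[1, 1, 1, 1], padding='SAME')
# Same pattern as the hunk above: add the bias, then apply the tanh non-linearity.
output = tf.nn.tanh(tf.nn.bias_add(conv2d, b))
print(output.shape)  # (4, 28, 28, 8): 'SAME' padding preserves spatial size
```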
@@ -44,21 +44,23 @@ class FullyConnected(Layer):
         self.W = create_weight_variables([input_dim, self.output_dim],
                                          seed=self.seed, name=str(self.name), use_gpu=self.use_gpu)
-        if self.activation is not None:
-            self.b = create_bias_variables([self.output_dim], name=str(self.name) + "_bias", use_gpu=self.use_gpu)
+        #if self.activation is not None:
+        self.b = create_bias_variables([self.output_dim], name=str(self.name) + "_bias", use_gpu=self.use_gpu)

     def get_graph(self):
-        with tf.name_scope('fc'):
+        with tf.name_scope(str(self.name)):

             if len(self.input_layer.get_shape()) == 4:
                 shape = self.input_layer.get_shape().as_list()
                 fc = tf.reshape(self.input_layer, [shape[0], shape[1] * shape[2] * shape[3]])
             else:
                 fc = self.input_layer

-            if self.activation is not None:
-                with tf.name_scope('activation'):
-                    non_linear_fc = tf.nn.tanh(tf.matmul(fc, self.W) + self.b)
-                    output = non_linear_fc
-            else:
-                output = fc
+            output = tf.matmul(fc, self.W) + self.b

             return output
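The net effect of this hunk: the fully connected layer now returns the raw affine output `tf.matmul(fc, self.W) + self.b` instead of squashing it through tanh, and the bias is created unconditionally. That pairs with the loss chosen in the next hunk, `tf.nn.sparse_softmax_cross_entropy_with_logits`, which applies softmax internally and therefore expects unscaled logits; feeding it pre-squashed activations would distort the loss. A small sketch of the correct usage (values are made up):

```python
import tensorflow as tf

logits = tf.constant([[2.0, -1.0, 0.5]])  # raw affine output: any real values
labels = tf.constant([0])                 # integer class indices, not one-hot

# The op softmaxes the logits itself, so they must arrive unscaled.
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
```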
@@ -44,7 +44,7 @@ def main():
     lenet = Lenet()
     loss = tf.nn.sparse_softmax_cross_entropy_with_logits

-    trainer = Trainer(architecture=lenet, loss=loss)
+    trainer = Trainer(architecture=lenet, loss=loss, iterations=ITERATIONS)
     trainer.train(data_shuffler)
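`ITERATIONS` is presumably a constant defined elsewhere in this script; the change just threads it into the Trainer rather than relying on a hard-coded default. A hypothetical constructor consistent with this call site and with the `for step in range(self.iterations)` loop seen below (the real class will differ in detail):

```python
ITERATIONS = 5000  # assumed value; the real constant lives elsewhere in the script

class Trainer(object):
    # Hypothetical signature matching the call above.
    def __init__(self, architecture, loss, iterations=5000):
        self.architecture = architecture
        self.loss = loss
        self.iterations = iterations  # consumed later by range(self.iterations)
```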
@@ -61,8 +61,8 @@ class Trainer(object):
         #input_layer = InputLayer(name="input", input_data=train_placeholder_data)

-        import ipdb;
-        ipdb.set_trace();
+        #import ipdb;
+        #ipdb.set_trace();

         train_graph = self.architecture.compute_graph(train_placeholder_data)
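Matching the commit title, this hunk disables an interactive breakpoint instead of deleting it. For reference, re-enabling the two commented lines drops execution into an ipdb shell at that point (the trailing semicolons are harmless but un-idiomatic Python):

```python
import ipdb
ipdb.set_trace()  # execution pauses here; inspect locals, then type 'c' to continue
```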
@@ -80,12 +80,18 @@ class Trainer(object):
                                                               )

         optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_train,
                                                                               global_step=batch)

+        train_prediction = tf.nn.softmax(train_graph)
+        validation_prediction = tf.nn.softmax(validation_graph)

+        print("Initializing !!")
         # Training
         with tf.Session() as session:
+            train_writer = tf.train.SummaryWriter('./LOGS/train',
+                                                  session.graph)

             tf.initialize_all_variables().run()

             for step in range(self.iterations):
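The added lines wire TensorBoard logging into the session, and `train_writer.close()` in the final hunk is the matching cleanup. `tf.train.SummaryWriter` and `tf.initialize_all_variables` are the spellings from the TensorFlow of this commit's era (later renamed to `tf.summary.FileWriter` and `tf.global_variables_initializer`), so this sketch of the pattern only runs against such an old build:

```python
import tensorflow as tf

with tf.Session() as session:
    # Record the graph definition (and any summaries added later) for TensorBoard.
    train_writer = tf.train.SummaryWriter('./LOGS/train', session.graph)
    tf.initialize_all_variables().run()
    # ... training steps go here ...
    train_writer.close()  # flush buffered events so TensorBoard can read them
```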
@@ -102,8 +108,8 @@ class Trainer(object):
                     feed_dict = {validation_placeholder_data: validation_data,
                                  validation_placeholder_labels: validation_labels}

-                    import ipdb;
-                    ipdb.set_trace();
+                    #import ipdb;
+                    #ipdb.set_trace();

                     l, predictions = session.run([loss_validation, validation_prediction], feed_dict=feed_dict)
                     accuracy = 100. * numpy.sum(numpy.argmax(predictions, 1) == validation_labels) / predictions.shape[0]
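The accuracy expression is dense, so a quick unpacking: `numpy.argmax(predictions, 1)` picks the highest-scoring class per row, comparing that against the integer labels yields a boolean vector, and summing it over the batch size gives the fraction correct. A worked example with made-up numbers:

```python
import numpy

predictions = numpy.array([[0.1, 0.7, 0.2],   # argmax -> 1
                           [0.8, 0.1, 0.1],   # argmax -> 0
                           [0.3, 0.3, 0.4]])  # argmax -> 2
validation_labels = numpy.array([1, 0, 1])

accuracy = 100. * numpy.sum(numpy.argmax(predictions, 1) == validation_labels) / predictions.shape[0]
print(accuracy)  # 66.66...: two of the three rows match their label
```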
@@ -114,3 +120,4 @@ class Trainer(object):
                 #                          validation_data_node)
                 #print("Step {0}. Loss = {1}, Lr={2}, Accuracy validation = {3}".format(step, l, lr, accuracy))
                 #sys.stdout.flush()
+            train_writer.close()
\ No newline at end of file