Commit 5c4ecf64 authored by Amir MOHAMMADI

Allow for transfer learning maybe?

parent e1992171
1 merge request: !33 Changes to the biogenerator
@@ -3,68 +3,71 @@ import tensorflow as tf
 def architecture(input_layer, mode=tf.estimator.ModeKeys.TRAIN,
                  kernerl_size=(3, 3), n_classes=2,
-                 data_format='channels_last'):
+                 data_format='channels_last', reuse=False):
     # Keep track of all the endpoints
     endpoints = {}
-    # Convolutional Layer #1
-    # Computes 32 features using a kernerl_size filter with ReLU activation.
-    # Padding is added to preserve width and height.
-    conv1 = tf.layers.conv2d(
-        inputs=input_layer,
-        filters=32,
-        kernel_size=kernerl_size,
-        padding="same",
-        activation=tf.nn.relu,
-        data_format=data_format)
-    endpoints['conv1'] = conv1
-    # Pooling Layer #1
-    # First max pooling layer with a 2x2 filter and stride of 2
-    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2,
-                                    data_format=data_format)
-    endpoints['pool1'] = pool1
-    # Convolutional Layer #2
-    # Computes 64 features using a kernerl_size filter.
-    # Padding is added to preserve width and height.
-    conv2 = tf.layers.conv2d(
-        inputs=pool1,
-        filters=64,
-        kernel_size=kernerl_size,
-        padding="same",
-        activation=tf.nn.relu,
-        data_format=data_format)
-    endpoints['conv2'] = conv2
-    # Pooling Layer #2
-    # Second max pooling layer with a 2x2 filter and stride of 2
-    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2,
-                                    data_format=data_format)
-    endpoints['pool2'] = pool2
-    # Flatten tensor into a batch of vectors
-    # TODO: use tf.layers.flatten in tensorflow 1.4 and above
-    pool2_flat = tf.contrib.layers.flatten(pool2)
-    endpoints['pool2_flat'] = pool2_flat
-    # Dense Layer
-    # Densely connected layer with 1024 neurons
-    dense = tf.layers.dense(
-        inputs=pool2_flat, units=1024, activation=tf.nn.relu)
-    endpoints['dense'] = dense
-    # Add dropout operation; 0.6 probability that element will be kept
-    dropout = tf.layers.dropout(
-        inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
-    endpoints['dropout'] = dropout
-    # Logits layer
-    # Input Tensor Shape: [batch_size, 1024]
-    # Output Tensor Shape: [batch_size, 2]
-    logits = tf.layers.dense(inputs=dropout, units=n_classes)
-    endpoints['logits'] = logits
+    with tf.variable_scope('SimpleCNN', reuse=reuse):
+        # Convolutional Layer #1
+        # Computes 32 features using a kernerl_size filter with ReLU
+        # activation.
+        # Padding is added to preserve width and height.
+        conv1 = tf.layers.conv2d(
+            inputs=input_layer,
+            filters=32,
+            kernel_size=kernerl_size,
+            padding="same",
+            activation=tf.nn.relu,
+            data_format=data_format)
+        endpoints['conv1'] = conv1
+        # Pooling Layer #1
+        # First max pooling layer with a 2x2 filter and stride of 2
+        pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2],
+                                        strides=2, data_format=data_format)
+        endpoints['pool1'] = pool1
+        # Convolutional Layer #2
+        # Computes 64 features using a kernerl_size filter.
+        # Padding is added to preserve width and height.
+        conv2 = tf.layers.conv2d(
+            inputs=pool1,
+            filters=64,
+            kernel_size=kernerl_size,
+            padding="same",
+            activation=tf.nn.relu,
+            data_format=data_format)
+        endpoints['conv2'] = conv2
+        # Pooling Layer #2
+        # Second max pooling layer with a 2x2 filter and stride of 2
+        pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2],
+                                        strides=2, data_format=data_format)
+        endpoints['pool2'] = pool2
+        # Flatten tensor into a batch of vectors
+        # TODO: use tf.layers.flatten in tensorflow 1.4 and above
+        pool2_flat = tf.contrib.layers.flatten(pool2)
+        endpoints['pool2_flat'] = pool2_flat
+        # Dense Layer
+        # Densely connected layer with 1024 neurons
+        dense = tf.layers.dense(
+            inputs=pool2_flat, units=1024, activation=tf.nn.relu)
+        endpoints['dense'] = dense
+        # Add dropout operation; 0.6 probability that element will be kept
+        dropout = tf.layers.dropout(
+            inputs=dense, rate=0.4,
+            training=mode == tf.estimator.ModeKeys.TRAIN)
+        endpoints['dropout'] = dropout
+        # Logits layer
+        # Input Tensor Shape: [batch_size, 1024]
+        # Output Tensor Shape: [batch_size, 2]
+        logits = tf.layers.dense(inputs=dropout, units=n_classes)
+        endpoints['logits'] = logits
     return logits, endpoints
@@ -117,7 +120,6 @@ def model_fn(features, labels, mode, params=None, config=None):
     else:
         train_op = None
     return tf.estimator.EstimatorSpec(
         mode=mode,
         predictions=predictions,
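The new reuse argument is passed straight into tf.variable_scope, so a second call to architecture can share the weights created by a first call instead of building a fresh set, which is what makes the transfer-learning use hinted at in the commit message possible. Below is a minimal sketch of that usage, not part of the commit itself: the architecture function above is assumed to be in scope, and the placeholder shapes and PREDICT mode are illustrative assumptions only.

import tensorflow as tf

# Hypothetical input placeholders; the 28x28x1 shape is only an assumption.
images_a = tf.placeholder(tf.float32, [None, 28, 28, 1])
images_b = tf.placeholder(tf.float32, [None, 28, 28, 1])

# First call creates the variables under the 'SimpleCNN' scope.
logits_a, endpoints_a = architecture(
    images_a, mode=tf.estimator.ModeKeys.PREDICT)

# Second call reuses those same variables instead of creating new ones,
# e.g. to fine-tune on a new task or to build a shared-weight branch.
logits_b, endpoints_b = architecture(
    images_b, mode=tf.estimator.ModeKeys.PREDICT, reuse=True)

# Collect the shared variables so they can be restored from a
# pre-trained checkpoint when transferring weights.
simple_cnn_vars = tf.get_collection(
    tf.GraphKeys.GLOBAL_VARIABLES, scope='SimpleCNN')
saver = tf.train.Saver(var_list=simple_cnn_vars)

Because both calls resolve to the same 'SimpleCNN' scope, the saver above restores one set of weights that feeds both branches of the graph.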