Commit fa2835e7 authored by Amir MOHAMMADI

Add base_arch

parent ed5faf01
import tensorflow as tf
def base_architecture(input_layer, mode, kernerl_size, data_format, **kwargs):
    """Build the shared convolutional trunk of the SimpleCNN model.

    Two conv+pool stages, a flatten, a 1024-unit dense layer and a dropout
    layer that is active only in TRAIN mode.

    Args:
        input_layer: Input image tensor fed to the first convolution.
        mode: A ``tf.estimator.ModeKeys`` value; dropout is applied only
            when this equals ``TRAIN``.
        kernerl_size: Convolution kernel size for both conv layers.
            NOTE(review): the spelling is a typo for ``kernel_size`` but is
            kept because it is part of the public interface.
        data_format: ``'channels_last'`` or ``'channels_first'``, forwarded
            to the conv and pooling layers.
        **kwargs: Ignored; accepted for call-site compatibility.

    Returns:
        A ``(dropout, endpoints)`` tuple: the dropout output tensor and a
        dict mapping layer names to their output tensors.
    """
    # Keep track of all the endpoints so callers can tap intermediate
    # activations.
    endpoints = {}

    # Stage 1: 32 same-padded ReLU feature maps, then 2x2 max-pool with
    # stride 2 (halves spatial resolution).
    net = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=kernerl_size,
        padding="same",
        activation=tf.nn.relu,
        data_format=data_format)
    endpoints['conv1'] = net
    net = tf.layers.max_pooling2d(inputs=net, pool_size=[2, 2],
                                  strides=2, data_format=data_format)
    endpoints['pool1'] = net

    # Stage 2: 64 same-padded ReLU feature maps, then another 2x2/2 pool.
    net = tf.layers.conv2d(
        inputs=net,
        filters=64,
        kernel_size=kernerl_size,
        padding="same",
        activation=tf.nn.relu,
        data_format=data_format)
    endpoints['conv2'] = net
    net = tf.layers.max_pooling2d(inputs=net, pool_size=[2, 2],
                                  strides=2, data_format=data_format)
    endpoints['pool2'] = net

    # Flatten the feature maps into a batch of vectors.
    # TODO: use tf.layers.flatten in tensorflow 1.4 and above
    net = tf.contrib.layers.flatten(net)
    endpoints['pool2_flat'] = net

    # Densely connected layer with 1024 neurons.
    net = tf.layers.dense(inputs=net, units=1024, activation=tf.nn.relu)
    endpoints['dense'] = net

    # Dropout with rate 0.4, i.e. 0.6 probability that an element is
    # kept; only active during training.
    dropout = tf.layers.dropout(
        inputs=net, rate=0.4,
        training=mode == tf.estimator.ModeKeys.TRAIN)
    endpoints['dropout'] = dropout

    return dropout, endpoints
def architecture(input_layer, mode=tf.estimator.ModeKeys.TRAIN,
kernerl_size=(3, 3), n_classes=2,
data_format='channels_last', reuse=False, **kwargs):
# Builds the full SimpleCNN: the base trunk plus a final logits layer.
# NOTE(review): `kernerl_size` is a typo for `kernel_size` but is part of
# the public signature -- renaming would break keyword callers.
# All variables are created under one scope; reuse=True rebuilds the
# graph sharing the already-created weights.
with tf.variable_scope('SimpleCNN', reuse=reuse):
# NOTE(review): this region is a scraped GitLab diff with the +/- markers
# lost.  The block from the conv1 layer down to endpoints['dropout']
# below appears to be the REMOVED pre-refactor inline body (it duplicates
# base_architecture line for line); the post-commit body is just the
# base_architecture() call further down plus the logits layer.  Confirm
# against the repository before relying on this reading.
# Convolutional Layer #1
# Computes 32 features using a kernerl_size filter with ReLU
# activation.
# Padding is added to preserve width and height.
conv1 = tf.layers.conv2d(
inputs=input_layer,
filters=32,
kernel_size=kernerl_size,
padding="same",
activation=tf.nn.relu,
data_format=data_format)
endpoints['conv1'] = conv1
# Pooling Layer #1
# First max pooling layer with a 2x2 filter and stride of 2
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2],
strides=2, data_format=data_format)
endpoints['pool1'] = pool1
# Convolutional Layer #2
# Computes 64 features using a kernerl_size filter.
# Padding is added to preserve width and height.
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=kernerl_size,
padding="same",
activation=tf.nn.relu,
data_format=data_format)
endpoints['conv2'] = conv2
# Pooling Layer #2
# Second max pooling layer with a 2x2 filter and stride of 2
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2],
strides=2, data_format=data_format)
endpoints['pool2'] = pool2
# Flatten tensor into a batch of vectors
# TODO: use tf.layers.flatten in tensorflow 1.4 and above
pool2_flat = tf.contrib.layers.flatten(pool2)
endpoints['pool2_flat'] = pool2_flat
# Dense Layer
# Densely connected layer with 1024 neurons
dense = tf.layers.dense(
inputs=pool2_flat, units=1024, activation=tf.nn.relu)
endpoints['dense'] = dense
# Add dropout operation; 0.6 probability that element will be kept
dropout = tf.layers.dropout(
inputs=dense, rate=0.4,
training=mode == tf.estimator.ModeKeys.TRAIN)
endpoints['dropout'] = dropout
# Post-commit body: delegate the trunk to the shared helper above.
dropout, endpoints = base_architecture(
input_layer, mode, kernerl_size, data_format)
# Logits layer
# Input Tensor Shape: [batch_size, 1024]
# Output Tensor Shape: [batch_size, 2]
# Output Tensor Shape: [batch_size, n_classes]
logits = tf.layers.dense(inputs=dropout, units=n_classes)
endpoints['logits'] = logits
# NOTE(review): the function's return statement falls inside the elided
# diff hunk that follows and is not visible here -- presumably it returns
# (logits, endpoints); verify against the repository.
......@@ -75,7 +82,7 @@ def architecture(input_layer, mode=tf.estimator.ModeKeys.TRAIN,
def model_fn(features, labels, mode, params=None, config=None):
"""Model function for CNN."""
# NOTE(review): scraped diff with +/- markers lost -- the adjacent
# keys/key lines below are the removed and added sides of one rename;
# the post-commit code keeps the singular 'key' spelling.
data = features['data']
keys = features['key']
key = features['key']
# params is an optional dict of hyper-parameters; fall back to {} so the
# .get() lookups below are safe.
params = params or {}
learning_rate = params.get('learning_rate', 1e-5)
# (diff hunk elided here by the scrape; intervening lines not visible)
......@@ -95,7 +102,7 @@ def model_fn(features, labels, mode, params=None, config=None):
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the
# `logging_hook`.
"probabilities": tf.nn.softmax(logits, name="softmax_tensor"),
'keys': keys,
'key': key,
}
# In PREDICT mode, short-circuit with the predictions dict only.
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment