Commit 5a60ec56 authored by Tiago de Freitas Pereira

New networks

parent c010eddc
1 merge request: !13 Updates
@@ -22,130 +22,136 @@ class LightCNN9(object):
         self.batch_norm = batch_norm
         self.n_classes = n_classes
 
-    def __call__(self, inputs, reuse=False):
+    def __call__(self, inputs, reuse=False, get_class_layer=True):
         slim = tf.contrib.slim
 
-        with tf.device(self.device):
+        #with tf.device(self.device):
 
         initializer = tf.contrib.layers.xavier_initializer(uniform=False, dtype=tf.float32, seed=self.seed)
 
         graph = slim.conv2d(inputs, 96, [5, 5], activation_fn=tf.nn.relu,
                             stride=1,
                             weights_initializer=initializer,
                             scope='Conv1',
                             reuse=reuse)
 
         graph = maxout(graph,
                        num_units=48,
                        name='Maxout1')
 
         graph = slim.max_pool2d(graph, [2, 2], stride=2, padding="SAME", scope='Pool1')
 
         ####
         graph = slim.conv2d(graph, 96, [1, 1], activation_fn=tf.nn.relu,
                             stride=1,
                             weights_initializer=initializer,
                             scope='Conv2a',
                             reuse=reuse)
 
         graph = maxout(graph,
                        num_units=48,
                        name='Maxout2a')
 
         graph = slim.conv2d(graph, 192, [3, 3], activation_fn=tf.nn.relu,
                             stride=1,
                             weights_initializer=initializer,
                             scope='Conv2',
                             reuse=reuse)
 
         graph = maxout(graph,
                        num_units=96,
                        name='Maxout2')
 
         graph = slim.max_pool2d(graph, [2, 2], stride=2, padding="SAME", scope='Pool2')
 
         #####
         graph = slim.conv2d(graph, 192, [1, 1], activation_fn=tf.nn.relu,
                             stride=1,
                             weights_initializer=initializer,
                             scope='Conv3a',
                             reuse=reuse)
 
         graph = maxout(graph,
                        num_units=96,
                        name='Maxout3a')
 
         graph = slim.conv2d(graph, 384, [3, 3], activation_fn=tf.nn.relu,
                             stride=1,
                             weights_initializer=initializer,
                             scope='Conv3',
                             reuse=reuse)
 
         graph = maxout(graph,
                        num_units=192,
                        name='Maxout3')
 
         graph = slim.max_pool2d(graph, [2, 2], stride=2, padding="SAME", scope='Pool3')
 
         #####
         graph = slim.conv2d(graph, 384, [1, 1], activation_fn=tf.nn.relu,
                             stride=1,
                             weights_initializer=initializer,
                             scope='Conv4a',
                             reuse=reuse)
 
         graph = maxout(graph,
                        num_units=192,
                        name='Maxout4a')
 
         graph = slim.conv2d(graph, 256, [3, 3], activation_fn=tf.nn.relu,
                             stride=1,
                             weights_initializer=initializer,
                             scope='Conv4',
                             reuse=reuse)
 
         graph = maxout(graph,
                        num_units=128,
                        name='Maxout4')
 
         #####
         graph = slim.conv2d(graph, 256, [1, 1], activation_fn=tf.nn.relu,
                             stride=1,
                             weights_initializer=initializer,
                             scope='Conv5a',
                             reuse=reuse)
 
         graph = maxout(graph,
                        num_units=128,
                        name='Maxout5a')
 
         graph = slim.conv2d(graph, 256, [3, 3], activation_fn=tf.nn.relu,
                             stride=1,
                             weights_initializer=initializer,
                             scope='Conv5',
                             reuse=reuse)
 
         graph = maxout(graph,
                        num_units=128,
                        name='Maxout5')
 
         graph = slim.max_pool2d(graph, [2, 2], stride=2, padding="SAME", scope='Pool4')
 
         graph = slim.flatten(graph, scope='flatten1')
 
+        graph = slim.dropout(graph, keep_prob=0.3, scope='dropout1')
+
         graph = slim.fully_connected(graph, 512,
                                      weights_initializer=initializer,
                                      activation_fn=tf.nn.relu,
                                      scope='fc1',
                                      reuse=reuse)
-
-        graph = maxout(graph,
-                       num_units=256,
-                       name='Maxoutfc1')
+        #graph = maxout(graph,
+        #               num_units=256,
+        #               name='Maxoutfc1')
+
+        graph = slim.dropout(graph, keep_prob=0.3, scope='dropout2')
 
-        graph = slim.fully_connected(graph, self.n_classes,
-                                     weights_initializer=initializer,
-                                     activation_fn=None,
+        if get_class_layer:
+            graph = slim.fully_connected(graph, self.n_classes,
+                                         weights_initializer=initializer,
+                                         activation_fn=None,
...
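Note on the maxout helper used throughout this hunk: it is imported from elsewhere in the package, so its definition is not part of this commit. In the sense of Goodfellow et al.'s maxout activation, it splits the channel axis into num_units groups and keeps the element-wise maximum, which is why Conv1's 96 output channels become 48 after Maxout1. A minimal TF1-style sketch, assuming channels-last tensors (an illustration, not the repository's actual implementation):

import tensorflow as tf

def maxout(inputs, num_units, name=None):
    # Illustrative sketch only; the package imports its own maxout helper.
    # Splits the last (channel) axis into num_units groups and reduces each
    # group with an element-wise max, e.g. 96 channels -> 48 units.
    num_channels = inputs.get_shape().as_list()[-1]
    if num_channels % num_units != 0:
        raise ValueError('number of channels (%d) is not a multiple of '
                         'num_units (%d)' % (num_channels, num_units))
    with tf.name_scope(name, 'maxout', [inputs]):
        new_shape = tf.concat(
            [tf.shape(inputs)[:-1], [num_units, num_channels // num_units]],
            axis=0)
        return tf.reduce_max(tf.reshape(inputs, new_shape), axis=-1)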
 from .Chopra import Chopra
 from .LightCNN9 import LightCNN9
+from .LightCNN29 import LightCNN29
 from .Dummy import Dummy
 from .MLP import MLP
 from .Embedding import Embedding
+from .InceptionResnetV2 import inception_resnet_v2
 
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
...
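For context, the get_class_layer flag introduced above lets one graph definition serve both training (logits over n_classes) and embedding extraction (the 512-dimensional fc1 output). A hypothetical usage sketch; the import path, constructor arguments, and input shape are assumptions, since none of them is visible in these hunks:

import tensorflow as tf
from bob.learn.tensorflow.network import LightCNN9  # assumed import path

network = LightCNN9(n_classes=10)  # constructor args are an assumption

# 128x128 grayscale inputs are assumed for illustration.
images = tf.placeholder(tf.float32, shape=(None, 128, 128, 1))

# First call builds the variables and keeps the classification layer.
logits = network(images)

# Second call reuses the same variables but stops before the final
# fully-connected layer, yielding 512-d features from fc1.
embeddings = network(images, reuse=True, get_class_layer=False)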