Commit 3241c38a authored by Amir MOHAMMADI

Add keras version of models

parent 4a714e00
import tensorflow as tf
def AlexNet_simplified(name="AlexNet", **kwargs):
    """A simplified implementation of AlexNet presented in:
    A. Krizhevsky, I. Sutskever, and G. E. Hinton, “ImageNet classification with deep
    convolutional neural networks,” in Advances in Neural Information Processing
    Systems, 2012, pp. 1097–1105.
    """
    model = tf.keras.Sequential(
        [
            tf.keras.Input(shape=(227, 227, 3)),
            tf.keras.layers.Conv2D(
                filters=96, kernel_size=11, strides=4, name="C1", activation="relu"
            ),
            tf.keras.layers.MaxPool2D(pool_size=3, strides=2, name="P1"),
            tf.keras.layers.Conv2D(
                filters=256, kernel_size=5, strides=1, name="C2",
                activation="relu", padding="same",
            ),
            tf.keras.layers.MaxPool2D(pool_size=3, strides=2, name="P2"),
            tf.keras.layers.Conv2D(
                filters=384, kernel_size=3, strides=1, name="C3",
                activation="relu", padding="same",
            ),
            tf.keras.layers.Conv2D(
                filters=384, kernel_size=3, strides=1, name="C4",
                activation="relu", padding="same",
            ),
            tf.keras.layers.Conv2D(
                filters=256, kernel_size=3, strides=1, name="C5",
                activation="relu", padding="same",
            ),
            tf.keras.layers.MaxPool2D(pool_size=3, strides=2, name="P5"),
            tf.keras.layers.Flatten(name="FLATTEN"),
            tf.keras.layers.Dropout(rate=0.5, name="D6"),
            tf.keras.layers.Dense(units=4096, activation="relu", name="F6"),
            tf.keras.layers.Dropout(rate=0.5, name="D7"),
            tf.keras.layers.Dense(units=4096, activation="relu", name="F7"),
            tf.keras.layers.Dense(units=1000, activation="softmax", name="OUTPUT"),
        ],
        name=name,
        **kwargs
    )
    return model
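
# A minimal usage sketch (illustrative, not part of the original commit):
#
#   model = AlexNet_simplified()
#   images = tf.random.uniform((1, 227, 227, 3))
#   probabilities = model(images, training=False)  # shape (1, 1000)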
if __name__ == "__main__":
    import pkg_resources
    from tabulate import tabulate

    from bob.learn.tensorflow.utils import model_summary

    model = AlexNet_simplified()
    model.summary()
    rows = model_summary(model, do_print=True)
    del rows[-2]
    print(tabulate(rows, headers="firstrow", tablefmt="latex"))
import tensorflow as tf
from bob.learn.tensorflow.models.densenet import densenet161
def _get_l2_kw(weight_decay):
@@ -9,13 +9,64 @@ def _get_l2_kw(weight_decay):
    return l2_kw
# class ConvDecoder(tf.keras.Sequential):
#     """The decoder similar to the one in
#     https://github.com/google/compare_gan/blob/master/compare_gan/architectures/sndcgan.py
#     """
#
#     def __init__(
#         self,
#         z_dim,
#         decoder_layers=(
#             (512, 7, 7, 0),
#             (256, 4, 2, 1),
#             (128, 4, 2, 1),
#             (64, 4, 2, 1),
#             (32, 4, 2, 1),
#             (16, 4, 2, 1),
#             (3, 1, 1, 0),
#         ),
#         weight_decay=1e-5,
#         name="Decoder",
#         **kwargs,
#     ):
#         self.z_dim = z_dim
#         self.data_format = data_format = "channels_last"
#         l2_kw = _get_l2_kw(weight_decay)
#         layers = [
#             tf.keras.layers.Reshape((1, 1, z_dim), input_shape=(z_dim,), name="reshape")
#         ]
#         for i, (filters, kernel_size, strides, cropping) in enumerate(decoder_layers):
#             dconv = tf.keras.layers.Conv2DTranspose(
#                 filters,
#                 kernel_size,
#                 strides=strides,
#                 use_bias=i == len(decoder_layers) - 1,
#                 data_format=data_format,
#                 name=f"dconv_{i}",
#                 **l2_kw,
#             )
#             crop = tf.keras.layers.Cropping2D(
#                 cropping=cropping, data_format=data_format, name=f"crop_{i}"
#             )
#             if i == len(decoder_layers) - 1:
#                 act = tf.keras.layers.Activation("tanh", name=f"tanh_{i}")
#                 bn = None
#             else:
#                 act = tf.keras.layers.Activation("relu", name=f"relu_{i}")
#                 bn = tf.keras.layers.BatchNormalization(
#                     scale=False, fused=False, name=f"bn_{i}"
#                 )
#             if bn is not None:
#                 layers.extend([dconv, crop, bn, act])
#             else:
#                 layers.extend([dconv, crop, act])
#         with tf.name_scope(name):
#             super().__init__(layers=layers, name=name, **kwargs)
def ConvDecoder(
    z_dim,
    decoder_layers=(
        (512, 7, 7, 0),
        (256, 4, 2, 1),
        (128, 4, 2, 1),
        (64, 4, 2, 1),
        (32, 4, 2, 1),
        (16, 4, 2, 1),
        (3, 1, 1, 0),
    ),
    weight_decay=1e-5,
    last_act="tanh",
    name="Decoder",
    **kwargs,
):
    """The decoder similar to the one in
    https://github.com/google/compare_gan/blob/master/compare_gan/architectures/sndcgan.py
    """
    data_format = "channels_last"
    l2_kw = _get_l2_kw(weight_decay)
    layers = [
        tf.keras.layers.Reshape(
            (1, 1, z_dim), input_shape=(z_dim,), name=f"{name}/reshape"
        )
    ]
    for i, (filters, kernel_size, strides, cropping) in enumerate(decoder_layers):
        dconv = tf.keras.layers.Conv2DTranspose(
            filters,
            kernel_size,
            strides=strides,
            use_bias=i == len(decoder_layers) - 1,
            data_format=data_format,
            name=f"{name}/dconv_{i}",
            **l2_kw,
        )
        crop = tf.keras.layers.Cropping2D(
            cropping=cropping, data_format=data_format, name=f"{name}/crop_{i}"
        )
        if i == len(decoder_layers) - 1:
            act = tf.keras.layers.Activation(
                last_act, name=f"{name}/{last_act}_{i}"
            )
            bn = None
        else:
            act = tf.keras.layers.Activation("relu", name=f"{name}/relu_{i}")
            bn = tf.keras.layers.BatchNormalization(
                scale=False, fused=False, name=f"{name}/bn_{i}"
            )
        if bn is not None:
            layers.extend([dconv, crop, bn, act])
        else:
            layers.extend([dconv, crop, act])
    return tf.keras.Sequential(layers, name=name, **kwargs)
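
# Shape sketch for the default decoder_layers (an illustrative note, not part
# of the original commit). With "valid" padding, Conv2DTranspose yields
# (in - 1) * strides + kernel_size pixels per side, and Cropping2D then trims
# `cropping` pixels from each border:
#
#   (1, 1, z_dim) -> 7x7x512 -> 14x14x256 -> 28x28x128 -> 56x56x64
#   -> 112x112x32 -> 224x224x16 -> 224x224x3 (last_act)
#
#   decoder = ConvDecoder(z_dim=256)
#   images = decoder(tf.random.normal((1, 256)))  # shape (1, 224, 224, 3)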
class Autoencoder(tf.keras.Model):
@@ -90,10 +148,28 @@ class Autoencoder(tf.keras.Model):
        x_hat = self.decoder(z, training=training)
        return z, x_hat
def autoencoder_face(z_dim=256, weight_decay=1e-9, decoder_last_act="tanh"):
    encoder = densenet161(
        output_classes=z_dim, weight_decay=weight_decay, weights=None, name="DenseNet"
    )
    decoder = ConvDecoder(
        z_dim=z_dim,
        weight_decay=weight_decay,
        last_act=decoder_last_act,
        name="Decoder",
    )
    autoencoder = Autoencoder(encoder, decoder, name="Autoencoder")
    return autoencoder
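
# A minimal usage sketch (illustrative, not part of the original commit;
# assumes 224x224 RGB inputs, the size the DenseNet encoder and the default
# decoder are configured for):
#
#   model = autoencoder_face(z_dim=256)
#   z, x_hat = model(tf.random.uniform((1, 224, 224, 3)), training=False)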
if __name__ == "__main__":
    import pkg_resources
    from tabulate import tabulate

    from bob.learn.tensorflow.utils import model_summary

    model = ConvDecoder(z_dim=256, weight_decay=1e-9, last_act="tanh", name="Decoder")
    model.summary()
    rows = model_summary(model, do_print=True)
    del rows[-2]
    print(tabulate(rows, headers="firstrow", tablefmt="latex"))
@@ -36,6 +36,10 @@ class ConvBlock(tf.keras.Model):
        axis = -1 if data_format == "channels_last" else 1
        inter_filter = num_filters * 4
        self.num_filters = num_filters
        self.bottleneck = bottleneck
        self.dropout_rate = dropout_rate

        self.norm1 = tf.keras.layers.BatchNormalization(axis=axis, name="norm1")
        if self.bottleneck:
            self.relu1 = tf.keras.layers.Activation("relu", name="relu1")
@@ -52,10 +56,10 @@ class ConvBlock(tf.keras.Model):
        self.norm2 = tf.keras.layers.BatchNormalization(axis=axis, name="norm2")
        self.relu2 = tf.keras.layers.Activation("relu", name="relu2")
        self.conv2_pad = tf.keras.layers.ZeroPadding2D(
            padding=1, data_format=data_format, name="conv2_pad"
        )
        # don't forget to set use_bias=False when using batchnorm
        self.conv2 = tf.keras.layers.Conv2D(
            num_filters,
            (3, 3),
@@ -109,6 +113,9 @@ class DenseBlock(tf.keras.Model):
    ):
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.growth_rate = growth_rate
        self.bottleneck = bottleneck
        self.dropout_rate = dropout_rate
        self.axis = -1 if data_format == "channels_last" else 1

        self.blocks = []
@@ -127,7 +134,9 @@ class DenseBlock(tf.keras.Model):
    def call(self, x, training=None):
        for i in range(int(self.num_layers)):
            output = self.blocks[i](x, training=training)
            x = tf.keras.layers.Concatenate(axis=self.axis, name=f"concat_{i+1}")(
                [x, output]
            )
        return x
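
    # Channel bookkeeping (an illustrative note, not part of the original
    # commit): each ConvBlock emits growth_rate feature maps that the
    # concatenation above appends to its input, so a DenseBlock maps C input
    # channels to C + num_layers * growth_rate outputs, e.g. 96 + 6 * 48 = 384
    # after the first block of densenet161 (matching the 56x56x384 input fed
    # to transition_blocks[0] in the __main__ example below).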
@@ -139,14 +148,12 @@ class TransitionBlock(tf.keras.Model):
    num_filters: number of filters passed to a convolutional layer.
    data_format: "channels_first" or "channels_last"
    weight_decay: weight decay
    """

    def __init__(self, num_filters, data_format, weight_decay=1e-4, **kwargs):
        super().__init__(**kwargs)
        axis = -1 if data_format == "channels_last" else 1
        self.num_filters = num_filters
        self.norm = tf.keras.layers.BatchNormalization(axis=axis, name="norm")
        self.relu = tf.keras.layers.Activation("relu", name="relu")
@@ -309,11 +316,11 @@ class DenseNet(tf.keras.Model):
            self.dense_blocks.append(
                DenseBlock(
                    self.num_layers_in_each_block[i],
                    growth_rate=self.growth_rate,
                    data_format=self.data_format,
                    bottleneck=self.bottleneck,
                    weight_decay=self.weight_decay,
                    dropout_rate=self.dropout_rate,
                    name=f"dense_block_{i+1}",
                )
            )
@@ -321,9 +328,8 @@ class DenseNet(tf.keras.Model):
            self.transition_blocks.append(
                TransitionBlock(
                    num_filters_after_each_block[i + 1],
                    data_format=self.data_format,
                    weight_decay=self.weight_decay,
                    name=f"transition_block_{i+1}",
                )
            )
@@ -440,3 +446,37 @@ class DeepPixBiS(tf.keras.Model):
            except TypeError:
                x = l(x)
        return x
if __name__ == "__main__":
    import pkg_resources
    from tabulate import tabulate

    from bob.learn.tensorflow.utils import model_summary

    def print_model(inputs, outputs):
        model = tf.keras.Model(inputs, outputs)
        rows = model_summary(model, do_print=True)
        del rows[-2]
        print(tabulate(rows, headers="firstrow", tablefmt="latex"))

    # inputs = tf.keras.Input((224, 224, 3), name="input")
    # model = densenet161(weights=None)
    # outputs = model.call(inputs)
    # print_model(inputs, outputs)

    # inputs = tf.keras.Input((56, 56, 96))
    # outputs = model.dense_blocks[0].call(inputs)
    # print_model(inputs, outputs)

    # inputs = tf.keras.Input((56, 56, 96))
    # outputs = model.dense_blocks[0].blocks[0].call(inputs)
    # print_model(inputs, outputs)

    # inputs = tf.keras.Input((56, 56, 384))
    # outputs = model.transition_blocks[0].call(inputs)
    # print_model(inputs, outputs)

    inputs = tf.keras.Input((224, 224, 3), name="input")
    model = DeepPixBiS()
    outputs = model.call(inputs)
    print_model(inputs, outputs)
import tensorflow as tf
class LRN(tf.keras.layers.Lambda):
    """Local response normalization with default parameters for GoogLeNet."""

    def __init__(self, alpha=0.0001, beta=0.75, depth_radius=5, **kwargs):
        # Capture the parameters in a closure and call super().__init__ first;
        # Keras layers must not have attributes set before initialization.
        def lrn(inputs):
            return tf.nn.local_response_normalization(
                inputs, alpha=alpha, beta=beta, depth_radius=depth_radius
            )

        super().__init__(lrn, **kwargs)
        self.alpha = alpha
        self.beta = beta
        self.depth_radius = depth_radius
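
# A minimal usage sketch (illustrative, not part of the original commit): the
# layer wraps tf.nn.local_response_normalization, normalizing each activation
# by a sum over depth_radius neighbouring channels, so the output shape equals
# the input shape:
#
#   lrn = LRN(name="norm1")
#   y = lrn(tf.random.uniform((1, 56, 56, 64)))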
class InceptionModule(tf.keras.Model):
    """The inception module as it was introduced in:
    C. Szegedy et al., “Going deeper with convolutions,” in Proceedings of the IEEE
    Conference on Computer Vision and Pattern Recognition, 2015, pp. 1–9.
    """

    def __init__(
        self,
        filter_1x1,
        filter_3x3_reduce,
        filter_3x3,
        filter_5x5_reduce,
        filter_5x5,
        pool_proj,
        name="InceptionModule",
        **kwargs
    ):
        super().__init__(name=name, **kwargs)
        self.filter_1x1 = filter_1x1
        self.filter_3x3_reduce = filter_3x3_reduce
        self.filter_3x3 = filter_3x3
        self.filter_5x5_reduce = filter_5x5_reduce
        self.filter_5x5 = filter_5x5
        self.pool_proj = pool_proj

        self.branch1_conv1 = tf.keras.layers.Conv2D(
            filter_1x1, 1, padding="same", activation="relu", name="branch1_conv1"
        )

        self.branch2_conv1 = tf.keras.layers.Conv2D(
            filter_3x3_reduce,
            1,
            padding="same",
            activation="relu",
            name="branch2_conv1",
        )
        self.branch2_conv2 = tf.keras.layers.Conv2D(
            filter_3x3, 3, padding="same", activation="relu", name="branch2_conv2"
        )

        self.branch3_conv1 = tf.keras.layers.Conv2D(
            filter_5x5_reduce,
            1,
            padding="same",
            activation="relu",
            name="branch3_conv1",
        )
        self.branch3_conv2 = tf.keras.layers.Conv2D(
            filter_5x5, 5, padding="same", activation="relu", name="branch3_conv2"
        )

        self.branch4_pool1 = tf.keras.layers.MaxPool2D(
            3, 1, padding="same", name="branch4_pool1"
        )
        self.branch4_conv1 = tf.keras.layers.Conv2D(
            pool_proj, 1, padding="same", activation="relu", name="branch4_conv1"
        )

        self.concat = tf.keras.layers.Concatenate(
            axis=-1 if tf.keras.backend.image_data_format() == "channels_last" else -3,
            name="concat",
        )

    def call(self, inputs):
        b1 = self.branch1_conv1(inputs)

        b2 = self.branch2_conv1(inputs)
        b2 = self.branch2_conv2(b2)

        b3 = self.branch3_conv1(inputs)
        b3 = self.branch3_conv2(b3)

        b4 = self.branch4_pool1(inputs)
        b4 = self.branch4_conv1(b4)

        return self.concat([b1, b2, b3, b4])
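
    # Depth bookkeeping (an illustrative note, not part of the original
    # commit): the concatenation yields filter_1x1 + filter_3x3 + filter_5x5
    # + pool_proj output channels, e.g. InceptionModule(64, 96, 128, 16, 32, 32)
    # maps a 28x28x192 input to 28x28x256, as in the inception (3a) row of the
    # GoogLeNet paper.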
def GoogLeNet(*, num_classes=1000, name="GoogLeNet", **kwargs):
    """GoogLeNet as depicted in Figure 3 of
    C. Szegedy et al., “Going deeper with convolutions,” in Proceedings of the IEEE
    Conference on Computer Vision and Pattern Recognition, 2015, pp. 1–9.
    and implemented in caffe:
    https://github.com/BVLC/caffe/tree/master/models/bvlc_googlenet
    """
    model = tf.keras.Sequential(
        [
            tf.keras.Input(shape=(224, 224, 3)),
            tf.keras.layers.Conv2D(
                64, 7, strides=2, padding="same", activation="relu", name="conv1/7x7_s2"
            ),
            tf.keras.layers.MaxPool2D(3, 2, padding="same", name="pool1/3x3_s2"),
            LRN(name="pool1/norm1"),
            tf.keras.layers.Conv2D(
                64, 1, padding="same", activation="relu", name="conv2/3x3_reduce"
            ),
            tf.keras.layers.Conv2D(
                192, 3, padding="same", activation="relu", name="conv2/3x3"
            ),
            LRN(name="conv2/norm2"),
            tf.keras.layers.MaxPool2D(3, 2, padding="same", name="pool2/3x3_s2"),
            InceptionModule(64, 96, 128, 16, 32, 32, name="inception_3a"),
            InceptionModule(128, 128, 192, 32, 96, 64, name="inception_3b"),
            tf.keras.layers.MaxPool2D(3, 2, padding="same", name="pool3/3x3_s2"),
            InceptionModule(192, 96, 208, 16, 48, 64, name="inception_4a"),
            InceptionModule(160, 112, 224, 24, 64, 64, name="inception_4b"),
            InceptionModule(128, 128, 256, 24, 64, 64, name="inception_4c"),
            InceptionModule(112, 144, 288, 32, 64, 64, name="inception_4d"),
            InceptionModule(256, 160, 320, 32, 128, 128, name="inception_4e"),
            tf.keras.layers.MaxPool2D(3, 2, padding="same", name="pool4/3x3_s2"),
            InceptionModule(256, 160, 320, 32, 128, 128, name="inception_5a"),
            InceptionModule(384, 192, 384, 48, 128, 128, name="inception_5b"),
            tf.keras.layers.GlobalAvgPool2D(name="pool5"),
            tf.keras.layers.Dropout(rate=0.4, name="dropout"),
            tf.keras.layers.Dense(num_classes, name="output", activation="softmax"),
        ],
        name=name,
        **kwargs
    )
    return model
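
# A minimal usage sketch (illustrative, not part of the original commit):
#
#   model = GoogLeNet()
#   probabilities = model(tf.random.uniform((1, 224, 224, 3)), training=False)
#   # probabilities has shape (1, 1000)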
if __name__ == "__main__":
    import pkg_resources
    from tabulate import tabulate

    from bob.learn.tensorflow.utils import model_summary

    inputs = tf.keras.Input((28, 28, 192), name="input")
    model = InceptionModule(64, 96, 128, 16, 32, 32)
    outputs = model.call(inputs)
    model = tf.keras.Model(inputs, outputs)
    rows = model_summary(model, do_print=True)
    del rows[-2]
    print(tabulate(rows, headers="firstrow", tablefmt="latex"))

    model = GoogLeNet()
    rows = model_summary(model, do_print=True)
    del rows[-2]
    print(tabulate(rows, headers="firstrow", tablefmt="latex"))
import tensorflow as tf
def LeNet5_simplified(name="LeNet5", **kwargs):
    """A heavily simplified implementation of LeNet-5 presented in:
    Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner, “Gradient-based learning applied to
    document recognition,” Proceedings of the IEEE, vol. 86, no. 11, pp. 2278–2324, 1998.
    """
    model = tf.keras.Sequential(
        [
            tf.keras.Input(shape=(32, 32, 1)),
            tf.keras.layers.Conv2D(
                filters=6, kernel_size=5, name="C1", activation="tanh"
            ),
            tf.keras.layers.AvgPool2D(pool_size=2, name="S2"),
            tf.keras.layers.Conv2D(
                filters=16, kernel_size=5, name="C3", activation="tanh"
            ),
            tf.keras.layers.AvgPool2D(pool_size=2, name="S4"),
            tf.keras.layers.Conv2D(
                filters=120, kernel_size=5, name="C5", activation="tanh"
            ),
            tf.keras.layers.Flatten(name="FLATTEN"),
            tf.keras.layers.Dense(units=84, activation="tanh", name="F6"),
            tf.keras.layers.Dense(units=10, activation="sigmoid", name="OUTPUT"),
        ],
        name=name,
        **kwargs
    )
    return model
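
# A minimal usage sketch (illustrative, not part of the original commit): the
# model expects 32x32 single-channel inputs and emits 10 class scores.
#
#   model = LeNet5_simplified()
#   scores = model(tf.random.uniform((1, 32, 32, 1)))  # shape (1, 10)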
if __name__ == "__main__":
    import pkg_resources
    from tabulate import tabulate

    from bob.learn.tensorflow.utils import model_summary

    model = LeNet5_simplified()
    model.summary()
    rows = model_summary(model, do_print=True)
    del rows[-2]
    print(tabulate(rows, headers="firstrow", tablefmt="latex"))
import tensorflow as tf
def MSUPatch(name="MSUPatch", **kwargs):

    return tf.keras.Sequential(
        [
            tf.keras.layers.Conv2D(
                50, (5, 5), padding="same", use_bias=False, name="Conv-1",
                input_shape=(96, 96, 3),
            ),
            tf.keras.layers.BatchNormalization(scale=False, name="BN-1"),
            tf.keras.layers.Activation("relu", name="ReLU-1"),
            tf.keras.layers.MaxPool2D(padding="same", name="MaxPool-1"),
            tf.keras.layers.Conv2D(
                100, (3, 3), padding="same", use_bias=False, name="Conv-2"
            ),
            tf.keras.layers.BatchNormalization(scale=False, name="BN-2"),
            tf.keras.layers.Activation("relu", name="ReLU-2"),
            tf.keras.layers.MaxPool2D(padding="same", name="MaxPool-2"),
            tf.keras.layers.Conv2D(
                150, (3, 3), padding="same", use_bias=False, name="Conv-3"
            ),
            tf.keras.layers.BatchNormalization(scale=False, name="BN-3"),
            tf.keras.layers.Activation("relu", name="ReLU-3"),
            tf.keras.layers.MaxPool2D(
                pool_size=3, strides=2, padding="same", name="MaxPool-3"
            ),
            tf.keras.layers.Conv2D(
                200, (3, 3), padding="same", use_bias=False, name="Conv-4"
            ),
            tf.keras.layers.BatchNormalization(scale=False, name="BN-4"),
            tf.keras.layers.Activation("relu", name="ReLU-4"),
            tf.keras.layers.MaxPool2D(padding="same", name="MaxPool-4"),
            tf.keras.layers.Conv2D(
                250, (3, 3), padding="same", use_bias=False, name="Conv-5"
            ),
            tf.keras.layers.BatchNormalization(scale=False, name="BN-5"),
            tf.keras.layers.Activation("relu", name="ReLU-5"),
            tf.keras.layers.MaxPool2D(padding="same", name="MaxPool-5"),
            tf.keras.layers.Flatten(name="Flatten"),
            tf.keras.layers.Dense(1000, use_bias=False, name="FC-1"),
            tf.keras.layers.BatchNormalization(scale=False, name="BN-6"),
            tf.keras.layers.Activation("relu", name="ReLU-6"),
            tf.keras.layers.Dropout(rate=0.5, name="Dropout"),
            tf.keras.layers.Dense(400, use_bias=False, name="FC-2"),
            tf.keras.layers.BatchNormalization(scale=False, name="BN-7"),
            tf.keras.layers.Activation("relu", name="ReLU-7"),
            tf.keras.layers.Dense(2, name="FC-3"),
        ],
        name=name,
        **kwargs
    )
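
# A minimal usage sketch (illustrative, not part of the original commit). FC-3
# has no activation, so the model returns two raw logits per 96x96 patch:
#
#   model = MSUPatch()
#   logits = model(tf.random.uniform((1, 96, 96, 3)), training=False)  # (1, 2)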
if __name__ == "__main__":
    import pkg_resources
    from tabulate import tabulate

    from bob.learn.tensorflow.utils import model_summary

    model = MSUPatch()
    model.summary()
    rows = model_summary(model, do_print=True)
    del rows[-2]
    print(tabulate(rows, headers="firstrow", tablefmt="latex"))
"""
The network using keras (same as new_architecture function below)::

    from tensorflow.python.keras import *
    from tensorflow.python.keras.layers import *

    simplecnn = Sequential([
        Conv2D(32, (3, 3), padding='same', use_bias=False, input_shape=(28, 28, 3)),
        BatchNormalization(scale=False),
        Activation('relu'),
        MaxPool2D(padding='same'),
        Conv2D(64, (3, 3), padding='same', use_bias=False),
        BatchNormalization(scale=False),
        Activation('relu'),
        MaxPool2D(padding='same'),
        Flatten(),
        Dense(1024, use_bias=False),
        BatchNormalization(scale=False),
        Activation('relu'),
        Dropout(rate=0.4),
        Dense(2, activation="softmax"),
    ])
    simplecnn.summary()

    _________________________________________________________________
    Layer (type)                 Output Shape              Param #
    =================================================================
    conv2d_1 (Conv2D)            (None, 28, 28, 32)        864
    _________________________________________________________________
    batch_normalization_1 (Batch (None, 28, 28, 32)        96
    _________________________________________________________________
    activation_1 (Activation)    (None, 28, 28, 32)        0
    _________________________________________________________________
    max_pooling2d_1 (MaxPooling2 (None, 14, 14, 32)        0
    _________________________________________________________________
    conv2d_2 (Conv2D)            (None, 14, 14, 64)        18432
    _________________________________________________________________
    batch_normalization_2 (Batch (None, 14, 14, 64)        192
    _________________________________________________________________
    activation_2 (Activation)    (None, 14, 14, 64)        0
    _________________________________________________________________
    max_pooling2d_2 (MaxPooling2 (None, 7, 7, 64)          0
    _________________________________________________________________
    flatten_1 (Flatten)          (None, 3136)              0
    _________________________________________________________________
    dense_1 (Dense)              (None, 1024)              3211264
    _________________________________________________________________
    batch_normalization_3 (Batch (None, 1024)              3072
    _________________________________________________________________
    activation_3 (Activation)    (None, 1024)              0
    _________________________________________________________________
    dropout_1 (Dropout)          (None, 1024)              0
    _________________________________________________________________
    dense_2 (Dense)              (None, 2)                 2050
    =================================================================
    Total params: 3,235,970
    Trainable params: 3,233,730
    Non-trainable params: 2,240
    _________________________________________________________________
"""
from tensorflow.python.keras import Sequential, Input
from tensorflow.python.keras.layers import (
Conv2D,
BatchNormalization,
Activation,
MaxPool2D,
Flatten,
Dense,
Dropout,
)
def SimpleCNN(input_shape=(28, 28, 3), inputs=None, name="SimpleCNN", **kwargs):
    if inputs is None:
        inputs = Input(input_shape)
    model = Sequential(
        [
            inputs,
            Conv2D(32, (3, 3), padding="same", use_bias=False),
            BatchNormalization(scale=False),
            Activation("relu"),
            MaxPool2D(padding="same"),