Commit e2deb70d authored by Amir MOHAMMADI's avatar Amir MOHAMMADI
Browse files

Fix the documentation

parent 5ba976b8
Pipeline #24654 failed with stage
in 52 minutes and 49 seconds
......@@ -30,13 +30,13 @@ class BioGenerator(object):
``data = load_data(database, biofile)``.
:any:`bob.bio.base.read_original_data` is wrapped to be used by
default.
multiple_samples : bool, optional
multiple_samples : :obj:`bool`, optional
If true, it assumes that the bio database's samples actually contain
multiple samples. This is useful when you want to, for example, treat
video databases as image databases.
output_types : (object, object, object)
The types of the returned samples.
output_shapes : (tf.TensorShape, tf.TensorShape, tf.TensorShape)
output_shapes : ``(tf.TensorShape, tf.TensorShape, tf.TensorShape)``
The shapes of the returned samples.
"""
......
"""
The network using keras (same as new_architecture function below):
```
from tensorflow.python.keras import *
from tensorflow.python.keras.layers import *
simplecnn = Sequential([
Conv2D(32,(3,3),padding='same',use_bias=False, input_shape=(28,28,3)),
BatchNormalization(scale=False),
Activation('relu'),
MaxPool2D(padding='same'),
Conv2D(64,(3,3),padding='same',use_bias=False),
BatchNormalization(scale=False),
Activation('relu'),
MaxPool2D(padding='same'),
Flatten(),
Dense(1024, use_bias=False),
BatchNormalization(scale=False),
Activation('relu'),
Dropout(rate=0.4),
Dense(2),
])
simplecnn.summary()
```
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_1 (Conv2D) (None, 28, 28, 32) 864
_________________________________________________________________
batch_normalization_1 (Batch (None, 28, 28, 32) 96
_________________________________________________________________
activation_1 (Activation) (None, 28, 28, 32) 0
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 14, 14, 32) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 14, 14, 64) 18432
_________________________________________________________________
batch_normalization_2 (Batch (None, 14, 14, 64) 192
_________________________________________________________________
activation_2 (Activation) (None, 14, 14, 64) 0
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 7, 7, 64) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 3136) 0
_________________________________________________________________
dense_1 (Dense) (None, 1024) 3211264
_________________________________________________________________
batch_normalization_3 (Batch (None, 1024) 3072
_________________________________________________________________
activation_3 (Activation) (None, 1024) 0
_________________________________________________________________
dropout_1 (Dropout) (None, 1024) 0
_________________________________________________________________
dense_2 (Dense) (None, 2) 2050
=================================================================
Total params: 3,235,970
Trainable params: 3,233,730
Non-trainable params: 2,240
_________________________________________________________________
The network using keras (same as new_architecture function below)::
from tensorflow.python.keras import *
from tensorflow.python.keras.layers import *
simplecnn = Sequential([
Conv2D(32,(3,3),padding='same',use_bias=False, input_shape=(28,28,3)),
BatchNormalization(scale=False),
Activation('relu'),
MaxPool2D(padding='same'),
Conv2D(64,(3,3),padding='same',use_bias=False),
BatchNormalization(scale=False),
Activation('relu'),
MaxPool2D(padding='same'),
Flatten(),
Dense(1024, use_bias=False),
BatchNormalization(scale=False),
Activation('relu'),
Dropout(rate=0.4),
Dense(2),
])
simplecnn.summary()
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_1 (Conv2D) (None, 28, 28, 32) 864
_________________________________________________________________
batch_normalization_1 (Batch (None, 28, 28, 32) 96
_________________________________________________________________
activation_1 (Activation) (None, 28, 28, 32) 0
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 14, 14, 32) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 14, 14, 64) 18432
_________________________________________________________________
batch_normalization_2 (Batch (None, 14, 14, 64) 192
_________________________________________________________________
activation_2 (Activation) (None, 14, 14, 64) 0
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 7, 7, 64) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 3136) 0
_________________________________________________________________
dense_1 (Dense) (None, 1024) 3211264
_________________________________________________________________
batch_normalization_3 (Batch (None, 1024) 3072
_________________________________________________________________
activation_3 (Activation) (None, 1024) 0
_________________________________________________________________
dropout_1 (Dropout) (None, 1024) 0
_________________________________________________________________
dense_2 (Dense) (None, 2) 2050
=================================================================
Total params: 3,235,970
Trainable params: 3,233,730
Non-trainable params: 2,240
_________________________________________________________________
"""
......
......@@ -21,7 +21,7 @@ def compute_features(input_image, architecture, checkpoint_dir, target_end_point
Parameters
----------
input_image: numpy.array
input_image: :any:`numpy.ndarray`
Input image in the format WxHxC
architecture:
......@@ -77,7 +77,7 @@ def compute_gram(features):
Parameters
----------
features: numpy.array
features: :any:`numpy.ndarray`
Convolved features in the format NxWxHxC
"""
......@@ -90,7 +90,7 @@ def compute_gram(features):
return grams
def do_style_transfer(content_image, style_images,
def do_style_transfer(content_image, style_images,
architecture, checkpoint_dir, scopes,
content_end_points, style_end_points,
preprocess_fn=None, un_preprocess_fn=None, pure_noise=False,
......@@ -105,7 +105,7 @@ def do_style_transfer(content_image, style_images,
Parameters
----------
content_image: numpy.array
content_image: :any:`numpy.ndarray`
Content image in the Bob format (C x W x H)
style_images: :any:`list`
......@@ -128,7 +128,7 @@ def do_style_transfer(content_image, style_images,
preprocess_fn:
Preprocess function. Pointer to a function that preprocesses the INPUT signal
un_preprocess_fn:
Un-preprocess function. Pointer to a function that un-preprocesses the OUTPUT signal
......@@ -138,7 +138,7 @@ def do_style_transfer(content_image, style_images,
iterations:
Number of iterations to generate the image
learning_rate:
Adam learning rate
......@@ -147,7 +147,7 @@ def do_style_transfer(content_image, style_images,
style_weight:
Weight of the style loss
denoise_weight:
Weight of the denoising loss
"""
......@@ -178,10 +178,10 @@ def do_style_transfer(content_image, style_images,
content_end_points, preprocess_fn)
# Base style features
logger.info("Computing style features")
logger.info("Computing style features")
style_grams = []
for image in style_images:
style_features = compute_features(image, architecture, checkpoint_dir,
style_features = compute_features(image, architecture, checkpoint_dir,
style_end_points, preprocess_fn)
style_grams.append(compute_gram(style_features))
......@@ -206,7 +206,7 @@ def do_style_transfer(content_image, style_images,
# Computing style_loss
style_gram_noises = []
s_loss = 0
for grams_per_image in style_grams:
for c in style_end_points:
......@@ -227,7 +227,7 @@ def do_style_transfer(content_image, style_images,
tf.contrib.framework.init_from_checkpoint(tf.train.latest_checkpoint(checkpoint_dir) if os.path.isdir(checkpoint_dir) else checkpoint_dir, scopes)
# Training
with tf.Session() as sess:
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(iterations):
......@@ -258,7 +258,7 @@ def do_style_transfer(content_image, style_images,
else:
normalized_style_image_yuv = bob.ip.color.rgb_to_yuv(bob.ip.color.gray_to_rgb(bob.ip.color.rgb_to_gray(normalized_style_image)))
content_image_yuv = bob.ip.color.rgb_to_yuv(bob.io.base.load(content_image_path))
output_image = numpy.zeros(shape=content_image_yuv.shape, dtype="uint8")
output_image[0,:,:] = normalized_style_image_yuv[0,:,:]
output_image[1,:,:] = content_image_yuv[1,:,:]
......
......@@ -40,6 +40,7 @@ requirements:
run:
- python
- setuptools
- numpy
- scipy
- six
- tensorflow >=1.4
......
......@@ -31,7 +31,7 @@ Architectures
bob.learn.tensorflow.network.inception_resnet_v1
bob.learn.tensorflow.network.inception_resnet_v2_batch_norm
bob.learn.tensorflow.network.inception_resnet_v1_batch_norm
bob.learn.tensorflow.network.SimpleCNN.base_architecture
bob.learn.tensorflow.network.SimpleCNN.slim_architecture
bob.learn.tensorflow.network.vgg_19
bob.learn.tensorflow.network.vgg_16
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment