bob / bob.learn.tensorflow · Commits · 573b8230

Commit 573b8230 authored Sep 08, 2020 by Amir MOHAMMADI

    Ran the automated conversion script

parent f8449547
Changes: 40
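The "automated conversion script" is presumably TensorFlow's tf_upgrade_v2
upgrade tool (the commit message does not name it). Its rewrite pattern is
mechanical and repeats in every hunk below: symbols dropped from the top-level
tf namespace move under tf.compat.v1 or their new module (often tf.io), and
renamed positional arguments become explicit keyword arguments. A minimal
sketch of the pattern, using a hypothetical file path:

    import tensorflow as tf

    # TF1 spelling           ->  what the upgrade tool emits
    # tf.read_file(p)        ->  tf.io.read_file(p)
    # tf.py_func(f, a, T)    ->  tf.compat.v1.py_func(f, a, T)
    # tf.set_random_seed(0)  ->  tf.compat.v1.set_random_seed(0)
    # tf.shape(x)            ->  tf.shape(input=x)

    raw = tf.io.read_file("image.png")  # hypothetical path
    img = tf.cast(tf.image.decode_image(raw), tf.float32)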
bob/learn/tensorflow/dataset/__init__.py

...
@@ -4,9 +4,9 @@ import os
 import bob.io.base

 DEFAULT_FEATURE = {
-    "data": tf.FixedLenFeature([], tf.string),
-    "label": tf.FixedLenFeature([], tf.int64),
-    "key": tf.FixedLenFeature([], tf.string),
+    "data": tf.io.FixedLenFeature([], tf.string),
+    "label": tf.io.FixedLenFeature([], tf.int64),
+    "key": tf.io.FixedLenFeature([], tf.string),
 }
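The migrated tf.io.FixedLenFeature spec above is what tf.io.parse_single_example
consumes later in this file. A self-contained sketch of that round trip, with
made-up payload values:

    import tensorflow as tf

    feature_spec = {
        "data": tf.io.FixedLenFeature([], tf.string),
        "label": tf.io.FixedLenFeature([], tf.int64),
        "key": tf.io.FixedLenFeature([], tf.string),
    }

    # One serialized tf.train.Example with made-up values.
    example = tf.train.Example(
        features=tf.train.Features(
            feature={
                "data": tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=[b"\x00\x01"])
                ),
                "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[7])),
                "key": tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=[b"sample-0"])
                ),
            }
        )
    )
    parsed = tf.io.parse_single_example(example.SerializeToString(), feature_spec)
    # parsed["label"] is a scalar int64 tensor holding 7.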
...
@@ -32,110 +32,110 @@ def from_filename_to_tensor(filename, extension=None):
     """
     if extension == "hdf5":
-        return tf.py_func(from_hdf5file_to_tensor, [filename], [tf.float32])
+        return tf.compat.v1.py_func(from_hdf5file_to_tensor, [filename], [tf.float32])
     else:
-        return tf.cast(tf.image.decode_image(tf.read_file(filename)), tf.float32)
+        return tf.cast(tf.image.decode_image(tf.io.read_file(filename)), tf.float32)


 def append_image_augmentation(
     image,
     gray_scale=False,
     output_shape=None,
     random_flip=False,
     random_brightness=False,
     random_contrast=False,
     random_saturation=False,
     random_rotate=False,
     per_image_normalization=True,
     random_gamma=False,
     random_crop=False,
 ):
     """
     Append to the current tensor some random image augmentation operation

     **Parameters**

     gray_scale:
        Convert to gray scale?

     output_shape:
        If set, will randomly crop the image given the output shape

     random_flip:
        Randomly flip an image horizontally (https://www.tensorflow.org/api_docs/python/tf/image/random_flip_left_right)

     random_brightness:
         Adjust the brightness of an RGB image by a random factor (https://www.tensorflow.org/api_docs/python/tf/image/random_brightness)

     random_contrast:
         Adjust the contrast of an RGB image by a random factor (https://www.tensorflow.org/api_docs/python/tf/image/random_contrast)

     random_saturation:
         Adjust the saturation of an RGB image by a random factor (https://www.tensorflow.org/api_docs/python/tf/image/random_saturation)

     random_rotate:
         Randomly rotate face images between -5 and 5 degrees

     per_image_normalization:
         Linearly scales image to have zero mean and unit norm.
     """
     # Changing the range from 0-255 to 0-1
     image = tf.cast(image, tf.float32) / 255

     # FORCING A SEED FOR THE RANDOM OPERATIONS
-    tf.set_random_seed(0)
+    tf.compat.v1.set_random_seed(0)

     if output_shape is not None:
         assert len(output_shape) == 2
         if random_crop:
-            image = tf.random_crop(image, size=list(output_shape) + [3])
+            image = tf.image.random_crop(image, size=list(output_shape) + [3])
         else:
-            image = tf.image.resize_image_with_crop_or_pad(
+            image = tf.image.resize_with_crop_or_pad(
                 image, output_shape[0], output_shape[1]
             )

     if random_flip:
         image = tf.image.random_flip_left_right(image)

     if random_brightness:
         image = tf.image.random_brightness(image, max_delta=0.15)
         image = tf.clip_by_value(image, 0, 1)

     if random_contrast:
         image = tf.image.random_contrast(image, lower=0.85, upper=1.15)
         image = tf.clip_by_value(image, 0, 1)

     if random_saturation:
         image = tf.image.random_saturation(image, lower=0.85, upper=1.15)
         image = tf.clip_by_value(image, 0, 1)

     if random_gamma:
         image = tf.image.adjust_gamma(
             image, gamma=tf.random.uniform(shape=[], minval=0.85, maxval=1.15)
         )
         image = tf.clip_by_value(image, 0, 1)

     if random_rotate:
         # from https://stackoverflow.com/a/53855704/1286165
         degree = 0.08726646259971647  # math.pi * 5 /180
         random_angles = tf.random.uniform(shape=(1,), minval=-degree, maxval=degree)
         image = tf.contrib.image.transform(
             image,
             tf.contrib.image.angles_to_projective_transforms(
                 random_angles,
-                tf.cast(tf.shape(image)[-3], tf.float32),
-                tf.cast(tf.shape(image)[-2], tf.float32),
+                tf.cast(tf.shape(input=image)[-3], tf.float32),
+                tf.cast(tf.shape(input=image)[-2], tf.float32),
             ),
         )

     if gray_scale:
         image = tf.image.rgb_to_grayscale(image, name="rgb_to_gray")

     # normalizing data
     if per_image_normalization:
         image = tf.image.per_image_standardization(image)

     return image
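Note that the upgrade tool leaves tf.contrib.image.transform untouched above;
tf.contrib does not exist in TensorFlow 2, so the random_rotate branch still
needs a TF1 runtime. A possible TF2 rewrite (an assumption, not part of this
commit) would use the tensorflow_addons package:

    import tensorflow as tf
    import tensorflow_addons as tfa  # extra dependency, not used by this commit

    degree = 0.08726646259971647  # math.pi * 5 / 180
    angle = tf.random.uniform(shape=(1,), minval=-degree, maxval=degree)
    image = tf.zeros([1, 112, 112, 3])  # hypothetical image batch
    rotated = tfa.image.rotate(image, angle)  # rotation by `angle` radians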
def arrange_indexes_by_label(input_labels, possible_labels):

...
@@ -343,7 +343,7 @@ def blocks_tensorflow(images, block_size):
     output_size = list(block_size)
     output_size[0] = -1
     output_size[-1] = images.shape[-1]
-    blocks = tf.extract_image_patches(
+    blocks = tf.image.extract_patches(
         images, block_size, block_size, [1, 1, 1, 1], "VALID"
     )
     n_blocks = int(numpy.prod(blocks.shape[1:3]))

...
@@ -366,11 +366,11 @@ def tf_repeat(tensor, repeats):
     A Tensor. Has the same type as input. Has the shape of tensor.shape *
     repeats
     """
-    with tf.variable_scope("repeat"):
+    with tf.compat.v1.variable_scope("repeat"):
         expanded_tensor = tf.expand_dims(tensor, -1)
         multiples = [1] + repeats
         tiled_tensor = tf.tile(expanded_tensor, multiples=multiples)
-        repeated_tesnor = tf.reshape(tiled_tensor, tf.shape(tensor) * repeats)
+        repeated_tesnor = tf.reshape(tiled_tensor, tf.shape(input=tensor) * repeats)
     return repeated_tesnor
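For reference, what tf_repeat computes, on made-up values (assuming the
function above is importable and run under TF1-compatible execution):

    x = tf.constant([[1, 2], [3, 4]])
    y = tf_repeat(x, [2, 3])
    # y has shape (4, 6): each element of x is expanded into a 2x3 block,
    # the TensorFlow analogue of applying numpy.repeat along every axis.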
...
bob/learn/tensorflow/dataset/image.py

...
@@ -99,7 +99,7 @@ def shuffle_data_and_labels_image_augmentation(filenames,
     dataset = dataset.shuffle(buffer_size).batch(batch_size).repeat(epochs)
-    data, labels = dataset.make_one_shot_iterator().get_next()
+    data, labels = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
     return data, labels
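The compat iterator preserves the TF1 graph-mode contract. Under TF2 eager
execution the shim becomes unnecessary, since a tf.data.Dataset is directly
iterable; a sketch with a toy dataset rather than this project's pipeline:

    import tensorflow as tf

    dataset = tf.data.Dataset.from_tensor_slices((tf.zeros([4, 2]), tf.range(4)))
    dataset = dataset.shuffle(4).batch(2).repeat(1)
    for data, labels in dataset:
        pass  # data: (2, 2) float32 batch; labels: (2,) int32 batch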
...
@@ -215,7 +215,7 @@ def load_pngs(img_path, img_shape):
     object
         The loaded png file
     """
-    img_raw = tf.read_file(img_path)
+    img_raw = tf.io.read_file(img_path)
     img_tensor = tf.image.decode_png(img_raw, channels=img_shape[-1])
     img_final = tf.reshape(img_tensor, img_shape)
     return img_final
bob/learn/tensorflow/dataset/siamese_image.py

...
@@ -103,7 +103,7 @@ def shuffle_data_and_labels_image_augmentation(filenames,
         extension=extension,
     )
     dataset = dataset.shuffle(buffer_size).batch(batch_size).repeat(epochs)
-    data, labels = dataset.make_one_shot_iterator().get_next()
+    data, labels = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
     return data, labels

...
bob/learn/tensorflow/dataset/tfrecords.py

...
@@ -77,9 +77,9 @@ def dataset_to_tfrecord(dataset, output):
         return example_proto.SerializeToString()

     def tf_serialize_example(*args):
-        args = tf.contrib.framework.nest.flatten(args)
-        args = [tf.serialize_tensor(f) for f in args]
-        tf_string = tf.py_func(serialize_example_pyfunction, args, tf.string)
+        args = tf.nest.flatten(args)
+        args = [tf.io.serialize_tensor(f) for f in args]
+        tf_string = tf.compat.v1.py_func(serialize_example_pyfunction, args, tf.string)
         return tf.reshape(tf_string, ())  # The result is a scalar

     dataset = dataset.map(tf_serialize_example)
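tf.compat.v1.py_func keeps the old behavior; the TF2-native counterpart is
tf.py_function, which also works eagerly. A hedged sketch, not what the
upgrade tool emits:

    import tensorflow as tf

    def _to_upper(s):
        # s arrives as an eager tensor; .numpy() yields Python bytes.
        return s.numpy().upper()

    out = tf.py_function(_to_upper, [tf.constant(b"key-1")], tf.string)
    # out holds b"KEY-1"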
...
@@ -122,20 +122,20 @@ def dataset_from_tfrecord(tfrecord, num_parallel_reads=None):
         meta = json.load(f)
     for k, v in meta.items():
         meta[k] = eval(v)
-    output_types = tf.contrib.framework.nest.flatten(meta["output_types"])
-    output_shapes = tf.contrib.framework.nest.flatten(meta["output_shapes"])
+    output_types = tf.nest.flatten(meta["output_types"])
+    output_shapes = tf.nest.flatten(meta["output_shapes"])
     feature_description = {}
     for i in range(len(output_types)):
         key = f"feature{i}"
-        feature_description[key] = tf.FixedLenFeature([], tf.string)
+        feature_description[key] = tf.io.FixedLenFeature([], tf.string)

     def _parse_function(example_proto):
         # Parse the input tf.Example proto using the dictionary above.
-        args = tf.parse_single_example(example_proto, feature_description)
-        args = tf.contrib.framework.nest.flatten(args)
-        args = [tf.parse_tensor(v, t) for v, t in zip(args, output_types)]
+        args = tf.io.parse_single_example(
+            serialized=example_proto, features=feature_description
+        )
+        args = tf.nest.flatten(args)
+        args = [tf.io.parse_tensor(v, t) for v, t in zip(args, output_types)]
         args = [tf.reshape(v, s) for v, s in zip(args, output_shapes)]
-        return tf.contrib.framework.nest.pack_sequence_as(meta["output_types"], args)
+        return tf.nest.pack_sequence_as(meta["output_types"], args)

     return raw_dataset.map(_parse_function)
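The pairing above is tf.io.serialize_tensor on the write path and
tf.io.parse_tensor on the read path. The round trip in isolation, on a
made-up tensor:

    import tensorflow as tf

    t = tf.constant([[1.0, 2.0]])
    s = tf.io.serialize_tensor(t)          # scalar string tensor
    r = tf.io.parse_tensor(s, tf.float32)  # recovers values and shape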
...
@@ -161,9 +161,9 @@ def example_parser(serialized_example, feature, data_shape, data_type):
     """
     # Decode the record read by the reader
-    features = tf.parse_single_example(serialized_example, features=feature)
+    features = tf.io.parse_single_example(serialized=serialized_example, features=feature)
     # Convert the image data from string back to the numbers
-    image = tf.decode_raw(features["data"], data_type)
+    image = tf.io.decode_raw(features["data"], data_type)
     # Cast label data into int64
     label = tf.cast(features["label"], tf.int64)
     # Reshape image data into the original shape

...
@@ -193,9 +193,9 @@ def image_augmentation_parser(
     """
     # Decode the record read by the reader
-    features = tf.parse_single_example(serialized_example, features=feature)
+    features = tf.io.parse_single_example(serialized=serialized_example, features=feature)
     # Convert the image data from string back to the numbers
-    image = tf.decode_raw(features["data"], data_type)
+    image = tf.io.decode_raw(features["data"], data_type)
     # Reshape image data into the original shape
     image = tf.reshape(image, data_shape)

...
@@ -231,7 +231,7 @@ def read_and_decode(filename_queue, data_shape, data_type=tf.float32, feature=No
     if feature is None:
         feature = DEFAULT_FEATURE
     # Define a reader and read the next record
-    reader = tf.TFRecordReader()
+    reader = tf.compat.v1.TFRecordReader()
     _, serialized_example = reader.read(filename_queue)
     return example_parser(serialized_example, feature, data_shape, data_type)

...
@@ -459,7 +459,7 @@ def shuffle_data_and_labels(
     dataset = create_dataset_from_records(tfrecord_filenames, data_shape, data_type)
     dataset = dataset.shuffle(buffer_size).batch(batch_size).repeat(epochs)
-    data, labels, key = dataset.make_one_shot_iterator().get_next()
+    data, labels, key = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
     features = dict()
     features["data"] = data
     features["key"] = key

...
@@ -495,7 +495,7 @@ def batch_data_and_labels(
     dataset = create_dataset_from_records(tfrecord_filenames, data_shape, data_type)
     dataset = dataset.batch(batch_size).repeat(epochs)
-    data, labels, key = dataset.make_one_shot_iterator().get_next()
+    data, labels, key = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
     features = dict()
     features["data"] = data
     features["key"] = key

...
@@ -565,7 +565,7 @@ def batch_data_and_labels_image_augmentation(
     dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
     dataset = dataset.repeat(epochs)
-    data, labels, key = dataset.make_one_shot_iterator().get_next()
+    data, labels, key = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
     features = dict()
     features["data"] = data
     features["key"] = key

...
@@ -602,26 +602,26 @@ def describe_tf_record(tf_record_path, shape, batch_size=1):
     """
     tf_records = [os.path.join(tf_record_path, f) for f in os.listdir(tf_record_path)]
-    filename_queue = tf.train.string_input_producer(
+    filename_queue = tf.compat.v1.train.string_input_producer(
         tf_records, num_epochs=1, name="input"
     )
     feature = {
-        "data": tf.FixedLenFeature([], tf.string),
-        "label": tf.FixedLenFeature([], tf.int64),
-        "key": tf.FixedLenFeature([], tf.string),
+        "data": tf.io.FixedLenFeature([], tf.string),
+        "label": tf.io.FixedLenFeature([], tf.int64),
+        "key": tf.io.FixedLenFeature([], tf.string),
     }
     # Define a reader and read the next record
-    reader = tf.TFRecordReader()
+    reader = tf.compat.v1.TFRecordReader()
     _, serialized_example = reader.read(filename_queue)
     # Decode the record read by the reader
-    features = tf.parse_single_example(serialized_example, features=feature)
+    features = tf.io.parse_single_example(serialized=serialized_example, features=feature)
     # Convert the image data from string back to the numbers
-    image = tf.decode_raw(features["data"], tf.uint8)
+    image = tf.io.decode_raw(features["data"], tf.uint8)
     # Cast label data into int32
     label = tf.cast(features["label"], tf.int64)

...
@@ -631,7 +631,7 @@ def describe_tf_record(tf_record_path, shape, batch_size=1):
     image = tf.reshape(image, shape)
     # Getting the batches in order
-    data_ph, label_ph, img_name_ph = tf.train.batch(
+    data_ph, label_ph, img_name_ph = tf.compat.v1.train.batch(
         [image, label, img_name],
         batch_size=batch_size,
         capacity=1000,

...
@@ -640,13 +640,13 @@ def describe_tf_record(tf_record_path, shape, batch_size=1):
     )
     # Start the reading
-    session = tf.Session()
-    tf.local_variables_initializer().run(session=session)
-    tf.global_variables_initializer().run(session=session)
+    session = tf.compat.v1.Session()
+    tf.compat.v1.local_variables_initializer().run(session=session)
+    tf.compat.v1.global_variables_initializer().run(session=session)

     # Preparing the batches
     thread_pool = tf.train.Coordinator()
-    threads = tf.train.start_queue_runners(coord=thread_pool, sess=session)
+    threads = tf.compat.v1.train.start_queue_runners(coord=thread_pool, sess=session)
     logger.info("Counting in %s", tf_record_path)
     labels = set()
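Sessions and queue runners survive only under tf.compat.v1. For comparison,
a TF2-style sketch of the same record-counting idea, with a hypothetical
directory and without Session or queue runners:

    import os
    import tensorflow as tf

    tf_record_path = "/path/to/records"  # hypothetical
    files = [os.path.join(tf_record_path, f) for f in os.listdir(tf_record_path)]
    raw_dataset = tf.data.TFRecordDataset(files)
    n_records = sum(1 for _ in raw_dataset)  # eager iteration counts records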
...
bob/learn/tensorflow/dataset/triplet_image.py

...
@@ -104,7 +104,7 @@ def shuffle_data_and_labels_image_augmentation(filenames,
     dataset = dataset.shuffle(buffer_size).batch(batch_size).repeat(epochs)
     # dataset = dataset.batch(buffer_size).batch(batch_size).repeat(epochs)
-    data = dataset.make_one_shot_iterator().get_next()
+    data = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
     return data

...
bob/learn/tensorflow/gan/losses.py

...
@@ -8,8 +8,8 @@ def relativistic_discriminator_loss(
     real_weights=1.0,
     generated_weights=1.0,
     scope=None,
-    loss_collection=tf.GraphKeys.LOSSES,
-    reduction=tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
+    loss_collection=tf.compat.v1.GraphKeys.LOSSES,
+    reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
     add_summaries=False,
 ):
     """Relativistic (average) loss

...
@@ -34,7 +34,7 @@ def relativistic_discriminator_loss(
     Returns:
       A loss Tensor. The shape depends on `reduction`.
     """
-    with tf.name_scope(
+    with tf.compat.v1.name_scope(
         scope,
         "discriminator_relativistic_loss",
         (

...
@@ -47,13 +47,13 @@ def relativistic_discriminator_loss(
     ) as scope:
         real_logit = discriminator_real_outputs - tf.reduce_mean(
-            discriminator_gen_outputs
+            input_tensor=discriminator_gen_outputs
         )
         fake_logit = discriminator_gen_outputs - tf.reduce_mean(
-            discriminator_real_outputs
+            input_tensor=discriminator_real_outputs
         )
-        loss_on_real = tf.losses.sigmoid_cross_entropy(
+        loss_on_real = tf.compat.v1.losses.sigmoid_cross_entropy(
             tf.ones_like(real_logit),
             real_logit,
             real_weights,

...
@@ -62,7 +62,7 @@ def relativistic_discriminator_loss(
             loss_collection=None,
             reduction=reduction,
         )
-        loss_on_generated = tf.losses.sigmoid_cross_entropy(
+        loss_on_generated = tf.compat.v1.losses.sigmoid_cross_entropy(
             tf.zeros_like(fake_logit),
             fake_logit,
             generated_weights,

...
@@ -72,12 +72,12 @@ def relativistic_discriminator_loss(
         )
         loss = loss_on_real + loss_on_generated
-        tf.losses.add_loss(loss, loss_collection)
+        tf.compat.v1.losses.add_loss(loss, loss_collection)
         if add_summaries:
-            tf.summary.scalar("discriminator_gen_relativistic_loss", loss_on_generated)
-            tf.summary.scalar("discriminator_real_relativistic_loss", loss_on_real)
-            tf.summary.scalar("discriminator_relativistic_loss", loss)
+            tf.compat.v1.summary.scalar("discriminator_gen_relativistic_loss", loss_on_generated)
+            tf.compat.v1.summary.scalar("discriminator_real_relativistic_loss", loss_on_real)
+            tf.compat.v1.summary.scalar("discriminator_relativistic_loss", loss)
     return loss
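Spelled out, the objective assembled above is the relativistic average
discriminator loss: sigmoid_cross_entropy at labels 1 and 0 reduces to
-log(sigma(z)) and -log(1 - sigma(z)) respectively, so with C the
discriminator's pre-sigmoid output,

    L_D = -\mathbb{E}_{x_r}\left[\log \sigma\left(C(x_r) - \mathbb{E}_{x_f}[C(x_f)]\right)\right]
          - \mathbb{E}_{x_f}\left[\log\left(1 - \sigma\left(C(x_f) - \mathbb{E}_{x_r}[C(x_r)]\right)\right)\right]

where x_r are real and x_f generated samples.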
...
@@ -89,8 +89,8 @@ def relativistic_generator_loss(
     real_weights=1.0,
     generated_weights=1.0,
     scope=None,
-    loss_collection=tf.GraphKeys.LOSSES,
-    reduction=tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
+    loss_collection=tf.compat.v1.GraphKeys.LOSSES,
+    reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
     add_summaries=False,
     confusion_labels=False,
 ):

...
@@ -116,7 +116,7 @@ def relativistic_generator_loss(
     Returns:
       A loss Tensor. The shape depends on `reduction`.
     """
-    with tf.name_scope(
+    with tf.compat.v1.name_scope(
         scope,
         "generator_relativistic_loss",
         (

...
@@ -129,10 +129,10 @@ def relativistic_generator_loss(
     ) as scope:
         real_logit = discriminator_real_outputs - tf.reduce_mean(
-            discriminator_gen_outputs
+            input_tensor=discriminator_gen_outputs
        )
         fake_logit = discriminator_gen_outputs - tf.reduce_mean(
-            discriminator_real_outputs
+            input_tensor=discriminator_real_outputs
         )
         if confusion_labels:

...
@@ -142,7 +142,7 @@ def relativistic_generator_loss(
         real_labels = tf.zeros_like(real_logit)
         fake_labels = tf.ones_like(fake_logit)

-        loss_on_real = tf.losses.sigmoid_cross_entropy(
+        loss_on_real = tf.compat.v1.losses.sigmoid_cross_entropy(
             real_labels,
             real_logit,
             real_weights,

...
@@ -151,7 +151,7 @@ def relativistic_generator_loss(
             loss_collection=None,
             reduction=reduction,
         )
-        loss_on_generated = tf.losses.sigmoid_cross_entropy(
+        loss_on_generated = tf.compat.v1.losses.sigmoid_cross_entropy(
             fake_labels,
             fake_logit,
             generated_weights,

...
@@ -161,11 +161,11 @@ def relativistic_generator_loss(
         )
         loss = loss_on_real + loss_on_generated
-        tf.losses.add_loss(loss, loss_collection)
+        tf.compat.v1.losses.add_loss(loss, loss_collection)
         if add_summaries:
-            tf.summary.scalar("generator_gen_relativistic_loss", loss_on_generated)
-            tf.summary.scalar("generator_real_relativistic_loss", loss_on_real)
-            tf.summary.scalar("generator_relativistic_loss", loss)
+            tf.compat.v1.summary.scalar("generator_gen_relativistic_loss", loss_on_generated)
+            tf.compat.v1.summary.scalar("generator_real_relativistic_loss", loss_on_real)
+            tf.compat.v1.summary.scalar("generator_relativistic_loss", loss)
     return loss
bob/learn/tensorflow/image/filter.py

...
@@ -5,13 +5,13 @@ def gaussian_kernel(size: int, mean: float, std: float):
     """Makes 2D gaussian Kernel for convolution.
     Code adapted from: https://stackoverflow.com/a/52012658/1286165"""
-    d = tf.distributions.Normal(mean, std)
+    d = tf.compat.v1.distributions.Normal(mean, std)
     vals = d.prob(tf.range(start=-size, limit=size + 1, dtype=tf.float32))
     gauss_kernel = tf.einsum("i,j->ij", vals, vals)
-    return gauss_kernel / tf.reduce_sum(gauss_kernel)
+    return gauss_kernel / tf.reduce_sum(input_tensor=gauss_kernel)


 class GaussianFilter:
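Usage sketch for the migrated gaussian_kernel, with made-up parameters: size
counts taps on each side of zero, so size=3 yields a 7x7 kernel:

    kernel = gaussian_kernel(3, 0.0, 1.0)
    # kernel has shape (7, 7) and sums to 1 after the reduce_sum division.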
...
@@ -25,13 +25,13 @@ class GaussianFilter:
         self.gauss_kernel = gaussian_kernel(size, mean, std)[:, :, None, None]

     def __call__(self, image):
-        shape = tf.shape(image)
+        shape = tf.shape(input=image)
         image = tf.reshape(image, [-1, shape[-3], shape[-2], shape[-1]])