Commit 7fabc04e authored by Amir MOHAMMADI's avatar Amir MOHAMMADI

Add more utils

parent cda150df
...@@ -6,3 +6,6 @@ from .eval import * ...@@ -6,3 +6,6 @@ from .eval import *
from .keras import * from .keras import *
from .train import * from .train import *
from .graph import * from .graph import *
from .network import *
from .math import *
from .reproducible import *
import tensorflow as tf
def gram_matrix(input_tensor):
    """Computes the Gram matrix of a batch of feature maps.

    Parameters
    ----------
    input_tensor : object
        The input tensor. Usually it's the activation of a conv layer. The input shape
        must be ``BHWC``.

    Returns
    -------
    object
        The computed gram matrix as a tensor of shape ``(B, C, C)``, normalized
        by the number of spatial locations (``H * W``).

    Example
    -------
    >>> gram_matrix(tf.zeros((32, 4, 6, 12)))
    <tf.Tensor: id=53, shape=(32, 12, 12), dtype=float32, numpy=
    array([[[0., 0., 0., ..., 0., 0., 0.],
            [0., 0., 0., ..., 0., 0., 0.],
            [0., 0., 0., ..., 0., 0., 0.],
            ...,
            [0., 0., 0., ..., 0., 0., 0.],
            [0., 0., 0., ..., 0., 0., 0.],
            [0., 0., 0., ..., 0., 0., 0.]],
    """
    # result[b, c, d] = sum over spatial positions (i, j) of x[b,i,j,c] * x[b,i,j,d]
    result = tf.linalg.einsum("bijc,bijd->bcd", input_tensor, input_tensor)
    # Normalize by the number of spatial locations (H * W) so the magnitude is
    # independent of the feature-map size.
    input_shape = tf.shape(input_tensor)
    num_locations = tf.cast(input_shape[1] * input_shape[2], tf.float32)
    return result / num_locations
def upper_triangle_and_diagonal(A):
    """Returns a flat version of upper triangle of a 2D array (including diagonal).

    This function is useful to be applied on gram matrices since they contain
    duplicate information.

    Parameters
    ----------
    A
        A two dimensional array.

    Returns
    -------
    object
        The flattened upper triangle of the array, as a 1D tensor.

    Example
    -------
    >>> A = [
    ...     [1, 2, 3],
    ...     [4, 5, 6],
    ...     [7, 8, 9],
    ... ]
    >>> upper_triangle_and_diagonal(A)
    [1,2,3,5,6,9]
    """
    ones = tf.ones_like(A)
    # Upper triangular matrix of 0s and 1s (including the diagonal).
    # tf.linalg.band_part is the non-deprecated alias of tf.matrix_band_part,
    # consistent with the tf.linalg usage elsewhere in this module.
    # Cast to bool so the mask dtype matches what tf.boolean_mask expects
    # (mirrors the cast done in the sibling upper_triangle function).
    mask = tf.cast(tf.linalg.band_part(ones, 0, -1), dtype=tf.bool)
    upper_triangular_flat = tf.boolean_mask(A, mask)
    return upper_triangular_flat
def upper_triangle(A):
    """Returns a flat version of the strictly upper triangle of a 2D array
    (excluding the diagonal).

    Parameters
    ----------
    A
        A two dimensional array.

    Returns
    -------
    object
        The flattened strictly upper triangle of the array, as a 1D tensor.
    """
    ones = tf.ones_like(A)
    # Upper triangular matrix of 0s and 1s, diagonal included.
    # tf.linalg.band_part is the non-deprecated alias of tf.matrix_band_part.
    mask_a = tf.linalg.band_part(ones, 0, -1)
    # Diagonal only.
    mask_b = tf.linalg.band_part(ones, 0, 0)
    # Subtract the diagonal so the mask keeps only the strictly upper part.
    mask = tf.cast(mask_a - mask_b, dtype=tf.bool)
    upper_triangular_flat = tf.boolean_mask(A, mask)
    return upper_triangular_flat
...@@ -2,19 +2,21 @@ import tensorflow as tf ...@@ -2,19 +2,21 @@ import tensorflow as tf
import tensorflow.contrib.slim as slim import tensorflow.contrib.slim as slim
def append_logits(graph, def append_logits(
graph,
n_classes, n_classes,
reuse=False, reuse=False,
l2_regularizer=5e-05, l2_regularizer=5e-05,
weights_std=0.1, trainable_variables=None, weights_std=0.1,
name='Logits'): trainable_variables=None,
name="Logits",
):
trainable = is_trainable(name, trainable_variables) trainable = is_trainable(name, trainable_variables)
return slim.fully_connected( return slim.fully_connected(
graph, graph,
n_classes, n_classes,
activation_fn=None, activation_fn=None,
weights_initializer=tf.truncated_normal_initializer( weights_initializer=tf.truncated_normal_initializer(stddev=weights_std),
stddev=weights_std),
weights_regularizer=slim.l2_regularizer(l2_regularizer), weights_regularizer=slim.l2_regularizer(l2_regularizer),
scope=name, scope=name,
reuse=reuse, reuse=reuse,
...@@ -47,4 +49,3 @@ def is_trainable(name, trainable_variables, mode=tf.estimator.ModeKeys.TRAIN): ...@@ -47,4 +49,3 @@ def is_trainable(name, trainable_variables, mode=tf.estimator.ModeKeys.TRAIN):
# Here is my choice to shutdown the whole scope # Here is my choice to shutdown the whole scope
return name in trainable_variables return name in trainable_variables
...@@ -9,7 +9,7 @@ from tensorflow.core.protobuf import rewriter_config_pb2 ...@@ -9,7 +9,7 @@ from tensorflow.core.protobuf import rewriter_config_pb2
def set_seed( def set_seed(
seed=0, python_hash_seed=0, log_device_placement=False, allow_soft_placement=False, seed=0, python_hash_seed=0, log_device_placement=False, allow_soft_placement=False,
arithmetic_optimization=None, arithmetic_optimization=None, allow_growth=None,
): ):
"""Sets the seeds in python, numpy, and tensorflow in order to help """Sets the seeds in python, numpy, and tensorflow in order to help
training reproducible networks. training reproducible networks.
...@@ -68,6 +68,10 @@ def set_seed( ...@@ -68,6 +68,10 @@ def set_seed(
off = rewriter_config_pb2.RewriterConfig.OFF off = rewriter_config_pb2.RewriterConfig.OFF
session_config.graph_options.rewrite_options.arithmetic_optimization = off session_config.graph_options.rewrite_options.arithmetic_optimization = off
if allow_growth is not None:
session_config.gpu_options.allow_growth = allow_growth
session_config.gpu_options.per_process_gpu_memory_fraction = 0.8
# The below tf.set_random_seed() will make random number generation # The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state. # in the TensorFlow backend have a well-defined initial state.
# For further details, see: # For further details, see:
......
...@@ -35,6 +35,35 @@ def compute_euclidean_distance(x, y): ...@@ -35,6 +35,35 @@ def compute_euclidean_distance(x, y):
return d return d
def pdist_safe(A, metric="sqeuclidean"):
    """Computes the pairwise squared-Euclidean distance matrix of ``A``.

    Parameters
    ----------
    A
        A 2D tensor of shape ``(n, d)`` (one sample per row).
    metric : str
        Only ``"sqeuclidean"`` is supported.

    Returns
    -------
    object
        An ``(n, n)`` tensor whose ``(i, j)`` entry is ``||A[i] - A[j]||^2``.

    Raises
    ------
    NotImplementedError
        If ``metric`` is anything other than ``"sqeuclidean"``.
    """
    if metric != "sqeuclidean":
        raise NotImplementedError()
    # ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2
    squared_norms = tf.reshape(tf.reduce_sum(A * A, 1), [-1, 1])
    cross_terms = tf.matmul(A, A, transpose_b=True)
    return squared_norms - 2 * cross_terms + tf.transpose(squared_norms)
def cdist(A, B, metric="sqeuclidean"):
    """Computes pairwise squared-Euclidean distances between rows of A and B.

    Parameters
    ----------
    A
        A 2D tensor of shape ``(m, d)``.
    B
        A 2D tensor of shape ``(n, d)``.
    metric : str
        Only ``"sqeuclidean"`` is supported.

    Returns
    -------
    object
        An ``(m, n)`` tensor whose ``(i, j)`` entry is ``||A[i] - B[j]||^2``.

    Raises
    ------
    NotImplementedError
        If ``metric`` is anything other than ``"sqeuclidean"``.
    """
    if metric != "sqeuclidean":
        raise NotImplementedError()
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, expanded with broadcasting:
    # (m, 1) + (1, n) broadcast to the full (m, n) distance matrix.
    # Adapted from https://stackoverflow.com/a/43839605/1286165
    sq_a = tf.reshape(tf.reduce_sum(tf.square(A), 1), [-1, 1])
    sq_b = tf.reshape(tf.reduce_sum(tf.square(B), 1), [1, -1])
    return tf.add(sq_a, sq_b) - 2 * tf.matmul(A, B, transpose_b=True)
def load_mnist(perc_train=0.9): def load_mnist(perc_train=0.9):
numpy.random.seed(0) numpy.random.seed(0)
import bob.db.mnist import bob.db.mnist
...@@ -184,7 +213,7 @@ def pdist(A): ...@@ -184,7 +213,7 @@ def pdist(A):
Compute a pairwise euclidean distance in the same fashion Compute a pairwise euclidean distance in the same fashion
as in scipy.spatial.distance.pdist as in scipy.spatial.distance.pdist
""" """
with tf.variable_scope("Pairwisedistance"): with tf.name_scope("Pairwisedistance"):
ones_1 = tf.reshape(tf.cast(tf.ones_like(A), tf.float32)[:, 0], [1, -1]) ones_1 = tf.reshape(tf.cast(tf.ones_like(A), tf.float32)[:, 0], [1, -1])
p1 = tf.matmul(tf.expand_dims(tf.reduce_sum(tf.square(A), 1), 1), ones_1) p1 = tf.matmul(tf.expand_dims(tf.reduce_sum(tf.square(A), 1), 1), ones_1)
...@@ -406,9 +435,7 @@ def bytes2human(n, format="%(value).1f %(symbol)s", symbols="customary"): ...@@ -406,9 +435,7 @@ def bytes2human(n, format="%(value).1f %(symbol)s", symbols="customary"):
return format % dict(symbol=symbols[0], value=n) return format % dict(symbol=symbols[0], value=n)
def random_choice_no_replacement( def random_choice_no_replacement(one_dim_input, num_indices_to_drop=3, sort=False):
one_dim_input, num_indices_to_drop=3, sort=False
):
"""Similar to np.random.choice with no replacement. """Similar to np.random.choice with no replacement.
Code from https://stackoverflow.com/a/54755281/1286165 Code from https://stackoverflow.com/a/54755281/1286165
""" """
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment