Commit 05a066c5 authored by Amir MOHAMMADI's avatar Amir MOHAMMADI

use tf.data instead of tf.contrib.data

parent e43aec3d
Pipeline #21748 failed (29 minutes and 10 seconds)
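For context: `tf.data` was promoted from `tf.contrib.data` into the core TensorFlow API in release 1.4, keeping the `Dataset` class and its constructors intact, so the migration below is essentially a module-path rename. A minimal sketch of the change (the toy filenames and labels are illustrative, not from this repository):

    import tensorflow as tf

    filenames = ['class_1_img1.png', 'class_1_img2.png', 'class_2_img1.png']
    labels = [0, 0, 1]

    # Before (tf.contrib era):
    # dataset = tf.contrib.data.Dataset.from_tensor_slices((filenames, labels))

    # After (TF >= 1.4, core namespace; same behavior):
    dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))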
@@ -25,7 +25,7 @@ def shuffle_data_and_labels_image_augmentation(filenames,
extension=None):
"""
Dump random batches from a list of image paths and labels:
The list of files and labels should be in the same order, e.g.
filenames = ['class_1_img1', 'class_1_img2', 'class_2_img1']
labels = [0, 0, 1]
@@ -34,28 +34,28 @@ def shuffle_data_and_labels_image_augmentation(filenames,
filenames:
List containing the path of the images
labels:
List containing the labels (needs to be in EXACT same order as filenames)
data_shape:
Samples shape saved in the tf-record
data_type:
tf data type (https://www.tensorflow.org/versions/r0.12/resources/dims_types#data_types)
batch_size:
Size of the batch
epochs:
Number of epochs to be batched
buffer_size:
Size of the shuffle bucket
gray_scale:
Convert to gray scale?
output_shape:
If set, will randomly crop the image given the output shape
@@ -79,7 +79,7 @@ def shuffle_data_and_labels_image_augmentation(filenames,
extension:
If None, will load files using `tf.image.decode..`; if set to `hdf5`, will load with `bob.io.base.load`
"""
dataset = create_dataset_from_path_augmentation(
@@ -118,23 +118,23 @@ def create_dataset_from_path_augmentation(filenames,
extension=None):
"""
Create a dataset from a list of image paths and labels
**Parameters**
filenames:
List containing the path of the images
labels:
List containing the labels (needs to be in EXACT same order as filenames)
data_shape:
Samples shape saved in the tf-record
data_type:
tf data type (https://www.tensorflow.org/versions/r0.12/resources/dims_types#data_types)
feature:
"""
parser = partial(
@@ -151,7 +151,7 @@ def create_dataset_from_path_augmentation(filenames,
per_image_normalization=per_image_normalization,
extension=extension)
-dataset = tf.contrib.data.Dataset.from_tensor_slices((filenames, labels))
+dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
dataset = dataset.map(parser)
return dataset
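The docstrings above describe `batch_size`, `epochs`, and a shuffle `buffer_size`; the dataset returned here is typically chained through the corresponding `tf.data` transformations. A hedged sketch of that chaining (the order shown is one common TF 1.x arrangement, assumed rather than taken from this commit):

    import tensorflow as tf

    def batch_shuffle_repeat(dataset, batch_size, epochs, buffer_size):
        # Shuffle within a bounded in-memory buffer, replay the data for
        # the requested number of epochs, then emit fixed-size batches.
        dataset = dataset.shuffle(buffer_size)
        dataset = dataset.repeat(epochs)
        dataset = dataset.batch(batch_size)
        return dataset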
@@ -25,43 +25,43 @@ def shuffle_data_and_labels_image_augmentation(filenames,
extension=None):
"""
Dump random batches for siamese networks from a list of image paths and labels:
The list of files and labels should be in the same order, e.g.
filenames = ['class_1_img1', 'class_1_img2', 'class_2_img1']
labels = [0, 0, 1]
The batches returned with tf.Session.run() will be in the following format:
**data** a dictionary containing the keys ['left', 'right'], each one representing
one element of the pair and **labels** which is [0, 1] where 0 is the genuine pair
and 1 is the impostor pair.
**Parameters**
filenames:
List containing the path of the images
labels:
List containing the labels (needs to be in EXACT same order as filenames)
data_shape:
Samples shape saved in the tf-record
data_type:
tf data type (https://www.tensorflow.org/versions/r0.12/resources/dims_types#data_types)
batch_size:
Size of the batch
epochs:
Number of epochs to be batched
buffer_size:
Size of the shuffle bucket
gray_scale:
Convert to gray scale?
output_shape:
If set, will randomly crop the image given the output shape
@@ -79,10 +79,10 @@ def shuffle_data_and_labels_image_augmentation(filenames,
random_rotate:
Randomly rotate face images between -5 and 5 degrees
per_image_normalization:
Linearly scales image to have zero mean and unit norm.
extension:
If None, will load files using `tf.image.decode..`; if set to `hdf5`, will load with `bob.io.base.load`
"""
@@ -122,33 +122,33 @@ def create_dataset_from_path_augmentation(filenames,
extension=None):
"""
Create a dataset from a list of image paths and labels
**Parameters**
filenames:
List containing the path of the images
labels:
List containing the labels (needs to be in EXACT same order as filenames)
data_shape:
Samples shape saved in the tf-record
data_type:
tf data type (https://www.tensorflow.org/versions/r0.12/resources/dims_types#data_types)
batch_size:
Size of the batch
epochs:
Number of epochs to be batched
buffer_size:
Size of the shuffle bucket
gray_scale:
Convert to gray scale?
output_shape:
If set, will randomly crop the image given the output shape
@@ -168,11 +168,11 @@ def create_dataset_from_path_augmentation(filenames,
Randomly rotate face images between -10 and 10 degrees
per_image_normalization:
Linearly scales image to have zero mean and unit norm.
extension:
If None, will load files using `tf.image.decode..`; if set to `hdf5`, will load with `bob.io.base.load`
"""
parser = partial(
@@ -191,7 +191,7 @@ def create_dataset_from_path_augmentation(filenames,
left_data, right_data, siamese_labels = siamease_pairs_generator(
filenames, labels)
-dataset = tf.contrib.data.Dataset.from_tensor_slices(
+dataset = tf.data.Dataset.from_tensor_slices(
(left_data, right_data, siamese_labels))
dataset = dataset.map(parser)
return dataset
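`from_tensor_slices` accepts a tuple of equal-length sequences and yields one tuple element per index, which is what lets the siamese variant slice left images, right images, and pair labels in lockstep. A small illustrative sketch (toy data, hypothetical file names):

    import tensorflow as tf

    left_data = ['class_1_img1.png', 'class_1_img2.png']
    right_data = ['class_1_img2.png', 'class_2_img1.png']
    siamese_labels = [0, 1]  # 0 = genuine pair, 1 = impostor pair

    # Each dataset element is a (left, right, label) tuple; all three
    # input sequences must have the same length.
    dataset = tf.data.Dataset.from_tensor_slices(
        (left_data, right_data, siamese_labels))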
@@ -25,41 +25,41 @@ def shuffle_data_and_labels_image_augmentation(filenames,
extension=None):
"""
Dump random batches for triplet networks from a list of image paths and labels:
The list of files and labels should be in the same order, e.g.
filenames = ['class_1_img1', 'class_1_img2', 'class_2_img1']
labels = [0, 0, 1]
The batches returned with tf.Session.run() will be in the following format:
**data** a dictionary containing the keys ['anchor', 'positive', 'negative'].
**Parameters**
filenames:
List containing the path of the images
labels:
List containing the labels (needs to be in EXACT same order as filenames)
data_shape:
Samples shape saved in the tf-record
data_type:
tf data type (https://www.tensorflow.org/versions/r0.12/resources/dims_types#data_types)
batch_size:
Size of the batch
epochs:
Number of epochs to be batched
buffer_size:
Size of the shuffle bucket
gray_scale:
Convert to gray scale?
output_shape:
If set, will randomly crop the image given the output shape
@@ -80,10 +80,10 @@ def shuffle_data_and_labels_image_augmentation(filenames,
per_image_normalization:
Linearly scales image to have zero mean and unit norm.
extension:
If None, will load files using `tf.image.decode..`; if set to `hdf5`, will load with `bob.io.base.load`
"""
dataset = create_dataset_from_path_augmentation(
@@ -123,23 +123,23 @@ def create_dataset_from_path_augmentation(filenames,
extension=None):
"""
Create a dataset from a list of image paths and labels
**Parameters**
filenames:
List containing the path of the images
labels:
List containing the labels (needs to be in EXACT same order as filenames)
data_shape:
Samples shape saved in the tf-record
data_type:
tf data type (https://www.tensorflow.org/versions/r0.12/resources/dims_types#data_types)
feature:
"""
parser = partial(
@@ -159,7 +159,7 @@ def create_dataset_from_path_augmentation(filenames,
anchor_data, positive_data, negative_data = triplets_random_generator(
filenames, labels)
-dataset = tf.contrib.data.Dataset.from_tensor_slices(
+dataset = tf.data.Dataset.from_tensor_slices(
(anchor_data, positive_data, negative_data))
dataset = dataset.map(parser)
return dataset
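Consuming the triplet dataset follows the usual TF 1.x pattern: build a one-shot iterator, take its next-element ops, and fetch them inside a session. A minimal sketch, assuming `dataset` is the (anchor, positive, negative) dataset returned above:

    import tensorflow as tf

    iterator = dataset.make_one_shot_iterator()
    anchor, positive, negative = iterator.get_next()

    with tf.Session() as sess:
        # Each run() call advances the iterator by one element.
        a, p, n = sess.run([anchor, positive, negative])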
@@ -75,7 +75,7 @@ def input_fn(mode, batch_size=1):
# Map example_parser over dataset, and batch results by up to batch_size
dataset = dataset.map(
-example_parser, num_threads=1, output_buffer_size=batch_size)
+example_parser, num_parallel_calls=1).prefetch(batch_size)
dataset = dataset.batch(batch_size)
images, labels, keys = dataset.make_one_shot_iterator().get_next()
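The last hunk tracks a second API change: the contrib-era `map(..., num_threads=..., output_buffer_size=...)` arguments were split in core `tf.data` into `num_parallel_calls` on `map` plus an explicit `prefetch` transformation. Side by side:

    # Old contrib spelling (arguments removed from core tf.data):
    # dataset = dataset.map(example_parser, num_threads=1,
    #                       output_buffer_size=batch_size)

    # Core tf.data equivalent:
    dataset = dataset.map(example_parser, num_parallel_calls=1)
    dataset = dataset.prefetch(batch_size)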