bob / bob.learn.tensorflow · Commits · 565464b0

Commit 565464b0, authored Oct 12, 2017 by Tiago de Freitas Pereira

    Change all losses, data augmentation and architectures to functions

Parent: bdbc9988
Pipeline #13145 failed with stages in 3 minutes and 48 seconds
33 changed files
bob/learn/tensorflow/datashuffler/Base.py

@@ -8,7 +8,6 @@
 import tensorflow as tf
 import bob.ip.base
 import numpy
 import six
-from bob.learn.tensorflow.datashuffler.Normalizer import Linear

 class Base(object):

@@ -55,7 +54,7 @@ class Base(object):
                  batch_size=32,
                  seed=10,
                  data_augmentation=None,
-                 normalizer=Linear(),
+                 normalizer=None,
                  prefetch=False,
                  prefetch_capacity=50,
                  prefetch_threads=5):

@@ -218,7 +217,10 @@ class Base(object):
         For the time being I'm only scaling from 0-1
         """
-        return self.normalizer(x)
+        if self.normalizer is None:
+            return x
+        else:
+            return self.normalizer(x)

     def _aggregate_batch(self, data_holder, use_list=False):
         size = len(data_holder[0])
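The effect of the last hunk is that a normalizer is now any plain callable, with None (the new default) meaning pass-through. A standalone sketch of that dispatch; the function name normalize_sample is illustrative, since the diff only shows the method body:

    import numpy

    def normalize_sample(x, normalizer=None):
        # mirrors the new logic above: None means pass-through; anything else
        # is treated as a plain callable applied to the sample
        if normalizer is None:
            return x
        return normalizer(x)

    x = numpy.arange(4.0)
    assert (normalize_sample(x) == x).all()
    assert (normalize_sample(x, lambda s: s / 255.0) == x / 255.0).all()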
bob/learn/tensorflow/datashuffler/DataAugmentation.py
deleted 100755 → 0

#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Sun 16 Oct 2016 14:32:36 CEST

import numpy


class DataAugmentation(object):
    """
    Base class for applying common real-time data augmentation.

    This class is meant to be used as an argument of `input_data`. When training
    a model, the defined augmentation methods will be applied at training
    time only.
    """

    def __init__(self, seed=10):
        self.filter_bank = []
        numpy.random.seed(seed)

    def __call__(self, image):
        """
        Apply a random filter to and image
        """
        if len(self.filter_bank) <= 0:
            raise ValueError("There is not filters in the filter bank")

        filter = self.filter_bank[numpy.random.randint(len(self.filter_bank))]
        return filter(image)
bob/learn/tensorflow/datashuffler/Disk.py

@@ -11,7 +11,6 @@ import bob.core
 from .Base import Base
 logger = bob.core.log.setup("bob.learn.tensorflow")
-from bob.learn.tensorflow.datashuffler.Normalizer import Linear

 class Disk(Base):

@@ -53,7 +52,7 @@ class Disk(Base):
                  batch_size=1,
                  seed=10,
                  data_augmentation=None,
-                 normalizer=Linear(),
+                 normalizer=None,
                  prefetch=False,
                  prefetch_capacity=10,
                  prefetch_threads=5
bob/learn/tensorflow/datashuffler/ImageAugmentation.py

 #!/usr/bin/env python
 # vim: set fileencoding=utf-8 :
 # @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
 # @date: Wed 11 May 2016 09:39:36 CEST

 import bob.ip.base
 import numpy

-from .DataAugmentation import DataAugmentation
-
-class ImageAugmentation(DataAugmentation):
-    """
-    Class for applying common real-time random data augmentation for images.
-    """
-
-    def __init__(self, seed=10):
-        super(ImageAugmentation, self).__init__(seed=seed)
-
-        self.filter_bank = [self.__add_none,
-                            self.__add_none,
-                            self.__add_gaussian_blur,
-                            self.__add_left_right_flip,
-                            self.__add_none,
-                            self.__add_salt_and_pepper]
-                            #self.__add_rotation,
-
-    def __add_none(self, image):
-        return image
-
-    def __add_gaussian_blur(self, image):
-        possible_sigmas = numpy.arange(0.1, 3., 0.1)
-        possible_radii = [1, 2, 3]
-
-        sigma = possible_sigmas[numpy.random.randint(len(possible_sigmas))]
-        radius = possible_radii[numpy.random.randint(len(possible_radii))]
-
-        gaussian_filter = bob.ip.base.Gaussian(sigma=(sigma, sigma),
-                                               radius=(radius, radius))
-        return gaussian_filter(image)
-
-    def __add_left_right_flip(self, image):
-        return bob.ip.base.flop(image)
-
-    def __add_rotation(self, image):
-        possible_angles = numpy.arange(-15, 15, 0.5)
-        angle = possible_angles[numpy.random.randint(len(possible_angles))]
-        return bob.ip.base.rotate(image, angle)
-
-    def __add_salt_and_pepper(self, image):
-        possible_levels = numpy.arange(0.01, 0.1, 0.01)
-        level = possible_levels[numpy.random.randint(len(possible_levels))]
-        return self.compute_salt_and_peper(image, level)
-
-    def compute_salt_and_peper(self, image, level):
-        """
-        Compute a salt and pepper noise
-        """
-        r = numpy.random.rand(*image.shape)
-
-        # 0 noise
-        indexes_0 = r <= (level / 0.5)
-        image[indexes_0] = 0.0
-
-        # 255 noise
-        indexes_255 = (1 - level / 2) <= r;
-        image[indexes_255] = 255.0
-
-        return image
+def add_gaussian_blur(image, seed=10):
+    """
+    Add random gaussian blur
+    """
+    numpy.random.seed(seed)
+
+    possible_sigmas = numpy.arange(0.1, 3., 0.1)
+    possible_radii = [1, 2, 3]
+
+    sigma = possible_sigmas[numpy.random.randint(len(possible_sigmas))]
+    radius = possible_radii[numpy.random.randint(len(possible_radii))]
+
+    gaussian_filter = bob.ip.base.Gaussian(sigma=(sigma, sigma),
+                                           radius=(radius, radius))
+
+    return gaussian_filter(image)
+
+def add_rotation(image):
+    """
+    Add random rotation
+    """
+    possible_angles = numpy.arange(-15, 15, 0.5)
+    angle = possible_angles[numpy.random.randint(len(possible_angles))]
+
+    return bob.ip.base.rotate(image, angle)
+
+def add_salt_and_pepper(image):
+    """
+    Add random salt and pepper
+    """
+    possible_levels = numpy.arange(0.01, 0.1, 0.01)
+    level = possible_levels[numpy.random.randint(len(possible_levels))]
+
+    return compute_salt_and_peper(image, level)
+
+def compute_salt_and_peper(image, level):
+    """
+    Compute a salt and pepper noise
+    """
+    r = numpy.random.rand(*image.shape)
+
+    # 0 noise
+    indexes_0 = r <= (level / 0.5)
+    image[indexes_0] = 0.0
+
+    # 255 noise
+    indexes_255 = (1 - level / 2) <= r;
+    image[indexes_255] = 255.0
+
+    return image
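With the class gone, each augmentation is a free function applied directly to a numpy image. A hedged usage sketch; the import path follows the file location above, and the image is a placeholder:

    import numpy
    from bob.learn.tensorflow.datashuffler.ImageAugmentation import (
        add_gaussian_blur, add_rotation, add_salt_and_pepper)

    image = (numpy.random.rand(28, 28) * 255).astype("float64")  # placeholder image

    blurred = add_gaussian_blur(image, seed=10)  # Gaussian with random sigma/radius
    rotated = add_rotation(image)                # random angle in [-15, 15)
    noisy = add_salt_and_pepper(image)           # random low-level salt & pepper (mutates in place)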
bob/learn/tensorflow/datashuffler/Memory.py

@@ -5,7 +5,6 @@
 import numpy
 from .Base import Base
-from bob.learn.tensorflow.datashuffler.Normalizer import Linear
 import tensorflow as tf

@@ -47,7 +46,7 @@ class Memory(Base):
                  batch_size=1,
                  seed=10,
                  data_augmentation=None,
-                 normalizer=Linear(),
+                 normalizer=None,
                  prefetch=False,
                  prefetch_capacity=10,
                  prefetch_threads=5
bob/learn/tensorflow/datashuffler/Normalizer.py

@@ -4,52 +4,27 @@
 import numpy

-class ScaleFactor(object):
-    def __init__(self, scale_factor=0.00390625):
-        self.scale_factor = scale_factor
-
-    def __call__(self, x):
-        return x * self.scale_factor
-
-class MeanOffset(object):
-    def __init__(self, mean_offset):
-        self.mean_offset = mean_offset
-
-    def __call__(self, x):
-        for i in range(len(self.mean_offset)):
-            x[:, :, i] = x[:, :, i] - self.mean_offset[i]
-
-        return x
-
-class Linear(object):
-    def __init__(self):
-        pass
-
-    def __call__(self, x):
-        return x
-
-class PerImageStandarization(object):
-    def __init__(self):
-        pass
-
-    def __call__(self, x):
-        mean = numpy.mean(x)
-        std = numpy.std(x)
-
-        return (x - mean) / max(std, 1 / numpy.sqrt(numpy.prod(x.shape)))
+def scale_factor(x, scale_factor=0.00390625):
+    """
+    Normalize a sample by a scale factor
+    """
+    return x * scale_factor
+
+def mean_offset(x, mean_offset):
+    """
+    Normalize a sample by a mean offset
+    """
+    for i in range(len(mean_offset)):
+        x[:, :, i] = x[:, :, i] - mean_offset[i]
+
+    return x
+
+def per_image_standarization(x):
+    mean = numpy.mean(x)
+    std = numpy.std(x)
+
+    return (x - mean) / max(std, 1 / numpy.sqrt(numpy.prod(x.shape)))
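Since the normalizers are now plain functions, the parameterized ones can be bound with functools.partial before being handed to a datashuffler's normalizer argument. A sketch; the offset values are illustrative, not taken from this diff:

    import numpy
    from functools import partial
    from bob.learn.tensorflow.datashuffler import scale_factor, mean_offset

    # scale_factor defaults to 1/256, so the bare function can be passed directly
    assert scale_factor(numpy.ones((2, 2)))[0, 0] == 0.00390625

    # mean_offset needs its per-channel offsets bound first, since the
    # shufflers invoke normalizer(x) with the sample as the only argument
    normalizer = partial(mean_offset, mean_offset=[127.5, 127.5, 127.5])
    x = numpy.zeros((4, 4, 3))
    assert normalizer(x)[0, 0, 0] == -127.5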
bob/learn/tensorflow/datashuffler/SiameseDisk.py

@@ -11,8 +11,6 @@ logger = bob.core.log.setup("bob.learn.tensorflow")
 from .Disk import Disk
 from .Siamese import Siamese
-from bob.learn.tensorflow.datashuffler.Normalizer import Linear

 class SiameseDisk(Siamese, Disk):
     """

@@ -52,7 +50,7 @@ class SiameseDisk(Siamese, Disk):
                  batch_size=1,
                  seed=10,
                  data_augmentation=None,
-                 normalizer=Linear(),
+                 normalizer=None,
                  prefetch=False,
                  prefetch_capacity=10,
                  prefetch_threads=5
bob/learn/tensorflow/datashuffler/SiameseMemory.py

@@ -8,7 +8,6 @@ import six
 from .Memory import Memory
 from .Siamese import Siamese
 import tensorflow as tf
-from bob.learn.tensorflow.datashuffler.Normalizer import Linear

 class SiameseMemory(Siamese, Memory):

@@ -50,7 +49,7 @@ class SiameseMemory(Siamese, Memory):
                  batch_size=32,
                  seed=10,
                  data_augmentation=None,
-                 normalizer=Linear(),
+                 normalizer=None,
                  prefetch=False,
                  prefetch_capacity=50,
                  prefetch_threads=10
bob/learn/tensorflow/datashuffler/TFRecord.py

@@ -6,7 +6,6 @@ import numpy
 import tensorflow as tf
 import bob.ip.base
 import numpy
-from bob.learn.tensorflow.datashuffler.Normalizer import Linear

 class TFRecord(object):
bob/learn/tensorflow/datashuffler/TFRecordImage.py

@@ -7,7 +7,6 @@ import numpy
 import tensorflow as tf
 import bob.ip.base
 import numpy
-from bob.learn.tensorflow.datashuffler.Normalizer import Linear
 from .TFRecord import TFRecord

 class TFRecordImage(TFRecord):
bob/learn/tensorflow/datashuffler/TripletDisk.py

@@ -15,7 +15,6 @@ import tensorflow as tf
 from .Disk import Disk
 from .Triplet import Triplet
-from bob.learn.tensorflow.datashuffler.Normalizer import Linear

 class TripletDisk(Triplet, Disk):

@@ -57,7 +56,7 @@ class TripletDisk(Triplet, Disk):
                  batch_size=1,
                  seed=10,
                  data_augmentation=None,
-                 normalizer=Linear(),
+                 normalizer=None,
                  prefetch=False,
                  prefetch_capacity=50,
                  prefetch_threads=10
bob/learn/tensorflow/datashuffler/TripletMemory.py

@@ -8,7 +8,6 @@ import tensorflow as tf
 import six
 from .Memory import Memory
 from .Triplet import Triplet
-from bob.learn.tensorflow.datashuffler.Normalizer import Linear

 class TripletMemory(Triplet, Memory):

@@ -50,7 +49,7 @@ class TripletMemory(Triplet, Memory):
                  batch_size=1,
                  seed=10,
                  data_augmentation=None,
-                 normalizer=Linear(),
+                 normalizer=None,
                  prefetch=False,
                  prefetch_capacity=50,
                  prefetch_threads=10
bob/learn/tensorflow/datashuffler/TripletWithFastSelectionDisk.py

@@ -13,7 +13,6 @@ from scipy.spatial.distance import euclidean, cdist
 import logging
 logger = logging.getLogger("bob.learn")
-from bob.learn.tensorflow.datashuffler.Normalizer import Linear

 class TripletWithFastSelectionDisk(Triplet, Disk, OnlineSampling):

@@ -67,7 +66,7 @@ class TripletWithFastSelectionDisk(Triplet, Disk, OnlineSampling):
                  seed=10,
                  data_augmentation=None,
                  total_identities=10,
-                 normalizer=Linear()):
+                 normalizer=None):

         super(TripletWithFastSelectionDisk, self).__init__(
             data=data,
bob/learn/tensorflow/datashuffler/TripletWithSelectionDisk.py

@@ -10,11 +10,9 @@ from .Disk import Disk
 from .Triplet import Triplet
 from .OnlineSampling import OnlineSampling
 from scipy.spatial.distance import euclidean
-from bob.learn.tensorflow.datashuffler.Normalizer import Linear
 import logging
 logger = logging.getLogger("bob.learn.tensorflow")
-from bob.learn.tensorflow.datashuffler.Normalizer import Linear

 class TripletWithSelectionDisk(Triplet, Disk, OnlineSampling):

@@ -57,7 +55,7 @@ class TripletWithSelectionDisk(Triplet, Disk, OnlineSampling):
                  seed=10,
                  data_augmentation=None,
                  total_identities=10,
-                 normalizer=Linear()):
+                 normalizer=None):

         super(TripletWithSelectionDisk, self).__init__(
             data=data,
bob/learn/tensorflow/datashuffler/TripletWithSelectionMemory.py

@@ -9,7 +9,6 @@ import tensorflow as tf
 from .OnlineSampling import OnlineSampling
 from .Memory import Memory
 from .Triplet import Triplet
-from bob.learn.tensorflow.datashuffler.Normalizer import Linear
 from scipy.spatial.distance import euclidean, cdist
 import logging

@@ -68,7 +67,7 @@ class TripletWithSelectionMemory(Triplet, Memory, OnlineSampling):
                  seed=10,
                  data_augmentation=None,
                  total_identities=10,
-                 normalizer=Linear()):
+                 normalizer=None):

         super(TripletWithSelectionMemory, self).__init__(
             data=data,
bob/learn/tensorflow/datashuffler/__init__.py

@@ -15,10 +15,7 @@ from .SiameseDisk import SiameseDisk
 from .TripletDisk import TripletDisk
 from .TripletWithSelectionDisk import TripletWithSelectionDisk
-from .DataAugmentation import DataAugmentation
-from .ImageAugmentation import ImageAugmentation
-from .Normalizer import ScaleFactor, MeanOffset, Linear, PerImageStandarization
+from .Normalizer import scale_factor, mean_offset, per_image_standarization
 from .DiskAudio import DiskAudio
 from .TFRecord import TFRecord

@@ -53,9 +50,7 @@ __appropriate__(
     SiameseDisk,
     TripletDisk,
     TripletWithSelectionDisk,
-    DataAugmentation,
-    ImageAugmentation,
-    ScaleFactor,
-    MeanOffset,
-    Linear,
+    scale_factor,
+    mean_offset,
+    per_image_standarization,
     DiskAudio,
     TFRecord,
     TFRecordImage
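For downstream code the export swap is a straight rename from class instances to functions; a before/after sketch:

    # before this commit: instances of the Normalizer classes were passed around
    #   from bob.learn.tensorflow.datashuffler import ScaleFactor
    #   normalizer = ScaleFactor()
    # after: the function itself is the normalizer
    from bob.learn.tensorflow.datashuffler import scale_factor
    normalizer = scale_factor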
bob/learn/tensorflow/network/Dummy.py

 #!/usr/bin/env python
 # vim: set fileencoding=utf-8 :
 # @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
 # @date: Wed 11 May 2016 09:39:36 CEST

+"""
+Dummy architecture
+"""
+
 import tensorflow as tf

-class Dummy(object):
-
-    def __init__(self,
-                 conv1_kernel_size=3,
-                 conv1_output=1,
-                 fc1_output=2,
-                 seed=10,
-                 n_classes=None):
-        """
-        Create all the necessary variables for this CNN
-
-        **Parameters**
-        conv1_kernel_size=3,
-        conv1_output=2,
-        n_classes=10
-        seed = 10
-        """
-        self.conv1_output = conv1_output
-        self.conv1_kernel_size = conv1_kernel_size
-        self.fc1_output = fc1_output
-        self.seed = seed
-        self.n_classes = n_classes
-
-    def __call__(self, inputs, reuse=False, end_point="logits"):
-        slim = tf.contrib.slim
-        end_points = dict()
-
-        initializer = tf.contrib.layers.xavier_initializer(uniform=False, dtype=tf.float32, seed=self.seed)
-
-        graph = slim.conv2d(inputs, self.conv1_output, self.conv1_kernel_size, activation_fn=tf.nn.relu,
-                            stride=1,
-                            weights_initializer=initializer,
-                            scope='conv1')
-        end_points['conv1'] = graph
-
-        graph = slim.flatten(graph, scope='flatten1')
-        end_points['flatten1'] = graph
-
-        graph = slim.fully_connected(graph, self.fc1_output,
-                                     weights_initializer=initializer,
-                                     activation_fn=None,
-                                     scope='fc1')
-        end_points['fc1'] = graph
-
-        if self.n_classes is not None:
-            # Appending the logits layer
-            graph = append_logits(graph, self.n_classes, reuse)
-            end_points['logits'] = graph
-
-        return end_points[end_point]
+def dummy(conv1_kernel_size=3, conv1_output=1, fc1_output=2, seed=10):
+    """
+    Create all the necessary variables for this CNN
+
+    **Parameters**
+    conv1_kernel_size:
+    conv1_output:
+    fc1_output:
+    seed = 10
+    """
+
+    slim = tf.contrib.slim
+    end_points = dict()
+
+    initializer = tf.contrib.layers.xavier_initializer(uniform=False, dtype=tf.float32, seed=seed)
+
+    graph = slim.conv2d(inputs, conv1_output, conv1_kernel_size, activation_fn=tf.nn.relu,
+                        stride=1,
+                        weights_initializer=initializer,
+                        scope='conv1')
+    end_points['conv1'] = graph
+
+    graph = slim.flatten(graph, scope='flatten1')
+    end_points['flatten1'] = graph
+
+    graph = slim.fully_connected(graph, fc1_output,
+                                 weights_initializer=initializer,
+                                 activation_fn=None,
+                                 scope='fc1')
+    end_points['fc1'] = graph
+
+    return graph, end_points
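As shown in the new version, the dummy body still refers to inputs, which no longer appears in the signature (consistent with the failed pipeline on this commit). A minimal sketch of the presumably intended call shape, assuming inputs is supplied as the first parameter:

    import tensorflow as tf
    from bob.learn.tensorflow.network.Dummy import dummy

    # hypothetical: assumes the signature dummy(inputs, conv1_kernel_size=3, ...)
    inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    graph, end_points = dummy(inputs, conv1_kernel_size=3, conv1_output=1, fc1_output=2)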
bob/learn/tensorflow/network/Embedding.py

@@ -6,7 +6,6 @@
 import tensorflow as tf
 from bob.learn.tensorflow.utils.session import Session
-from bob.learn.tensorflow.datashuffler import Linear

 class Embedding(object):

@@ -20,8 +19,8 @@ class Embedding(object):
     graph: Embedding graph
     """
-    def __init__(self, input, graph, normalizer=Linear()):
-        self.input = input
+    def __init__(self, inputs, graph, normalizer=None):
+        self.inputs = inputs
         self.graph = graph
         self.normalizer = normalizer

@@ -32,6 +31,6 @@ class Embedding(object):
         for i in range(data.shape[0]):
             data[i] = self.normalizer(data[i])
-        feed_dict = {self.input: data}
+        feed_dict = {self.inputs: data}

         return session.run([self.graph], feed_dict=feed_dict)[0]
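The input → inputs rename is a small API break for keyword callers; a sketch of the adjustment:

    # before: embedding = Embedding(input=input_tensor, graph=embedding_graph, normalizer=Linear())
    # after:  embedding = Embedding(inputs=input_tensor, graph=embedding_graph)  # normalizer defaults to None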
bob/learn/tensorflow/network/LightCNN29.py
deleted 100755 → 0

#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>

import tensorflow as tf
from bob.learn.tensorflow.layers import maxout
from .utils import append_logits


class LightCNN29(object):
    """Creates the graph for the Light CNN-9 in

    Wu, Xiang, et al. "A light CNN for deep face representation with noisy labels." arXiv preprint arXiv:1511.02683 (2015).
    """
    def __init__(self, seed=10, n_classes=10):
        self.seed = seed
        self.n_classes = n_classes

    def __call__(self, inputs, reuse=False, end_point="logits"):
        slim = tf.contrib.slim
        end_points = dict()

        initializer = tf.contrib.layers.xavier_initializer(uniform=False, dtype=tf.float32, seed=self.seed)

        graph = slim.conv2d(inputs, 96, [5, 5], activation_fn=tf.nn.relu,
                            stride=1,
                            weights_initializer=initializer,
                            scope='Conv1',
                            reuse=reuse)
        end_points['conv1'] = graph

        graph = maxout(graph,