Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
Menu
Open sidebar
bob
bob.bio.face
Commits
27c75ad2
Commit
27c75ad2
authored
Jun 16, 2020
by
Tiago de Freitas Pereira
Browse files
Porting some baselines
parent
0a00da69
Changes
5
Hide whitespace changes
Inline
Side-by-side
bob/bio/face/config/baseline/gabor_graph.py
View file @
27c75ad2
from
bob.bio.base.pipelines.vanilla_biometrics
import
Distance
,
VanillaBiometricsPipeline
,
BioAlgorithmLegacy
from
bob.bio.face.helpers
import
face_crop_solver
from
bob.bio.base.pipelines.vanilla_biometrics
import
(
Distance
,
VanillaBiometricsPipeline
,
BioAlgorithmLegacy
,
)
from
bob.bio.face.config.baseline.helpers
import
crop_80x64
import
math
import
numpy
as
np
import
bob.bio.face
...
...
@@ -19,76 +23,39 @@ else:
####### SOLVING THE FACE CROPPER TO BE USED ##########
# Cropping
CROPPED_IMAGE_HEIGHT
=
80
CROPPED_IMAGE_WIDTH
=
CROPPED_IMAGE_HEIGHT
*
4
//
5
# eye positions for frontal images
RIGHT_EYE_POS
=
(
CROPPED_IMAGE_HEIGHT
//
5
,
CROPPED_IMAGE_WIDTH
//
4
-
1
)
LEFT_EYE_POS
=
(
CROPPED_IMAGE_HEIGHT
//
5
,
CROPPED_IMAGE_WIDTH
//
4
*
3
)
cropped_image_size
=
(
CROPPED_IMAGE_HEIGHT
,
CROPPED_IMAGE_WIDTH
)
color_channel
=
"gray"
if
annotation_type
==
"bounding-box"
:
transform_extra_arguments
=
((
"annotations"
,
"annotations"
),)
TOP_LEFT_POS
=
(
0
,
0
)
BOTTOM_RIGHT_POS
=
(
CROPPED_IMAGE_HEIGHT
,
CROPPED_IMAGE_WIDTH
)
# Detects the face and crops it without eye detection
face_cropper
=
face_crop_solver
(
cropped_image_size
,
color_channel
=
color_channel
,
cropped_positions
=
{
"topleft"
:
TOP_LEFT_POS
,
"bottomright"
:
BOTTOM_RIGHT_POS
},
fixed_positions
=
fixed_positions
,
)
elif
annotation_type
==
"eyes-center"
:
transform_extra_arguments
=
((
"annotations"
,
"annotations"
),)
# eye positions for frontal images
# Detects the face and crops it without eye detection
face_cropper
=
face_crop_solver
(
cropped_image_size
,
color_channel
=
color_channel
,
cropped_positions
=
{
"leye"
:
LEFT_EYE_POS
,
"reye"
:
RIGHT_EYE_POS
},
fixed_positions
=
fixed_positions
,
)
else
:
transform_extra_arguments
=
None
# DEFAULT TO FACE SIMPLE RESIZE
face_cropper
=
face_crop_solver
(
cropped_image_size
)
face_cropper
,
transform_extra_arguments
=
crop_80x64
(
annotation_type
,
fixed_positions
,
color_channel
=
"gray"
)
preprocessor
=
bob
.
bio
.
face
.
preprocessor
.
INormLBP
(
face_cropper
=
face_cropper
,
dtype
=
np
.
float64
face_cropper
=
face_cropper
,
dtype
=
np
.
float64
)
#### FEATURE EXTRACTOR ######
gabor_graph
=
bob
.
bio
.
face
.
extractor
.
GridGraph
(
# Gabor parameters
gabor_sigma
=
math
.
sqrt
(
2.0
)
*
math
.
pi
,
# what kind of information to extract
normalize_gabor_jets
=
True
,
# setup of the fixed grid
node_distance
=
(
8
,
8
),
# legacy objects needs to be wrapped with legacy transformers
from
bob.bio.base.transformers
import
ExtractorTransformer
gabor_graph
=
ExtractorTransformer
(
bob
.
bio
.
face
.
extractor
.
GridGraph
(
# Gabor parameters
gabor_sigma
=
math
.
sqrt
(
2.0
)
*
math
.
pi
,
# what kind of information to extract
normalize_gabor_jets
=
True
,
# setup of the fixed grid
node_distance
=
(
8
,
8
),
)
)
transformer
=
make_pipeline
(
wrap
(
[
"sample"
],
preprocessor
,
transform_extra_arguments
=
transform_extra_arguments
,
[
"sample"
],
preprocessor
,
transform_extra_arguments
=
transform_extra_arguments
,
),
wrap
([
"sample"
],
gabor_graph
),
)
gabor_jet
=
bob
.
bio
.
face
.
algorithm
.
GaborJet
(
gabor_jet_similarity_type
=
"PhaseDiffPlusCanberra"
,
multiple_feature_scoring
=
"max_jet"
,
...
...
@@ -98,7 +65,4 @@ gabor_jet = bob.bio.face.algorithm.GaborJet(
tempdir
=
tempfile
.
TemporaryDirectory
()
algorithm
=
BioAlgorithmLegacy
(
gabor_jet
,
base_dir
=
tempdir
.
name
)
pipeline
=
VanillaBiometricsPipeline
(
transformer
,
algorithm
)
pipeline
=
VanillaBiometricsPipeline
(
transformer
,
algorithm
)
bob/bio/face/config/baseline/helpers.py
View file @
27c75ad2
...
...
@@ -11,7 +11,7 @@ def embedding_transformer_160x160(embedding, annotation_type, fixed_positions):
This transformer is suited for Facenet based architectures
.. warning::
This will re
direct
images to :math:`160
\t
imes 160`
This will re
size
images to :math:`160
\t
imes 160`
"""
...
...
@@ -72,7 +72,7 @@ def embedding_transformer_112x112(embedding, annotation_type, fixed_positions):
This transformer is suited for Facenet based architectures
.. warning::
This will re
direct
images to :math:`1
60
\t
imes 1
60
`
This will re
size
images to :math:`1
12
\t
imes 1
12
`
"""
...
...
@@ -125,3 +125,78 @@ def embedding_transformer_112x112(embedding, annotation_type, fixed_positions):
)
return
transformer
def crop_80x64(annotation_type, fixed_positions=None, color_channel="gray"):
    r"""
    Crops a face to :math:`80 \times 64`

    Parameters
    ----------

    annotation_type: str
       Type of annotations. Possible values are: `bounding-box`, `eyes-center` and None

    fixed_positions: tuple
       A tuple containing the annotations. This is used in case your input is already registered
       with fixed positions (eyes or bounding box)

    color_channel: str
       Color channel the cropper should produce (e.g. ``"gray"``).
       Forwarded unchanged to :any:`face_crop_solver`.

    Returns
    -------

      face_cropper:
         A face cropper to be used

      transform_extra_arguments:
         The parameters to the transformer

    """
    # Target geometry: 80 rows by 64 columns (width = height * 4 // 5)
    CROPPED_IMAGE_HEIGHT = 80
    CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5

    # Eye positions for frontal images, expressed in cropped-image coordinates
    RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 - 1)
    LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 * 3)

    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)

    if annotation_type == "bounding-box":
        # The transformer will receive the sample annotations under the
        # "annotations" keyword argument
        transform_extra_arguments = (("annotations", "annotations"),)
        TOP_LEFT_POS = (0, 0)
        BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)

        # Detects the face and crops it without eye detection
        face_cropper = face_crop_solver(
            cropped_image_size,
            color_channel=color_channel,
            cropped_positions={
                "topleft": TOP_LEFT_POS,
                "bottomright": BOTTOM_RIGHT_POS,
            },
            fixed_positions=fixed_positions,
        )

    elif annotation_type == "eyes-center":
        transform_extra_arguments = (("annotations", "annotations"),)

        # Crops the face aligning it with the eye positions defined above
        face_cropper = face_crop_solver(
            cropped_image_size,
            color_channel=color_channel,
            cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
            fixed_positions=fixed_positions,
        )

    else:
        # No usable annotation type: no extra arguments for the transformer,
        # and DEFAULT TO FACE SIMPLE RESIZE
        transform_extra_arguments = None
        face_cropper = face_crop_solver(cropped_image_size)

    return face_cropper, transform_extra_arguments
bob/bio/face/config/baseline/lda.py
0 → 100644
View file @
27c75ad2
# Vanilla-biometrics baseline configuration: Tan&Triggs preprocessing +
# linearized pixels + LDA projection, scored with BioAlgorithmLegacy.
from bob.bio.base.pipelines.vanilla_biometrics import (
    Distance,
    VanillaBiometricsPipeline,
    BioAlgorithmLegacy,
)
from bob.bio.face.config.baseline.helpers import crop_80x64
import numpy as np
import bob.bio.face
from sklearn.pipeline import make_pipeline
from bob.pipelines import wrap
import tempfile
from bob.bio.base.transformers import AlgorithmTransformer
from bob.pipelines.transformers import SampleLinearize
import os

#### SOLVING IF THERE'S ANY DATABASE INFORMATION
# NOTE(review): `database` is presumably injected into this config's namespace
# by the resource loader before execution — verify against the caller.
if "database" in locals():
    annotation_type = database.annotation_type
    fixed_positions = database.fixed_positions
else:
    # No database loaded: fall back to annotation-free cropping
    annotation_type = None
    fixed_positions = None


####### SOLVING THE FACE CROPPER TO BE USED ##########
# Cropping to 80x64 grayscale; also yields the extra-argument mapping
# needed so the preprocessor receives per-sample annotations
face_cropper, transform_extra_arguments = crop_80x64(
    annotation_type, fixed_positions, color_channel="gray"
)

# Tan&Triggs photometric normalization applied after cropping
preprocessor = bob.bio.face.preprocessor.TanTriggs(
    face_cropper=face_cropper, dtype=np.float64
)

#### FEATURE EXTRACTOR ######

# Temporary directory holding the trained LDA projector checkpoint;
# kept alive at module scope so it is not cleaned up while the pipeline runs
tempdir = tempfile.TemporaryDirectory()

# PCA (keeping 90% of the variance) followed by LDA, with pseudo-inverse
lda = bob.bio.base.algorithm.LDA(use_pinv=True, pca_subspace_dimension=0.90)
# Legacy bob.bio algorithm wrapped as a scikit-learn transformer
lda_transformer = AlgorithmTransformer(
    lda, projector_file=os.path.join(tempdir.name, "Projector.hdf5")
)

transformer = make_pipeline(
    wrap(
        ["sample"],
        preprocessor,
        transform_extra_arguments=transform_extra_arguments,
    ),
    # Flatten the 2D cropped image into a feature vector before LDA
    SampleLinearize(),
    wrap(["sample"], lda_transformer),
)


### BIOMETRIC ALGORITHM
# The same LDA instance is reused for enrollment/scoring; it loads the
# projector trained by the transformer stage from the shared tempdir.
algorithm = BioAlgorithmLegacy(
    lda,
    base_dir=tempdir.name,
    projector_file=os.path.join(tempdir.name, "Projector.hdf5"),
)

pipeline = VanillaBiometricsPipeline(transformer, algorithm)
bob/bio/face/config/baseline/lgbphs.py
0 → 100644
View file @
27c75ad2
# Vanilla-biometrics baseline configuration: Tan&Triggs preprocessing +
# LGBPHS histograms, compared with histogram intersection.
from bob.bio.base.pipelines.vanilla_biometrics import (
    Distance,
    VanillaBiometricsPipeline,
    BioAlgorithmLegacy,
)
from bob.bio.face.config.baseline.helpers import crop_80x64
import math
import numpy as np
import bob.bio.face
from sklearn.pipeline import make_pipeline
from bob.pipelines import wrap
import tempfile
import bob.math

#### SOLVING IF THERE'S ANY DATABASE INFORMATION
# NOTE(review): `database` is presumably injected into this config's namespace
# by the resource loader before execution — verify against the caller.
if "database" in locals():
    annotation_type = database.annotation_type
    fixed_positions = database.fixed_positions
else:
    # No database loaded: fall back to annotation-free cropping
    annotation_type = None
    fixed_positions = None

####### SOLVING THE FACE CROPPER TO BE USED ##########
# Cropping to 80x64 grayscale; also yields the extra-argument mapping
# needed so the preprocessor receives per-sample annotations
face_cropper, transform_extra_arguments = crop_80x64(
    annotation_type, fixed_positions, color_channel="gray"
)

# Tan&Triggs photometric normalization applied after cropping
preprocessor = bob.bio.face.preprocessor.TanTriggs(
    face_cropper=face_cropper, dtype=np.float64
)

#### FEATURE EXTRACTOR ######
# Local Gabor Binary Pattern Histogram Sequences
lgbphs = bob.bio.face.extractor.LGBPHS(
    # block setup
    block_size=8,
    block_overlap=0,
    # Gabor parameters
    gabor_sigma=math.sqrt(2.) * math.pi,
    # LBP setup (we use the defaults)
    # histogram setup
    sparse_histogram=True
)

transformer = make_pipeline(
    wrap(
        ["sample"],
        preprocessor,
        transform_extra_arguments=transform_extra_arguments,
    ),
    wrap(["sample"], lgbphs),
)

### BIOMETRIC ALGORITHM
# Histogram intersection is a similarity (higher = more similar),
# hence is_distance_function=False
histogram = bob.bio.face.algorithm.Histogram(
    distance_function=bob.math.histogram_intersection, is_distance_function=False
)

# Scratch directory for the legacy algorithm's intermediate files;
# kept alive at module scope so it is not cleaned up while the pipeline runs
tempdir = tempfile.TemporaryDirectory()
algorithm = BioAlgorithmLegacy(histogram, base_dir=tempdir.name)

pipeline = VanillaBiometricsPipeline(transformer, algorithm)
bob/bio/face/test/test_baselines.py
View file @
27c75ad2
...
...
@@ -3,7 +3,10 @@ import pkg_resources
import
numpy
as
np
from
bob.pipelines
import
Sample
,
SampleSet
,
DelayedSample
from
bob.bio.base
import
load_resource
from
bob.bio.base.pipelines.vanilla_biometrics
import
checkpoint_vanilla_biometrics
,
dask_vanilla_biometrics
from
bob.bio.base.pipelines.vanilla_biometrics
import
(
checkpoint_vanilla_biometrics
,
dask_vanilla_biometrics
,
)
import
tempfile
import
os
import
bob.io.base
...
...
@@ -44,23 +47,32 @@ def get_fake_sample_set(face_size=(160, 160), purpose="bioref"):
]
def
run_baseline
(
baseline
):
def get_fake_samples_for_training():
    """Build a small random training set of annotated Samples.

    Returns a list of 10 Samples wrapping random arrays of shape
    (3, 400, 400) — presumably channels-first RGB images (TODO confirm) —
    each carrying fixed eye annotations and a stringified index as both
    key and subject.
    """
    # 10 random "images", values in [0, 1)
    data = np.random.rand(10, 3, 400, 400)
    # Same fixed eye positions for every generated sample
    annotations = {"reye": (131, 176), "leye": (222, 170)}

    return [
        Sample(x, key=str(i), subject=str(i), annotations=annotations)
        for i, x in enumerate(data)
    ]
def
run_baseline
(
baseline
,
samples_for_training
=
[]):
biometric_references
=
get_fake_sample_set
(
purpose
=
"bioref"
)
probes
=
get_fake_sample_set
(
purpose
=
"probe"
)
# Regular pipeline
pipeline
=
load_resource
(
baseline
,
"baseline"
)
scores
=
pipeline
(
[]
,
biometric_references
,
probes
)
scores
=
pipeline
(
samples_for_training
,
biometric_references
,
probes
)
assert
len
(
scores
)
==
1
assert
len
(
scores
[
0
])
==
1
# CHECKPOINTING
import
ipdb
;
ipdb
.
set_trace
()
with
tempfile
.
TemporaryDirectory
()
as
d
:
checkpoint_pipeline
=
checkpoint_vanilla_biometrics
(
copy
.
deepcopy
(
pipeline
),
base_dir
=
d
)
checkpoint_pipeline
=
checkpoint_vanilla_biometrics
(
copy
.
deepcopy
(
pipeline
),
base_dir
=
d
)
checkpoint_scores
=
checkpoint_pipeline
([],
biometric_references
,
probes
)
assert
len
(
checkpoint_scores
)
==
1
assert
len
(
checkpoint_scores
[
0
])
==
1
...
...
@@ -72,13 +84,14 @@ def run_baseline(baseline):
assert
"samplewrapper-2"
in
dirs
assert
"scores"
in
dirs
# DASK
with
tempfile
.
TemporaryDirectory
()
as
d
:
dask_pipeline
=
dask_vanilla_biometrics
(
checkpoint_vanilla_biometrics
(
copy
.
deepcopy
(
pipeline
),
base_dir
=
d
))
dask_pipeline
=
dask_vanilla_biometrics
(
checkpoint_vanilla_biometrics
(
copy
.
deepcopy
(
pipeline
),
base_dir
=
d
)
)
dask_scores
=
dask_pipeline
([],
biometric_references
,
probes
)
dask_scores
=
dask_scores
.
compute
(
scheduler
=
"single-threaded"
)
dask_scores
=
dask_scores
.
compute
(
scheduler
=
"single-threaded"
)
assert
len
(
dask_scores
)
==
1
assert
len
(
dask_scores
[
0
])
==
1
assert
np
.
isclose
(
scores
[
0
][
0
].
data
,
dask_scores
[
0
][
0
].
data
)
...
...
@@ -109,12 +122,18 @@ def test_inception_resnetv1_msceleb():
def test_inception_resnetv1_casiawebface():
    # Run the shared baseline harness against this registered resource
    run_baseline("inception_resnetv1_casiawebface")
def test_arcface_insight_tf():
    import tensorflow as tf

    # Reset the TF1 default graph so earlier tests in the same process
    # do not leave stale graph state behind
    tf.compat.v1.reset_default_graph()

    run_baseline("arcface_insight_tf")
def test_gabor_graph():
    # Run the shared baseline harness against this registered resource
    run_baseline("gabor_graph")
#def test_lda():
# run_baseline("lda", get_fake_samples_for_training())
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment