Commit 446becbc authored 7 years ago by Olegs NIKISINS
Added unit tests for classes used in the IQM-GMM pad algorithm
parent 4c4d3467
1 merge request: !19 Added face detection option to VideoFaceCrop preprocessor + unit tests for IQM-GMM algo.
Showing 1 changed file: bob/pad/face/test/test.py (120 additions, 0 deletions)
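The diff adds nose-style tests for the new VideoQualityMeasure extractor and the VideoGmmPadAlgorithm. A minimal sketch of calling the new tests directly, assuming a working bob.pad.face installation in which the test module is importable (illustrative only, not part of the commit):

# Hypothetical direct invocation of the new tests; the functions are
# defined in the diff below.
from bob.pad.face.test import test as pad_tests

pad_tests.test_video_quality_measure()
pad_tests.test_video_gmm_pad_algorithm()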
@@ -29,8 +29,12 @@ from ..extractor import FrameDiffFeatures
from ..extractor import VideoLBPHistogram
from ..extractor import VideoQualityMeasure
from ..algorithm import VideoSvmPadAlgorithm
from ..algorithm import VideoGmmPadAlgorithm
import random

#==============================================================================
@@ -149,6 +153,43 @@ def test_video_face_crop():
    assert np.sum(faces[0][1]) == 429158
    assert np.sum(faces[-1][1]) == 429158

    #==========================================================================
    # test another configuration of the VideoFaceCrop preprocessor:

    CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
    CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
    FIXED_POSITIONS = None
    MASK_SIGMA = None              # The sigma for random values areas outside image
    MASK_NEIGHBORS = 5             # The number of neighbors to consider while extrapolating
    MASK_SEED = None               # The seed for generating random values during extrapolation
    CHECK_FACE_SIZE_FLAG = True    # Check the size of the face
    MIN_FACE_SIZE = 50
    USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
    RGB_OUTPUT_FLAG = True         # Return RGB cropped face using local cropper
    DETECT_FACES_FLAG = True       # find annotations locally replacing the database annotations

    preprocessor = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
                                 cropped_positions = CROPPED_POSITIONS,
                                 fixed_positions = FIXED_POSITIONS,
                                 mask_sigma = MASK_SIGMA,
                                 mask_neighbors = MASK_NEIGHBORS,
                                 mask_seed = None,
                                 check_face_size_flag = CHECK_FACE_SIZE_FLAG,
                                 min_face_size = MIN_FACE_SIZE,
                                 use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
                                 rgb_output_flag = RGB_OUTPUT_FLAG,
                                 detect_faces_flag = DETECT_FACES_FLAG)

    video, _ = convert_image_to_video_data(image, annotations, 3)

    faces = preprocessor(frames = video, annotations = annotations)

    assert len(faces) == 3
    assert faces[0][1].shape == (3, 64, 64)
    assert faces[-1][1].shape == (3, 64, 64)
    assert np.sum(faces[0][1]) == 1253048
    assert np.sum(faces[-1][1]) == 1253048


#==============================================================================
def test_frame_difference():
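In both VideoFaceCrop configurations the first- and last-frame checksums coincide because convert_image_to_video_data appears to build the test video by repeating a single annotated image. A simplified, hypothetical stand-in for such a helper (the real helper presumably returns bob FrameContainers; a plain list is used here purely for illustration):

import numpy as np

def fake_convert_image_to_video_data(image, annotations, n_frames):
    # Hypothetical stand-in: repeat one annotated image n_frames times.
    video = [(str(i), image.copy()) for i in range(n_frames)]
    video_annotations = dict((str(i), annotations) for i in range(n_frames))
    return video, video_annotations

frame = np.ones((3, 100, 100), dtype='uint8')
video, ann = fake_convert_image_to_video_data(frame, {'topleft': (0, 0)}, 3)

# Every frame is the same image, so per-frame sums match, which is why the
# test compares np.sum(faces[0][1]) and np.sum(faces[-1][1]).
assert np.sum(video[0][1]) == np.sum(video[-1][1])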
@@ -261,6 +302,34 @@ def test_video_lbp_histogram():
    assert (lbp_histograms[0][1][-1] - 0.031737773152965658) < 0.000001


#==============================================================================
def test_video_quality_measure():
    """
    Test VideoQualityMeasure extractor.
    """

    image = load(datafile('test_image.png', 'bob.pad.face.test'))
    annotations = {'topleft': (95, 155), 'bottomright': (215, 265)}

    video, annotations = convert_image_to_video_data(image, annotations, 2)

    GALBALLY = True
    MSU = True
    DTYPE = None

    extractor = VideoQualityMeasure(galbally = GALBALLY,
                                    msu = MSU,
                                    dtype = DTYPE)

    features = extractor(video)

    assert len(features) == 2
    assert len(features[0][1]) == 139
    assert (features[0][1] == features[-1][1]).all()
    assert (features[0][1][0] - 2.7748559659812599e-05) < 0.000001
    assert (features[0][1][-1] - 0.16410418866596271) < 0.000001


#==============================================================================
def convert_array_to_list_of_frame_cont(data):
    """
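The 139 values per frame asserted above are consistent with concatenating a Galbally-style image-quality block with an MSU-style quality block; a tiny standalone numpy illustration of that layout (the individual block sizes are assumptions for illustration, not taken from this commit):

import numpy as np

# Assumed block sizes, chosen only so the concatenation matches the
# 139 values per frame checked by the test.
galbally_block = np.zeros(18)   # Galbally-style image-quality measures
msu_block = np.zeros(121)       # MSU-style image-quality features

frame_features = np.hstack([galbally_block, msu_block])
assert frame_features.shape == (139,)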
@@ -350,6 +419,57 @@ def test_video_svm_pad_algorithm():
    assert precision > 0.99


#==============================================================================
def test_video_gmm_pad_algorithm():
    """
    Test the VideoGmmPadAlgorithm algorithm.
    """

    random.seed(7)

    N = 1000
    mu = 1
    sigma = 1
    real_array = np.transpose(np.vstack([[random.gauss(mu, sigma) for _ in range(N)],
                                         [random.gauss(mu, sigma) for _ in range(N)]]))

    mu = 5
    sigma = 1
    attack_array = np.transpose(np.vstack([[random.gauss(mu, sigma) for _ in range(N)],
                                           [random.gauss(mu, sigma) for _ in range(N)]]))

    real = convert_array_to_list_of_frame_cont(real_array)

    N_COMPONENTS = 1
    RANDOM_STATE = 3
    FRAME_LEVEL_SCORES_FLAG = True

    algorithm = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
                                     random_state = RANDOM_STATE,
                                     frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)

    # training_features[0] - training features for the REAL class.
    real_array_converted = algorithm.convert_list_of_frame_cont_to_array(real)  # output is array

    assert (real_array == real_array_converted).all()

    # Train the GMM machine and get normalizers:
    machine, features_mean, features_std = algorithm.train_gmm(real = real_array_converted,
                                                               n_components = algorithm.n_components,
                                                               random_state = algorithm.random_state)

    algorithm.machine = machine
    algorithm.features_mean = features_mean
    algorithm.features_std = features_std

    scores_real = algorithm.project(real_array_converted)
    scores_attack = algorithm.project(attack_array)

    assert (np.min(scores_real) + 7.9423798970985917) < 0.000001
    assert (np.max(scores_real) + 1.8380480068281055) < 0.000001
    assert (np.min(scores_attack) + 38.831260843070098) < 0.000001
    assert (np.max(scores_attack) + 5.3633030621521272) < 0.000001
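The score assertions above come from a GMM trained only on the bona fide samples, with both classes then scored by log-likelihood under that model. A minimal standalone sketch of the same one-class idea, using scikit-learn's GaussianMixture rather than VideoGmmPadAlgorithm (the normalization step and all names here are illustrative assumptions, not the bob API):

import random
import numpy as np
from sklearn.mixture import GaussianMixture

random.seed(7)
N = 1000

# Two 2-D Gaussian clouds: bona fide samples around mu=1, attacks around mu=5,
# mirroring the synthetic data generated in the test.
real = np.array([[random.gauss(1, 1), random.gauss(1, 1)] for _ in range(N)])
attack = np.array([[random.gauss(5, 1), random.gauss(5, 1)] for _ in range(N)])

# Normalizers computed on the bona fide set only, in the spirit of the
# features_mean / features_std returned by train_gmm.
mean, std = real.mean(axis=0), real.std(axis=0)

gmm = GaussianMixture(n_components=1, random_state=3)
gmm.fit((real - mean) / std)

# Per-sample log-likelihoods: bona fide samples score higher than attacks.
scores_real = gmm.score_samples((real - mean) / std)
scores_attack = gmm.score_samples((attack - mean) / std)

assert scores_real.mean() > scores_attack.mean()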