bob / bob.bio.face, commit 8a6a7062
Authored 3 years ago by Tiago de Freitas Pereira

    50 shades

Parent: 63fb09cc
Merge request: !122 "arcface update"

Changes: 1 file changed, notebooks/50-shades-of-face.ipynb (+23, -39)
``` diff
--- a/notebooks/50-shades-of-face.ipynb
+++ b/notebooks/50-shades-of-face.ipynb
@@ -12,7 +12,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": 6,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -29,18 +29,6 @@
    "import os\n",
    "import scipy.spatial\n",
    "import bob.measure\n",
-    "dask_client = None\n",
-    "\n",
-    "###\n",
-    "image_size = 112\n",
-    "\n",
-    "# eyes position in the vertical axis\n",
-    "# final position will be image_size/height_denominators\n",
-    "height_denominators = [4.5,4,3.5,3,2.8]\n",
-    "\n",
-    "# Eyes distance to be explored\n",
-    "eyes_distances = [30, 35, 40, 42,45,48]\n",
-    "\n",
    "output_path = \"./50-shades\"\n",
@@ -63,24 +51,14 @@
    "\n"
   ]
  },
- {
-  "cell_type": "markdown",
-  "metadata": {},
-  "source": [
-   "## Setting up the grid\n",
-   "\n",
-   "If you want to run this on the cluster, don't forget to `SETSHELL grid` before running the cell below.\n"
-  ]
- },
  {
   "cell_type": "code",
-  "execution_count": 2,
+  "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Starting dask client\n",
    "\n",
-    "\n",
    "from dask.distributed import Client\n",
    "from bob.pipelines.distributed.sge import SGEMultipleQueuesCluster\n",
    "\n",
@@ -100,13 +78,13 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 3,
+  "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
-      "model_id": "365e408f16e04de7ba0f709639b4ee8d",
+      "model_id": "bf5cb5839aaf46e8808e0c3ebc3abb88",
       "version_major": 2,
       "version_minor": 0
      },
@@ -123,6 +101,12 @@
    "\n",
    "annotation_type = \"eyes-center\"\n",
    "fixed_positions = None\n",
+    "height_denominators = [4.5,4,3.5,3]\n",
+    "eyes_distances = [30, 35, 40, 42,45,48]\n",
+    "\n",
+    "#height_denominators = [3,4]\n",
+    "#eyes_distances = [42,43]\n",
+    "\n",
    "\n",
    "\n",
    "def get_cropers(final_size = 112,\n",
@@ -195,8 +179,7 @@
    "\n",
    "subplot_shape = (int((len(eyes_distances)*len(height_denominators))/len(height_denominators)),len(height_denominators))\n",
    "\n",
-    "transformers = get_cropers(final_size=image_size,\n",
-    "                           height_denominators=height_denominators,\n",
+    "transformers = get_cropers(height_denominators=height_denominators,\n",
    "                           eyes_distances=eyes_distances)\n",
    "\n",
    "plot_faces(transformers, database, subplot_shape)\n"
@@ -208,12 +191,12 @@
   "source": [
    "## Run vanilla biometrics\n",
    "\n",
-    "Here we are running Vanilla Biometrics several times and collecting the `1-FNMR@FMR=0.001` and plotting."
+    "Here we are running Vanilla Biometrics several times and collecting the `1-FNMR@FMR=0.01` and plotting."
   ]
  },
  {
   "cell_type": "code",
-  "execution_count": 4,
+  "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
@@ -243,19 +226,13 @@
    "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
    "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
    "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-    "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-    "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-    "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-    "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-    "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-    "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
    "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n"
   ]
  },
  {
   "data": {
    "application/vnd.jupyter.widget-view+json": {
-    "model_id": "6556949a548c4f1da86073f0a9351109",
+    "model_id": "fe0dca81b68641348a28c2e165519e92",
     "version_major": 2,
     "version_minor": 0
    },
@@ -313,9 +290,9 @@
    " \n",
    "    scores_dev = os.path.join(output_path, \"scores-dev\")\n",
    " \n",
-    "    # Picking FNMR@FAR=0.001\n",
+    "    # Picking FNMR@FAR=0.01\n",
    "    neg, pos = bob.bio.base.score.load.split_four_column(scores_dev)\n",
-    "    far_thres = bob.measure.far_threshold(neg, pos, 0.001)\n",
+    "    far_thres = bob.measure.far_threshold(neg, pos, 0.01)\n",
    "    fpr,fnr = bob.measure.fprfnr(neg, pos, far_thres)\n",
    "    fnmr_1 = round(1-fnr,2)\n",
    "    fnmrs.append(fnmr_1)\n",
@@ -333,6 +310,13 @@
    "# Shutting down client\n",
    "dask_client.shutdown()"
   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
  }
 ],
 "metadata": {
```
%% Cell type:markdown id: tags:

# 50 Shades of face

In this notebook we aim to evaluate the impact of different face crops on FR baselines.
For that we rely on the MOBIO dataset, which is not ideal, but is small enough to run a bunch of experiments quickly.
%% Cell type:code id: tags:

``` python
# Fetching resources
import bob.bio.base
from bob.bio.base.pipelines.vanilla_biometrics import execute_vanilla_biometrics
from bob.bio.base.pipelines.vanilla_biometrics import Distance
from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
from bob.bio.face.database import MobioDatabase
from bob.bio.face.preprocessor import FaceCrop
from bob.extension import rc
from bob.pipelines import wrap
import os
import scipy.spatial
import bob.measure

output_path = "./50-shades"

######## CHANGE YOUR FEATURE EXTRACTOR HERE
from bob.bio.face.embeddings.mxnet_models import ArcFaceInsightFace
extractor_transformer = wrap(["sample"], ArcFaceInsightFace())

### CHANGE YOUR MATCHER HERE
algorithm = Distance(distance_function=scipy.spatial.distance.cosine,
                     is_distance_function=True)

##### CHANGE YOUR DATABASE HERE
database = MobioDatabase(protocol="mobile0-male")

sample = database.references()[0][0]

import matplotlib.pyplot as plt
import bob.io.image
```
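%% Cell type:markdown id: tags:

The `CHANGE YOUR ... HERE` markers above are the intended swap points: extractor, matcher and database are independent pieces, so trying another setup is a one-line change. A minimal sketch, assuming the `mobile0-female` MOBIO protocol is available in your install:

``` python
# Hypothetical swap: point the same study at another MOBIO protocol
database = MobioDatabase(protocol="mobile0-female")
```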
%% Cell type:code id: tags:

``` python
# Starting dask client

from dask.distributed import Client
from bob.pipelines.distributed.sge import SGEMultipleQueuesCluster

cluster = SGEMultipleQueuesCluster(min_jobs=1)
dask_client = Client(cluster)
```
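%% Cell type:markdown id: tags:

`SGEMultipleQueuesCluster` assumes an SGE grid is available. A minimal sketch of a local alternative, assuming `execute_vanilla_biometrics` below also accepts a plain local Dask client:

``` python
# Hypothetical local fallback: run the Dask graph on this machine only
from dask.distributed import Client
dask_client = Client(n_workers=4, threads_per_worker=1)
```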
%% Cell type:markdown id: tags:

## Running different face crops

Here we vary the `eyes_distances` and the ratio `fig_size/height_denominators`,
generating the transformers and plotting the outcome.
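As a quick sanity check of the crop geometry, here is the arithmetic from `get_cropers` below for one illustrative configuration (the numbers are chosen only for this example); the horizontal distance between the two eye landmarks comes out to `e` by construction:

``` python
final_size = 112
left_eye_offset = 1.49
e, h = 42, 3  # one (eyes distance, height denominator) pair

right_eye_offset = (final_size * left_eye_offset) / (final_size - e * left_eye_offset)
reye = (final_size / h, final_size / right_eye_offset)  # ~(37.33, 33.17)
leye = (final_size / h, final_size / left_eye_offset)   # ~(37.33, 75.17)
assert round(leye[1] - reye[1], 6) == e  # eye distance equals e
```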
%% Cell type:code id: tags:

``` python
%matplotlib widget

annotation_type = "eyes-center"
fixed_positions = None
height_denominators = [4.5, 4, 3.5, 3]
eyes_distances = [30, 35, 40, 42, 45, 48]

# height_denominators = [3,4]
# eyes_distances = [42,43]


def get_cropers(final_size=112,
                height_denominators=[5, 4, 3, 2],
                eyes_distances=[30, 35, 40, 42, 45]):

    left_eye_offset = 1.49

    transformers = []
    for e in eyes_distances:
        for h in height_denominators:

            right_eye_offset = (final_size * left_eye_offset) / (final_size - e * left_eye_offset)

            RIGHT_EYE_POS = (final_size / h, final_size / right_eye_offset)
            LEFT_EYE_POS = (final_size / h, final_size / left_eye_offset)
            # RIGHT_EYE_POS = (final_size / 3.44, final_size / 3.02)
            # LEFT_EYE_POS = (final_size / 3.44, final_size / 1.49)

            cropped_positions = {
                "leye": LEFT_EYE_POS,
                "reye": RIGHT_EYE_POS,
            }
            # print(cropped_positions)

            preprocessor_transformer = FaceCrop(cropped_image_size=(112, 112),
                                                cropped_positions=cropped_positions,
                                                color_channel='rgb',
                                                fixed_positions=fixed_positions)

            transform_extra_arguments = (None if (cropped_positions is None or fixed_positions is not None)
                                         else (("annotations", "annotations"),))

            preprocessor_transformer = wrap(["sample"],
                                            preprocessor_transformer,
                                            transform_extra_arguments=transform_extra_arguments)

            transformers.append(preprocessor_transformer)

    return transformers


def plot_faces(transformers, database, subplot_shape, fnmrs=None):
    fig, axis = plt.subplots(subplot_shape[0], subplot_shape[1])

    offset = 0
    for ax_h in axis:
        for ax_w in ax_h:
            # Picking the first sample
            sample = database.references()[0][0]

            preprocessor_transformer = transformers[offset]
            cropped = preprocessor_transformer.transform([sample])[0]
            cropped = bob.io.image.to_matplotlib(cropped.data).astype("uint8")

            ax_w.imshow(cropped)
            reye_y = round(preprocessor_transformer.estimator.cropped_positions["reye"][0], 2)
            reye_x = round(preprocessor_transformer.estimator.cropped_positions["reye"][1], 2)
            leye_y = round(preprocessor_transformer.estimator.cropped_positions["leye"][0], 2)
            leye_x = round(preprocessor_transformer.estimator.cropped_positions["leye"][1], 2)

            if fnmrs is None:
                title = f"({reye_y},{reye_x}) - ({leye_y},{leye_x})"
            else:
                title = f"({reye_y},{reye_x}) - ({leye_y},{leye_x}) = {fnmrs[offset]}"

            ax_w.set_title(f"{title}", fontsize=5)
            ax_w.axis('off')
            offset += 1


subplot_shape = (int((len(eyes_distances) * len(height_denominators)) / len(height_denominators)),
                 len(height_denominators))

transformers = get_cropers(height_denominators=height_denominators,
                           eyes_distances=eyes_distances)

plot_faces(transformers, database, subplot_shape)
```
%% Output

%% Cell type:markdown id: tags:

## Run vanilla biometrics

Here we run Vanilla Biometrics several times, collecting and plotting the `1-FNMR@FMR=0.01`.
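A toy illustration of the number being collected (synthetic scores, not MOBIO results; it only assumes the `bob.measure` calls already used in the cell below):

``` python
import numpy
import bob.measure

rng = numpy.random.RandomState(0)
neg = rng.normal(0.0, 1.0, 1000)  # impostor (non-match) scores
pos = rng.normal(3.0, 1.0, 1000)  # genuine (match) scores

thres = bob.measure.far_threshold(neg, pos, 0.01)  # score threshold at FMR = 1%
fpr, fnr = bob.measure.fprfnr(neg, pos, thres)
print(f"1-FNMR@FMR=0.01: {1 - fnr:.2f}")
```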
%% Cell type:code id: tags:

``` python
"""
vanilla_biometrics(
    pipeline,
    database,
    dask_client,
    groups,
    output,
    write_metadata_scores,
    checkpoint,
    dask_partition_size,
    dask_n_workers,
)
"""
from sklearn.pipeline import make_pipeline

write_metadata_scores = False
checkpoint = False
dask_partition_size = None
dask_n_workers = 15

### Preparing the pipeline
fnmrs = []
for t in transformers:

    # Chain the Transformers together
    transformer = make_pipeline(t, extractor_transformer)

    # Assemble the Vanilla Biometric pipeline and execute
    pipeline = VanillaBiometricsPipeline(transformer, algorithm)

    execute_vanilla_biometrics(
        pipeline,
        database,
        dask_client,
        ["dev"],
        output_path,
        write_metadata_scores,
        checkpoint,
        dask_partition_size,
        dask_n_workers,
        allow_scoring_with_all_biometric_references=True,
    )

    scores_dev = os.path.join(output_path, "scores-dev")

    # Picking FNMR@FAR=0.01
    neg, pos = bob.bio.base.score.load.split_four_column(scores_dev)
    far_thres = bob.measure.far_threshold(neg, pos, 0.01)
    fpr, fnr = bob.measure.fprfnr(neg, pos, far_thres)
    fnmr_1 = round(1 - fnr, 2)
    fnmrs.append(fnmr_1)

plot_faces(transformers, database, subplot_shape, fnmrs)
```
%% Output

There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.
(the same message is printed once per pipeline execution; repetitions omitted)
%% Cell type:code id: tags:

``` python
# Shutting down client
dask_client.shutdown()
```
%% Cell type:code id: tags:

``` python

```