Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
bob.ip.tensorflow_extractor
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
2
Issues
2
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
bob
bob.ip.tensorflow_extractor
Commits
e7aa5899
Commit
e7aa5899
authored
May 02, 2019
by
Amir MOHAMMADI
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Add MTCNN (tensorflow) model
parent
c4e980f9
Pipeline
#29833
passed with stage
in 22 minutes and 24 seconds
Changes
15
Pipelines
1
Hide whitespace changes
Inline
Side-by-side
Showing
15 changed files
with
440 additions
and
15 deletions
+440
-15
.gitignore
.gitignore
+3
-0
bob/ip/tensorflow_extractor/Extractor.py
bob/ip/tensorflow_extractor/Extractor.py
+2
-2
bob/ip/tensorflow_extractor/MTCNN.py
bob/ip/tensorflow_extractor/MTCNN.py
+125
-0
bob/ip/tensorflow_extractor/__init__.py
bob/ip/tensorflow_extractor/__init__.py
+3
-1
bob/ip/tensorflow_extractor/data/mtcnn/mtcnn.hdf5
bob/ip/tensorflow_extractor/data/mtcnn/mtcnn.hdf5
+0
-0
bob/ip/tensorflow_extractor/data/mtcnn/mtcnn.json
bob/ip/tensorflow_extractor/data/mtcnn/mtcnn.json
+188
-0
bob/ip/tensorflow_extractor/data/mtcnn/mtcnn.pb
bob/ip/tensorflow_extractor/data/mtcnn/mtcnn.pb
+0
-0
bob/ip/tensorflow_extractor/data/mtcnn/test_image.png
bob/ip/tensorflow_extractor/data/mtcnn/test_image.png
+0
-0
bob/ip/tensorflow_extractor/test.py
bob/ip/tensorflow_extractor/test.py
+41
-8
conda/meta.yaml
conda/meta.yaml
+4
-3
doc/conf.py
doc/conf.py
+1
-1
doc/face_detect.rst
doc/face_detect.rst
+18
-0
doc/index.rst
doc/index.rst
+1
-0
doc/plot/detect_faces_mtcnn.py
doc/plot/detect_faces_mtcnn.py
+53
-0
doc/py_api.rst
doc/py_api.rst
+1
-0
No files found.
.gitignore
View file @
e7aa5899
...
...
@@ -12,3 +12,6 @@ develop-eggs
sphinx
dist
bob/ip/tensorflow_extractor/data/FaceNet/
record.txt
build/
bob/ip/tensorflow_extractor/data/DR_GAN_model/
bob/ip/tensorflow_extractor/Extractor.py
View file @
e7aa5899
...
...
@@ -63,12 +63,12 @@ class Extractor(object):
Parameters
----------
image : numpy.array
image : numpy.
nd
array
Input Data
Returns
-------
numpy.array
numpy.
nd
array
The features.
"""
...
...
bob/ip/tensorflow_extractor/MTCNN.py
0 → 100644
View file @
e7aa5899
# code and model from https://github.com/blaueck/tf-mtcnn
import
pkg_resources
import
tensorflow
as
tf
import
multiprocessing
import
bob.io.image
MODEL_PATH
=
pkg_resources
.
resource_filename
(
__name__
,
"data/mtcnn/mtcnn.pb"
)
class MTCNN:
    """MTCNN v1 wrapper. See
    https://kpzhang93.github.io/MTCNN_face_detection_alignment/index.html for more
    details on MTCNN and see :ref:`bob.ip.tensorflow_extractor.face_detect` for an
    example code.

    Attributes
    ----------
    factor : float
        Factor is a trade-off between performance and speed.
    min_size : int
        Minimum face size to be detected.
    thresholds : list
        thresholds are a trade-off between false positives and missed detections.
    """

    def __init__(
        self,
        min_size=40,
        factor=0.709,
        thresholds=(0.6, 0.7, 0.7),
        model_path=MODEL_PATH,
    ):
        """Loads the frozen MTCNN graph and opens a TensorFlow session.

        Parameters
        ----------
        min_size : int
            Minimum face size (pixels) to be detected.
        factor : float
            Image-pyramid scale factor; trade-off between performance and speed.
        thresholds : tuple
            Per-stage detection thresholds; trade-off between false positives
            and missed detections.
        model_path : str
            Path to the frozen TensorFlow graph (``.pb``) of the model.
        """
        self.min_size = min_size
        self.factor = factor
        self.thresholds = thresholds

        # Deserialize the frozen graph from the .pb file and import it into a
        # fresh tf.Graph (empty name prefix so op names stay as exported).
        graph = tf.Graph()
        with graph.as_default():
            with open(model_path, "rb") as f:
                graph_def = tf.GraphDef.FromString(f.read())
            tf.import_graph_def(graph_def, name="")
        self.graph = graph

        # Let TensorFlow use all available cores for both intra- and
        # inter-op parallelism.
        config = tf.ConfigProto(
            intra_op_parallelism_threads=multiprocessing.cpu_count(),
            inter_op_parallelism_threads=multiprocessing.cpu_count(),
        )
        # NOTE: the session is kept open for the lifetime of this object and
        # is never explicitly closed here.
        self.sess = tf.Session(graph=graph, config=config)

    def detect(self, img):
        """Detects all faces in the image.

        Parameters
        ----------
        img : numpy.ndarray
            An RGB image in Bob format.

        Returns
        -------
        tuple
            A tuple of boxes, probabilities, and landmarks.
        """
        # assuming img is Bob format and RGB
        assert img.shape[0] == 3, img.shape
        # network expects BGR opencv format: convert channels-first Bob layout
        # to channels-last, then reverse the channel axis (RGB -> BGR).
        img = bob.io.image.to_matplotlib(img)
        img = img[..., ::-1]

        # Feed the image and the detection hyper-parameters into the graph's
        # named placeholder ops.
        feeds = {
            self.graph.get_operation_by_name("input").outputs[0]: img,
            self.graph.get_operation_by_name("min_size").outputs[0]: self.min_size,
            self.graph.get_operation_by_name("thresholds").outputs[0]: self.thresholds,
            self.graph.get_operation_by_name("factor").outputs[0]: self.factor,
        }
        fetches = [
            self.graph.get_operation_by_name("prob").outputs[0],
            self.graph.get_operation_by_name("landmarks").outputs[0],
            self.graph.get_operation_by_name("box").outputs[0],
        ]
        prob, landmarks, box = self.sess.run(fetches, feeds)
        return box, prob, landmarks

    def annotations(self, img):
        """Detects all faces in the image

        Parameters
        ----------
        img : numpy.ndarray
            An RGB image in Bob format.

        Returns
        -------
        list
            A list of annotations. Annotations are dictionaries that contain the
            following keys: ``topleft``, ``bottomright``, ``reye``, ``leye``, ``nose``,
            ``right_of_mouth``, ``left_of_mouth``, and ``quality``.
        """
        boxes, scores, landmarks = self.detect(img)
        annots = []
        for box, prob, lm in zip(boxes, scores, landmarks):
            # Each box carries 4 coordinates; pairs are formed as
            # (box[0], box[1]) and (box[2], box[3]).
            topleft = box[0], box[1]
            bottomright = box[2], box[3]
            # Each landmark vector carries 10 values; points are formed by
            # pairing index i with i+5 — presumably (row, column) in Bob
            # convention; TODO confirm against the exported graph.
            right_eye = lm[0], lm[5]
            left_eye = lm[1], lm[6]
            nose = lm[2], lm[7]
            right_of_mouth = lm[3], lm[8]
            left_of_mouth = lm[4], lm[9]
            annots.append(
                {
                    "topleft": topleft,
                    "bottomright": bottomright,
                    "reye": right_eye,
                    "leye": left_eye,
                    "nose": nose,
                    "right_of_mouth": right_of_mouth,
                    "left_of_mouth": left_of_mouth,
                    "quality": prob,
                }
            )
        return annots

    def __call__(self, img):
        """Wrapper for the annotations method.
        """
        return self.annotations(img)
bob/ip/tensorflow_extractor/__init__.py
View file @
e7aa5899
...
...
@@ -37,6 +37,7 @@ from .Extractor import Extractor
from
.FaceNet
import
FaceNet
from
.DrGanMSU
import
DrGanMSUExtractor
from
.Vgg16
import
VGGFace
,
vgg_16
from
.MTCNN
import
MTCNN
# gets sphinx autodoc done right - don't remove it
...
...
@@ -59,7 +60,8 @@ __appropriate__(
Extractor
,
FaceNet
,
DrGanMSUExtractor
,
VGGFace
VGGFace
,
MTCNN
,
)
# gets sphinx autodoc done right - don't remove it
...
...
bob/ip/tensorflow_extractor/data/mtcnn/mtcnn.hdf5
0 → 100644
View file @
e7aa5899
File added
bob/ip/tensorflow_extractor/data/mtcnn/mtcnn.json
0 → 100644
View file @
e7aa5899
[
{
"topleft"
:
[
40.24329
,
113.20566
],
"bottomright"
:
[
106.32423
,
162.45758
],
"reye"
:
[
67.90105
,
124.33353
],
"leye"
:
[
64.25375
,
145.77344
],
"nose"
:
[
81.764984
,
136.25734
],
"right_of_mouth"
:
[
88.05158
,
128.52052
],
"left_of_mouth"
:
[
84.64868
,
150.7494
],
"quality"
:
0.99999917
},
{
"topleft"
:
[
34.18492
,
427.5858
],
"bottomright"
:
[
94.81634
,
471.9476
],
"reye"
:
[
56.84209
,
446.76434
],
"leye"
:
[
64.74565
,
462.9847
],
"nose"
:
[
71.690926
,
454.36282
],
"right_of_mouth"
:
[
75.266556
,
438.5781
],
"left_of_mouth"
:
[
82.32741
,
454.99423
],
"quality"
:
0.99981314
},
{
"topleft"
:
[
69.87796
,
31.797615
],
"bottomright"
:
[
123.43042
,
78.04486
],
"reye"
:
[
96.72684
,
43.90791
],
"leye"
:
[
89.42775
,
61.957954
],
"nose"
:
[
104.02164
,
56.380474
],
"right_of_mouth"
:
[
111.99057
,
49.277725
],
"left_of_mouth"
:
[
104.45787
,
69.476105
],
"quality"
:
0.9994398
},
{
"topleft"
:
[
105.99489
,
238.27567
],
"bottomright"
:
[
159.2981
,
280.56006
],
"reye"
:
[
125.97672
,
249.38193
],
"leye"
:
[
127.117195
,
268.7624
],
"nose"
:
[
138.73158
,
257.6499
],
"right_of_mouth"
:
[
142.67319
,
246.85234
],
"left_of_mouth"
:
[
143.5627
,
267.6805
],
"quality"
:
0.99918956
},
{
"topleft"
:
[
48.377903
,
321.892
],
"bottomright"
:
[
110.95402
,
367.87064
],
"reye"
:
[
73.381096
,
334.53403
],
"leye"
:
[
76.274086
,
355.40384
],
"nose"
:
[
86.6857
,
344.3223
],
"right_of_mouth"
:
[
94.80564
,
331.12646
],
"left_of_mouth"
:
[
96.63391
,
351.96518
],
"quality"
:
0.9987685
},
{
"topleft"
:
[
115.29803
,
159.48656
],
"bottomright"
:
[
172.41876
,
205.56857
],
"reye"
:
[
141.30688
,
171.35336
],
"leye"
:
[
137.49718
,
191.17722
],
"nose"
:
[
151.32994
,
182.92662
],
"right_of_mouth"
:
[
159.72272
,
175.2344
],
"left_of_mouth"
:
[
156.25536
,
193.2938
],
"quality"
:
0.99671644
}
]
bob/ip/tensorflow_extractor/data/mtcnn/mtcnn.pb
0 → 100644
View file @
e7aa5899
File added
bob/ip/tensorflow_extractor/data/mtcnn/test_image.png
0 → 100644
View file @
e7aa5899
192 KB
bob/ip/tensorflow_extractor/test.py
View file @
e7aa5899
import
bob.io.base
import
bob.io.image
from
bob.io.base.test_utils
import
datafile
import
bob.ip.tensorflow_extractor
import
tensorflow
as
tf
import
pkg_resources
import
numpy
numpy
.
random
.
seed
(
10
)
import
json
import
os
numpy
.
random
.
seed
(
10
)
slim
=
tf
.
contrib
.
slim
from
.
import
scratch_network
...
...
@@ -15,8 +18,9 @@ from . import scratch_network
def
test_output
():
# Loading MNIST model
filename
=
os
.
path
.
join
(
pkg_resources
.
resource_filename
(
__name__
,
'data'
),
'model.ckp'
)
filename
=
os
.
path
.
join
(
pkg_resources
.
resource_filename
(
__name__
,
"data"
),
"model.ckp"
)
inputs
=
tf
.
placeholder
(
tf
.
float32
,
shape
=
(
None
,
28
,
28
,
1
))
# Testing the last output
...
...
@@ -41,23 +45,52 @@ def test_output():
def test_facenet():
    """Smoke-test the FaceNet extractor: a random uint8 image of the
    expected input size must produce a 128-dimensional embedding."""
    from bob.ip.tensorflow_extractor import FaceNet

    net = FaceNet()
    sample = (numpy.random.rand(3, 160, 160)).astype("uint8")
    embedding = net(sample)
    assert embedding.size == 128, embedding.shape
def test_drgan():
    """Smoke-test the DR-GAN extractor: a random uint8 image of the
    expected input size must produce a 320-dimensional feature vector."""
    from bob.ip.tensorflow_extractor import DrGanMSUExtractor

    net = DrGanMSUExtractor()
    sample = (numpy.random.rand(3, 96, 96)).astype("uint8")
    features = net(sample)
    assert features.size == 320, features.shape
def test_vgg16():
    # VGG16 test is intentionally disabled; the original assertions are kept
    # below for reference.
    pass
    # from bob.ip.tensorflow_extractor import VGGFace
    # extractor = VGGFace()
    # data = numpy.random.rand(3, 224, 224).astype("uint8")
    # output = extractor(data)
    # assert output.size == 4096, output.shape
def test_mtcnn():
    """Regression test for the MTCNN wrapper.

    Runs the detector on the packaged test image and compares the raw
    ``detect`` outputs against reference numbers stored in HDF5, then the
    ``annotations`` output against a reference JSON file.
    """
    test_image = datafile("mtcnn/test_image.png", __name__)
    ref_numbers = datafile("mtcnn/mtcnn.hdf5", __name__)
    ref_annots = datafile("mtcnn/mtcnn.json", __name__)
    from bob.ip.tensorflow_extractor import MTCNN

    mtcnn = MTCNN()
    img = bob.io.base.load(test_image)

    # Raw detections must match the stored reference arrays.
    bbox, prob, landmarks = mtcnn.detect(img)
    with bob.io.base.HDF5File(ref_numbers, "r") as f:
        ref_bbox = f["bbox"]
        ref_scores = f["scores"]
        ref_landmarks = f["landmarks"]

    assert numpy.allclose(bbox, ref_bbox), (bbox, ref_bbox)
    assert numpy.allclose(prob, ref_scores), (prob, ref_scores)
    assert numpy.allclose(landmarks, ref_landmarks), (landmarks, ref_landmarks)

    # Annotation dictionaries must match the stored reference JSON,
    # key by key, in order.
    annots = mtcnn.annotations(img)
    # BUGFIX: use a context manager instead of json.load(open(...)),
    # which leaked the file handle.
    with open(ref_annots) as f:
        ref_annots = json.load(f)
    for a, aref in zip(annots, ref_annots):
        for k, v in a.items():
            vref = aref[k]
            assert numpy.allclose(v, vref)
conda/meta.yaml
View file @
e7aa5899
...
...
@@ -25,12 +25,14 @@ requirements:
-
bob.db.mnist
-
bob.ip.color
-
six {{ six }}
-
tensorflow {{ tensorflow }}
-
numpy {{ numpy }}
run
:
-
python
-
setuptools
-
scipy
-
six
-
tensorflow >=1.2.1
-
{{
pin_compatible('tensorflow')
}}
-
{{
pin_compatible('numpy')
}}
test
:
imports
:
...
...
@@ -47,7 +49,6 @@ test:
-
coverage
-
sphinx
-
sphinx_rtd_theme
-
bob.io.image
-
bob.db.atnt
-
matplotlib
-
gridtk
...
...
doc/conf.py
View file @
e7aa5899
...
...
@@ -36,7 +36,7 @@ else:
extensions
.
append
(
'sphinx.ext.pngmath'
)
# Be picky about warnings
nitpicky
=
Fals
e
nitpicky
=
Tru
e
# Ignores stuff we can't easily resolve on other project's sphinx manuals
nitpick_ignore
=
[]
...
...
doc/face_detect.rst
0 → 100644
View file @
e7aa5899
.. _bob.ip.tensorflow_extractor.face_detect:
============================
Face detection using MTCNN
============================
This package comes with a wrapper around the MTCNN (v1) face detector. See
https://kpzhang93.github.io/MTCNN_face_detection_alignment/index.html for more
information on MTCNN. The model is directly converted from the caffe model using code in
https://github.com/blaueck/tf-mtcnn
See below for an example on how to use
:any:`bob.ip.tensorflow_extractor.MTCNN`:
.. plot:: plot/detect_faces_mtcnn.py
:include-source: True
doc/index.rst
View file @
e7aa5899
...
...
@@ -16,4 +16,5 @@ Tensorflow http://tensorflow.org/
:maxdepth: 2
guide
face_detect
py_api
doc/plot/detect_faces_mtcnn.py
0 → 100644
View file @
e7aa5899
# Example script: detect faces with MTCNN and plot boxes, landmarks and
# detection quality on top of the test image (used by the sphinx docs).
from bob.io.image import imshow
from bob.io.base import load
from bob.io.base.test_utils import datafile
from bob.ip.tensorflow_extractor import MTCNN
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Circle

# load colored test image
color_image = load(datafile("mtcnn/test_image.png", "bob.ip.tensorflow_extractor"))

# detect all face
detector = MTCNN()
detections = detector(color_image)

imshow(color_image)
plt.axis("off")
# NOTE(review): loop nesting reconstructed from an unindented source —
# the landmark loop and the quality text are presumed per-detection; confirm
# against the original file.
for annotations in detections:
    topleft = annotations["topleft"]
    bottomright = annotations["bottomright"]
    # (height, width) of the box in Bob's (row, column) convention
    size = bottomright[0] - topleft[0], bottomright[1] - topleft[1]
    # draw bounding boxes
    # matplotlib wants (x, y), hence the [::-1] reversal of (row, col)
    plt.gca().add_patch(
        Rectangle(
            topleft[::-1],
            size[1],
            size[0],
            edgecolor="b",
            facecolor="none",
            linewidth=2,
        )
    )
    # draw face landmarks
    for key, color in (
        ("reye", "r"),
        ("leye", "g"),
        ("nose", "b"),
        ("right_of_mouth", "k"),
        ("left_of_mouth", "w"),
    ):
        plt.gca().add_patch(
            Circle(
                annotations[key][::-1],
                radius=2,
                edgecolor=color,
                facecolor="none",
                linewidth=2,
            )
        )
    # show quality of detections
    plt.text(
        topleft[1],
        topleft[0],
        round(annotations["quality"], 3),
        color="b",
        fontsize=14,
    )
doc/py_api.rst
View file @
e7aa5899
...
...
@@ -9,6 +9,7 @@ Classes
.. autosummary::
bob.ip.tensorflow_extractor.Extractor
bob.ip.tensorflow_extractor.FaceNet
bob.ip.tensorflow_extractor.MTCNN
Detailed API
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment