First commit

*~
*.swp
*.pyc
bin
eggs
parts
.installed.cfg
.mr.developer.cfg
*.egg-info
src
develop-eggs
sphinx
dist
# This build file heavily uses template features from YAML so it is generic
# enough for any Bob project. Don't modify it unless you know what you're
# doing.
# Definition of our build pipeline
stages:
  - build
  - test
  - docs
  - wheels
  - deploy
# ---------
# Templates
# ---------
# Template for the build stage
# Needs to run on all supported architectures, platforms and python versions
.build_template: &build_job
  stage: build
  before_script:
    - git clean -ffdx
    - mkdir _ci
    - curl --silent "https://gitlab.idiap.ch/bob/bob.admin/raw/master/gitlab/install.sh" > _ci/install.sh
    - chmod 755 _ci/install.sh
    - ./_ci/install.sh _ci #updates
    - ./_ci/before_build.sh
  script:
    - ./_ci/build.sh
  after_script:
    - ./_ci/after_build.sh
  artifacts:
    expire_in: 1 week
    paths:
      - _ci/
      - dist/
      - sphinx/
# Template for the test stage - re-installs from uploaded wheels
# Needs to run on all supported architectures, platforms and python versions
.test_template: &test_job
  stage: test
  before_script:
    - ./_ci/install.sh _ci #updates
    - ./_ci/before_test.sh
  script:
    - ./_ci/test.sh
  after_script:
    - ./_ci/after_test.sh
# Template for the wheel uploading stage
# Needs to run against one supported architecture, platform and python version
.wheels_template: &wheels_job
  stage: wheels
  environment: intranet
  only:
    - master
    - /^v\d+\.\d+\.\d+([abc]\d*)?$/ # PEP-440 compliant version (tags)
  before_script:
    - ./_ci/install.sh _ci #updates
    - ./_ci/before_wheels.sh
  script:
    - ./_ci/wheels.sh
  after_script:
    - ./_ci/after_wheels.sh
# Template for (latest) documentation upload stage
# Only one real job needs to do this
.docs_template: &docs_job
  stage: docs
  environment: intranet
  only:
    - master
  before_script:
    - ./_ci/install.sh _ci #updates
    - ./_ci/before_docs.sh
  script:
    - ./_ci/docs.sh
  after_script:
    - ./_ci/after_docs.sh
# Template for the deployment stage - re-installs from uploaded wheels
# Needs to run on a single architecture only
# Will deploy your package to PyPI and other required services
# Only runs for tags
.deploy_template: &deploy_job
  stage: deploy
  environment: internet
  only:
    - /^v\d+\.\d+\.\d+([abc]\d*)?$/ # PEP-440 compliant version (tags)
  except:
    - branches
  before_script:
    - ./_ci/install.sh _ci #updates
    - ./_ci/before_deploy.sh
  script:
    - ./_ci/deploy.sh
  after_script:
    - ./_ci/after_deploy.sh
# -------------
# Build Targets
# -------------
# Linux + Python 2.7: Builds, tests, uploads wheel and deploys (if needed)
build_linux_27:
  <<: *build_job
  variables: &linux_27_build_variables
    PYTHON_VERSION: "2.7"
    WHEEL_TAG: "py27"
  tags:
    - conda-linux

test_linux_27:
  <<: *test_job
  variables: *linux_27_build_variables
  dependencies:
    - build_linux_27
  tags:
    - conda-linux

wheels_linux_27:
  <<: *wheels_job
  variables: *linux_27_build_variables
  dependencies:
    - build_linux_27
  tags:
    - conda-linux

deploy_linux_27:
  <<: *deploy_job
  variables: *linux_27_build_variables
  dependencies:
    - build_linux_27
  tags:
    - conda-linux
# Linux + Python 3.5: Builds, tests and uploads wheel
build_linux_35:
  <<: *build_job
  variables: &linux_35_build_variables
    PYTHON_VERSION: "3.5"
    WHEEL_TAG: "py3"
  tags:
    - conda-linux

test_linux_35:
  <<: *test_job
  variables: *linux_35_build_variables
  dependencies:
    - build_linux_35
  tags:
    - conda-linux

wheels_linux_35:
  <<: *wheels_job
  variables: *linux_35_build_variables
  dependencies:
    - build_linux_35
  tags:
    - conda-linux

docs_linux_35:
  <<: *docs_job
  variables: *linux_35_build_variables
  dependencies:
    - build_linux_35
  tags:
    - conda-linux
# Linux + Python 3.6: Builds and tests
build_linux_36:
  <<: *build_job
  variables: &linux_36_build_variables
    PYTHON_VERSION: "3.6"
    WHEEL_TAG: "py3"
  tags:
    - conda-linux

test_linux_36:
  <<: *test_job
  variables: *linux_36_build_variables
  dependencies:
    - build_linux_36
  tags:
    - conda-linux
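
# A new build target would follow the same template pattern. The sketch
# below is illustrative only (not part of this commit) and is kept
# commented out; uncommenting it would add a Python 3.7 build/test pair,
# assuming the bob.admin scripts and the conda-linux runners support it:
#
# build_linux_37:
#   <<: *build_job
#   variables: &linux_37_build_variables
#     PYTHON_VERSION: "3.7"
#     WHEEL_TAG: "py3"
#   tags:
#     - conda-linux
#
# test_linux_37:
#   <<: *test_job
#   variables: *linux_37_build_variables
#   dependencies:
#     - build_linux_37
#   tags:
#     - conda-linux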
include README.rst bootstrap-buildout.py buildout.cfg COPYING version.txt requirements.txt
recursive-include doc *.py *.rst
recursive-include bob *.*
.. vim: set fileencoding=utf-8 :
.. Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
.. Fri 17 Jul 02:49:53 2016 CEST
.. image:: http://img.shields.io/badge/docs-stable-yellow.svg
   :target: https://www.idiap.ch/software/bob/docs/bob/bob.ip.mtcnn/stable/index.html
.. image:: http://img.shields.io/badge/docs-latest-orange.svg
   :target: https://www.idiap.ch/software/bob/docs/bob/bob.ip.mtcnn/master/index.html
.. image:: https://gitlab.idiap.ch/bob/bob.ip.mtcnn/badges/master/build.svg
   :target: https://gitlab.idiap.ch/bob/bob.ip.mtcnn/commits/master
.. image:: https://gitlab.idiap.ch/bob/bob.ip.mtcnn/badges/master/coverage.svg
   :target: https://gitlab.idiap.ch/bob/bob.ip.mtcnn/commits/master
.. image:: https://img.shields.io/badge/gitlab-project-0000c0.svg
   :target: https://gitlab.idiap.ch/bob/bob.ip.mtcnn
.. image:: http://img.shields.io/pypi/v/bob.ip.mtcnn.svg
   :target: https://pypi.python.org/pypi/bob.ip.mtcnn
=====================================================
Bob interface for MTCNN face and landmark detection
=====================================================
This package wraps the MTCNN face and landmark detection from the paper::

   @ARTICLE{7553523,
     author={K. Zhang and Z. Zhang and Z. Li and Y. Qiao},
     journal={IEEE Signal Processing Letters},
     title={Joint Face Detection and Alignment Using Multitask Cascaded Convolutional Networks},
     year={2016},
     volume={23},
     number={10},
     pages={1499-1503},
     keywords={Benchmark testing;Computer architecture;Convolution;Detectors;Face;Face detection;Training;Cascaded convolutional neural network (CNN);face alignment;face detection},
     doi={10.1109/LSP.2016.2603342},
     ISSN={1070-9908},
     month={Oct},
   }
Installation
------------
Complete Bob's `installation`_ instructions. Then, to install this package,
run::

   $ conda install bob.ip.mtcnn
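
For a quick sanity check after installation, the detector can be used like
this (a minimal sketch, not part of this package's documentation; it assumes
a color image readable by ``bob.io.base.load`` and a hypothetical file name
``face.png``)::

   import bob.io.base
   from bob.ip.mtcnn import FaceDetector

   detector = FaceDetector()

   # "face.png" is a placeholder file name
   image = bob.io.base.load("face.png")

   # Biggest face: a bounding box and a dict with 5 named landmarks,
   # or (None, None) if no face is found
   bounding_box, landmarks = detector.detect_single_face(image)

   # All faces: parallel lists of bounding boxes and landmark dicts
   boxes, points = detector.detect_all_faces(image)
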
Documentation
-------------
For further documentation on this package, please read the `Latest Version <https://www.idiap.ch/software/bob/docs/bioidiap/bob.ip.mtcnn/master/index.html>`_ of the documentation.
.. _bob: https://www.idiap.ch/software/bob
.. _installation: https://www.idiap.ch/software/bob/install
# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
import os

import numpy
import caffe

import bob.core
import bob.ip.base
from bob.ip.facedetect import BoundingBox

from .legacy import detect_face
from .utils import bob_to_dlib_image_convertion, rectangle2bounding_box2

logger = bob.core.log.setup("bob.ip.mtcnn")
bob.core.log.set_verbosity_level(logger, 3)
class FaceDetector(object):
    """
    Detects faces and 5 landmarks using the MTCNN cascade
    (https://github.com/kpzhang93/MTCNN_face_detection_alignment) from the paper:

    Zhang, Kaipeng, et al. "Joint face detection and alignment using multitask
    cascaded convolutional networks." IEEE Signal Processing Letters 23.10
    (2016): 1499-1503.
    """

    def __init__(self):
        """
        Load the caffe models
        """
        caffe_base_path = FaceDetector.get_mtcnn_model_path()

        # Default values from the original example
        self.minsize = 20
        self.threshold = [0.6, 0.7, 0.7]
        self.factor = 0.709
        self.fastresize = False

        # Loading the three cascade stages (P-Net, R-Net and O-Net)
        caffe.set_mode_cpu()
        self.p_net = caffe.Net(os.path.join(caffe_base_path, "det1.prototxt"),
                               os.path.join(caffe_base_path, "det1.caffemodel"), caffe.TEST)
        self.r_net = caffe.Net(os.path.join(caffe_base_path, "det2.prototxt"),
                               os.path.join(caffe_base_path, "det2.caffemodel"), caffe.TEST)
        self.o_net = caffe.Net(os.path.join(caffe_base_path, "det3.prototxt"),
                               os.path.join(caffe_base_path, "det3.caffemodel"), caffe.TEST)
    def _convert_list_to_landmarks(self, points):
        """
        Convert the list of 10 landmark coordinates into a dictionary of named points
        """
        landmarks = []
        possible_landmarks = ['reye', 'leye', 'nose', 'mouthleft', 'mouthright']

        for i in range(points.shape[0]):
            landmark = dict()
            for offset, p in enumerate(possible_landmarks):
                # Each row stores the five x coordinates first (indices 0-4),
                # then the five y coordinates (indices 5-9); Bob uses (y, x)
                landmark[p] = (int(points[i][offset + 5]), int(points[i][offset]))
            landmarks.append(landmark)

        return landmarks
    def detect_all_faces(self, image, return_bob_bb=True):
        """
        Detect all the faces, and their respective landmarks, if any, in a COLORED image

        **Parameters**

        image: numpy array with a color image [c, w, h]
        return_bob_bb: if True, the faces are wrapped using :py:class:`bob.ip.facedetect.BoundingBox`

        **Returns**

        Two lists; the first one contains the bounding boxes of the detected
        faces and the second one contains the corresponding landmarks. The CNN
        returns 5 facial landmarks (leye, reye, nose, mouthleft, mouthright).
        If there is no face, ``(None, None)`` will be returned
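
        Example (a minimal sketch; ``image`` is assumed to be a color image
        array loaded beforehand, e.g. with ``bob.io.base.load``)::

            detector = FaceDetector()
            boxes, landmarks = detector.detect_all_faces(image)
            if boxes is not None:
                print(boxes[0], landmarks[0]['reye'])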
"""
assert image is not None
if len(image.shape) !=3:
raise ValueError("Only color images is supported")
bb, landmarks = detect_face(bob_to_dlib_image_convertion(image), self.minsize, self.p_net, self.r_net, self.o_net, self.threshold, self.fastresize, self.factor)
# If there's no face, return none
if len(bb) == 0:
return None, None
if return_bob_bb:
bb = rectangle2bounding_box2(bb)
return bb, self._convert_list_to_landmarks(landmarks)
    def detect_single_face(self, image):
        """
        Returns the biggest face in a COLORED image, if any.

        **Parameters**

        image: numpy array with a color image [c, w, h]

        **Returns**

        The face bounding box and its respective 5 landmarks (leye, reye,
        nose, mouthleft, mouthright). If there is no face, ``(None, None)``
        will be returned
        """
        faces, landmarks = self.detect_all_faces(image)

        # Return None if no face was detected
        if faces is None:
            return None, None

        # Keep the bounding box with the largest area
        index = numpy.argmax([(f.bottomright[0] - f.topleft[0]) * (f.bottomright[1] - f.topleft[1]) for f in faces])
        return faces[index], landmarks[index]
    def detect_crop_align(self, image, final_image_size=(160, 160)):
        """
        Detects the biggest face and crops it based on the eye locations,
        using :py:class:`bob.ip.base.FaceEyesNorm`.

        The final eye locations were inspired by:
        https://gitlab.idiap.ch/bob/bob.bio.caffe_face/blob/master/bob/bio/caffe_face/config/preprocessor/vgg_preprocessor.py

        **Parameters**

        image: numpy array with a color image [c, w, h]
        final_image_size: final image dimensions [w, h]

        **Returns**

        The cropped image. If there is no face, ``None`` will be returned
        """
        face, landmark = self.detect_single_face(image)

        if face is None:
            return None

        CROPPED_IMAGE_WIDTH = final_image_size[0]
        CROPPED_IMAGE_HEIGHT = final_image_size[1]

        # Final eye positions w.r.t. the cropped image size
        RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT / 3.44, CROPPED_IMAGE_WIDTH / 3.02)
        LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT / 3.44, CROPPED_IMAGE_WIDTH / 1.49)

        extractor = bob.ip.base.FaceEyesNorm((CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH), RIGHT_EYE_POS, LEFT_EYE_POS)
        return extractor(image, landmark['reye'], landmark['leye'])
    @staticmethod
    def get_mtcnn_model_path():
        import pkg_resources
        return pkg_resources.resource_filename(__name__, 'data')
from .FaceDetector import FaceDetector
import numpy
def get_config():
    """Returns a string containing the configuration information.
    """
    import bob.extension
    return bob.extension.get_config(__name__)
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
name: "PNet"
input: "data"
input_dim: 1
input_dim: 3
input_dim: 12
input_dim: 12
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 10
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "PReLU1"
  type: "PReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "PReLU2"
  type: "PReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "conv2"
  top: "conv3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "PReLU3"
  type: "PReLU"
  bottom: "conv3"
  top: "conv3"
}
layer {
  name: "conv4-1"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4-1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 2
    kernel_size: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "conv4-2"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4-2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 4
    kernel_size: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "prob1"
  type: "Softmax"
  bottom: "conv4-1"
  top: "prob1"
}
name: "RNet"
input: "data"
input_dim: 1
input_dim: 3
input_dim: 24
input_dim: 24
##########################
######################
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  convolution_param {
    num_output: 28
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "prelu1"
  type: "PReLU"
  bottom: "conv1"
  top: "conv1"
  propagate_down: true
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  convolution_param {