bob / bob.pad.face

Commit 115d5ef1, authored Mar 05, 2018 by Olegs NIKISINS

Changed the default FrameSelector in preproc configs to select all frames

Parent: 78bbe93b
Pipeline #17360 passed in 21 minutes and 44 seconds
Changes: 9 files
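All nine diffs below apply the same change. Each config previously built its video preprocessor as Wrapper(_image_preprocessor), leaving frame selection to the bob.bio.video default; after this commit every config passes an explicit FrameSelector so that all frames of each input video are preprocessed. A condensed before/after, assembled from the hunks (_image_preprocessor stands for the FaceCropAlign instance each config defines):

from bob.bio.video.preprocessor import Wrapper
from bob.bio.video.utils import FrameSelector

# Before: implicit, library-default frame selection.
# preprocessor = Wrapper(_image_preprocessor)

# After: explicitly keep every frame of the input video.
_frame_selector = FrameSelector(selection_style = "all")
preprocessor = Wrapper(preprocessor = _image_preprocessor,
                       frame_selector = _frame_selector)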
bob/pad/face/config/lbp_svm.py

@@ -24,6 +24,8 @@ from ..preprocessor import FaceCropAlign
 from bob.bio.video.preprocessor import Wrapper
+from bob.bio.video.utils import FrameSelector
+
 FACE_SIZE = 64 # The size of the resulting face
 RGB_OUTPUT_FLAG = False # Gray-scale output
 USE_FACE_ALIGNMENT = False # use annotations

@@ -38,7 +40,10 @@ _image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
                                     face_detection_method = FACE_DETECTION_METHOD,
                                     min_face_size = MIN_FACE_SIZE)
-preprocessor = Wrapper(_image_preprocessor)
+_frame_selector = FrameSelector(selection_style = "all")
+
+preprocessor = Wrapper(preprocessor = _image_preprocessor,
+                       frame_selector = _frame_selector)

 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces with the size
bob/pad/face/config/lbp_svm_aggregated_db.py

@@ -26,6 +26,8 @@ from ..preprocessor import FaceCropAlign
 from bob.bio.video.preprocessor import Wrapper
+from bob.bio.video.utils import FrameSelector
+
 FACE_SIZE = 64 # The size of the resulting face
 RGB_OUTPUT_FLAG = False # Gray-scale output
 USE_FACE_ALIGNMENT = False # use annotations

@@ -40,7 +42,10 @@ _image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
                                     face_detection_method = FACE_DETECTION_METHOD,
                                     min_face_size = MIN_FACE_SIZE)
-preprocessor = Wrapper(_image_preprocessor)
+_frame_selector = FrameSelector(selection_style = "all")
+
+preprocessor = Wrapper(preprocessor = _image_preprocessor,
+                       frame_selector = _frame_selector)

 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces with the size
bob/pad/face/config/preprocessor/video_face_crop.py

@@ -5,6 +5,8 @@ from ..preprocessor import FaceCropAlign
 from bob.bio.video.preprocessor import Wrapper
+from bob.bio.video.utils import FrameSelector
+
 # =======================================================================================
 # Define instances here:

@@ -23,7 +25,10 @@ _image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
                                     face_detection_method=FACE_DETECTION_METHOD,
                                     min_face_size=MIN_FACE_SIZE)
-rgb_face_detector_dlib = Wrapper(_image_preprocessor)
+_frame_selector = FrameSelector(selection_style = "all")
+
+rgb_face_detector_dlib = Wrapper(preprocessor = _image_preprocessor,
+                                 frame_selector = _frame_selector)

 # =======================================================================================
 FACE_DETECTION_METHOD = "mtcnn" # use mtcnn face detection

@@ -35,4 +40,5 @@ _image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
                                     face_detection_method=FACE_DETECTION_METHOD,
                                     min_face_size=MIN_FACE_SIZE)
-rgb_face_detector_mtcnn = Wrapper(_image_preprocessor)
+rgb_face_detector_mtcnn = Wrapper(preprocessor = _image_preprocessor,
+                                  frame_selector = _frame_selector)
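video_face_crop.py differs slightly from the other configs: it defines two named preprocessor instances, and the mtcnn wrapper reuses the module-level _frame_selector created for the dlib one, so both now keep all frames. A usage sketch, under the assumption that the instances are imported directly by their module path (taken from the file header above):

# Both wrappers share the same "all"-frames selector and differ in
# face_detection_method ("mtcnn" is shown in the diff; "dlib" is implied
# by the instance name).
from bob.pad.face.config.preprocessor.video_face_crop import (
    rgb_face_detector_dlib,
    rgb_face_detector_mtcnn,
)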
bob/pad/face/config/qm_lr.py

@@ -23,6 +23,8 @@ from ..preprocessor import FaceCropAlign
 from bob.bio.video.preprocessor import Wrapper
+from bob.bio.video.utils import FrameSelector
+
 FACE_SIZE = 64 # The size of the resulting face
 RGB_OUTPUT_FLAG = True # RGB output
 USE_FACE_ALIGNMENT = False # use annotations

@@ -37,7 +39,10 @@ _image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
                                     face_detection_method = FACE_DETECTION_METHOD,
                                     min_face_size = MIN_FACE_SIZE)
-preprocessor = Wrapper(_image_preprocessor)
+_frame_selector = FrameSelector(selection_style = "all")
+
+preprocessor = Wrapper(preprocessor = _image_preprocessor,
+                       frame_selector = _frame_selector)

 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
bob/pad/face/config/qm_one_class_gmm.py

@@ -23,6 +23,8 @@ from ..preprocessor import FaceCropAlign
 from bob.bio.video.preprocessor import Wrapper
+from bob.bio.video.utils import FrameSelector
+
 FACE_SIZE = 64 # The size of the resulting face
 RGB_OUTPUT_FLAG = True # RGB output
 USE_FACE_ALIGNMENT = False # use annotations

@@ -37,7 +39,10 @@ _image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
                                     face_detection_method = FACE_DETECTION_METHOD,
                                     min_face_size = MIN_FACE_SIZE)
-preprocessor = Wrapper(_image_preprocessor)
+_frame_selector = FrameSelector(selection_style = "all")
+
+preprocessor = Wrapper(preprocessor = _image_preprocessor,
+                       frame_selector = _frame_selector)

 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
bob/pad/face/config/qm_one_class_svm_aggregated_db.py

@@ -25,6 +25,8 @@ from ..preprocessor import FaceCropAlign
 from bob.bio.video.preprocessor import Wrapper
+from bob.bio.video.utils import FrameSelector
+
 FACE_SIZE = 64 # The size of the resulting face
 RGB_OUTPUT_FLAG = True # RGB output
 USE_FACE_ALIGNMENT = False # use annotations

@@ -39,7 +41,10 @@ _image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
                                     face_detection_method = FACE_DETECTION_METHOD,
                                     min_face_size = MIN_FACE_SIZE)
-preprocessor = Wrapper(_image_preprocessor)
+_frame_selector = FrameSelector(selection_style = "all")
+
+preprocessor = Wrapper(preprocessor = _image_preprocessor,
+                       frame_selector = _frame_selector)

 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
bob/pad/face/config/qm_one_class_svm_cascade_aggregated_db.py

@@ -25,6 +25,8 @@ from ..preprocessor import FaceCropAlign
 from bob.bio.video.preprocessor import Wrapper
+from bob.bio.video.utils import FrameSelector
+
 FACE_SIZE = 64 # The size of the resulting face
 RGB_OUTPUT_FLAG = True # RGB output
 USE_FACE_ALIGNMENT = False # use annotations

@@ -39,7 +41,10 @@ _image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
                                     face_detection_method = FACE_DETECTION_METHOD,
                                     min_face_size = MIN_FACE_SIZE)
-preprocessor = Wrapper(_image_preprocessor)
+_frame_selector = FrameSelector(selection_style = "all")
+
+preprocessor = Wrapper(preprocessor = _image_preprocessor,
+                       frame_selector = _frame_selector)

 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
bob/pad/face/config/qm_svm.py

@@ -23,6 +23,8 @@ from ..preprocessor import FaceCropAlign
 from bob.bio.video.preprocessor import Wrapper
+from bob.bio.video.utils import FrameSelector
+
 FACE_SIZE = 64 # The size of the resulting face
 RGB_OUTPUT_FLAG = True # RGB output
 USE_FACE_ALIGNMENT = False # use annotations

@@ -37,7 +39,10 @@ _image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
                                     face_detection_method = FACE_DETECTION_METHOD,
                                     min_face_size = MIN_FACE_SIZE)
-preprocessor = Wrapper(_image_preprocessor)
+_frame_selector = FrameSelector(selection_style = "all")
+
+preprocessor = Wrapper(preprocessor = _image_preprocessor,
+                       frame_selector = _frame_selector)

 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
bob/pad/face/config/qm_svm_aggregated_db.py

@@ -25,6 +25,8 @@ from ..preprocessor import FaceCropAlign
 from bob.bio.video.preprocessor import Wrapper
+from bob.bio.video.utils import FrameSelector
+
 FACE_SIZE = 64 # The size of the resulting face
 RGB_OUTPUT_FLAG = True # RGB output
 USE_FACE_ALIGNMENT = False # use annotations

@@ -39,7 +41,10 @@ _image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
                                     face_detection_method = FACE_DETECTION_METHOD,
                                     min_face_size = MIN_FACE_SIZE)
-preprocessor = Wrapper(_image_preprocessor)
+_frame_selector = FrameSelector(selection_style = "all")
+
+preprocessor = Wrapper(preprocessor = _image_preprocessor,
+                       frame_selector = _frame_selector)

 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
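For intuition about what selection_style = "all" changes, here is a toy contrast with a subsampling style. This is not the bob.bio.video implementation; the "spread" semantics below are assumed purely for illustration:

# Toy frame selection, illustrative only.
def select_frames(frames, selection_style="all", max_number_of_frames=20):
    if selection_style == "all":
        return list(frames)  # the new default in these configs: keep every frame
    if selection_style == "spread":
        # hypothetical subsampling: take evenly spaced frames
        step = max(1, len(frames) // max_number_of_frames)
        return list(frames)[::step][:max_number_of_frames]
    raise ValueError("unknown selection_style: %s" % selection_style)

video = ["frame%03d" % i for i in range(100)]
assert len(select_frames(video, "all")) == 100    # all 100 frames kept
assert len(select_frames(video, "spread")) == 20  # subsampled to 20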