vanilla-biometrics Video support

parent 0d09668a
-from .utils import select_frames, VideoAsArray, VideoLikeContainer
+from .utils import (
+    select_frames,
+    VideoAsArray,
+    VideoLikeContainer,
+    video_wrap_skpipeline,
+)
from . import annotator
from . import transformer
@@ -31,8 +36,7 @@ def __appropriate__(*args):
__appropriate__(
-    VideoAsArray,
-    VideoLikeContainer,
+    VideoAsArray, VideoLikeContainer,
)
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith("_")]
from bob.bio.video.database import YoutubeDatabase
from functools import partial
from bob.bio.video.utils import select_frames

-database = YoutubeDatabase(protocol="fold0")

# Defining the frame selection bit.
# If you want to customize this, please create a new config file and run:
#   bob bio pipelines vanilla-biometrics `my-new-config-file.py` `baseline`
selection_style = "first"
max_number_of_frames = None
step_size = None
frame_selector = partial(
    select_frames,
    max_number_of_frames=max_number_of_frames,
    selection_style=selection_style,
    step_size=step_size,
)
+database = YoutubeDatabase(protocol="fold0", frame_selector=frame_selector)
from bob.bio.video.transformer import VideoWrapper
from bob.bio.video.utils import video_wrap_skpipeline
from bob.pipelines import wrap

# Fetching the pipeline from the chain loading
pipeline = locals().get("pipeline")
pipeline.transformer = video_wrap_skpipeline(pipeline.transformer)
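For context, this wrapper config is meant to be chain-loaded last, after a database and a baseline config have defined ``pipeline``; for instance (the baseline name here is only an example)::

    $ bob bio pipelines vanilla-biometrics youtube arcface-insightface video-wrapper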
@@ -46,6 +46,26 @@ class YoutubeDatabase(Database):
>>> probes = youtube.probes()
Parameters
----------
protocol: str
    One of the above-mentioned YouTube protocols
annotation_type: str
    One of the supported annotation types
original_directory: str
    Path to the directory containing the original data
extension: str
    Default file extension
annotation_extension: str
    Default annotation file extension
frame_selector:
    Pointer to a function that does frame selection
"""
def __init__(
@@ -55,10 +75,8 @@ class YoutubeDatabase(Database):
fixed_positions=None,
original_directory=rc.get("bob.bio.face.youtube.directory"),
extension=".jpg",
-        selection_style="first",
-        max_number_of_frames=None,
-        step_size=None,
         annotation_extension=".labeled_faces.txt",
+        frame_selector=None,
):
self._check_protocol(protocol)
@@ -89,12 +107,10 @@ class YoutubeDatabase(Database):
self.reference_id_to_subject_id = None
self.reference_id_to_sample = None
self.load_file_client_id()
-        self.selection_style = selection_style
-        self.max_number_of_frames = max_number_of_frames
-        self.step_size = step_size
         self.original_directory = original_directory
         self.extension = extension
         self.annotation_extension = annotation_extension
+        self.frame_selector = frame_selector
super().__init__(
name="youtube",
@@ -142,12 +158,18 @@ class YoutubeDatabase(Database):
[x for x in os.listdir(path) if os.path.splitext(x)[1] == ".jpg"]
)
-        files_indices = select_frames(
-            len(files),
-            max_number_of_frames=self.max_number_of_frames,
-            selection_style=self.selection_style,
-            step_size=self.step_size,
-        )
+        # If there's no frame selector, use all frames
+        files_indices = (
+            select_frames(
+                len(files),
+                max_number_of_frames=None,
+                selection_style="all",
+                step_size=None,
+            )
+            if self.frame_selector is None
+            else self.frame_selector(len(files))
+        )
data, indices = [], []
for i, file_name in enumerate(files):
if i not in files_indices:
@@ -230,6 +252,8 @@ class YoutubeDatabase(Database):
return annots
    def background_model_samples(self):
        """No background model samples are provided for this database; returns ``None``."""
        return None
def references(self, group="dev"):
......
@@ -5,10 +5,42 @@ import h5py
import numpy as np
from bob.bio.base import selected_indices
from bob.io.video import reader
from .transformer import VideoWrapper
from bob.pipelines import wrap
logger = logging.getLogger(__name__)
def video_wrap_skpipeline(sk_pipeline):
    """
    This function takes a :any:`sklearn.pipeline.Pipeline` and wraps each estimator
    inside of it with :any:`bob.bio.video.transformer.VideoWrapper`.
    """
    for i, name, estimator in sk_pipeline._iter():
        # 1. Unwrap the estimator:
        #    if the estimator is Sample-wrapped, take `estimator.estimator`
        transformer = (
            estimator.estimator if hasattr(estimator, "estimator") else estimator
        )
        # 2. Video-wrap it
        transformer = VideoWrapper(transformer)
        # 3. Sample-wrap it again
        transformer = wrap(
            ["sample"],
            transformer,
            fit_extra_arguments=estimator.fit_extra_arguments,
            transform_extra_arguments=estimator.transform_extra_arguments,
        )
        sk_pipeline.steps[i] = (name, transformer)
    return sk_pipeline
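For illustration, a minimal usage sketch of this helper; the pipeline below is a toy, with ``FunctionTransformer`` as an arbitrary stand-in step (an assumption for this example, not part of this package):

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
from bob.pipelines import wrap
from bob.bio.video.utils import video_wrap_skpipeline

# Build a toy scikit-learn pipeline and Sample-wrap each of its steps
sk_pipeline = wrap(["sample"], make_pipeline(FunctionTransformer()))

# Each step is unwrapped, video-wrapped, and Sample-wrapped again, so the
# pipeline can consume video containers instead of single images
video_pipeline = video_wrap_skpipeline(sk_pipeline)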
def select_frames(
    count, max_number_of_frames=None, selection_style=None, step_size=None
):
......
.. _bob.bio.video.faq:
================================
Frequently Asked Questions (FAQ)
================================
How to change the way frames are selected in my experiment?
-----------------------------------------------------------
The default frame selector in this package, :any:`bob.bio.video.select_frames`, allows you to choose
how frames are selected (`first`, `spread`, `step`, or `all`) and the maximum number of frames to be
used in this selection.
The examples below show how to use this selector.
Select the first frame only from every video
............................................
.. code-block:: python
>>> from bob.bio.video import select_frames
>>> from functools import partial
>>> frame_selector = partial(select_frames, selection_style="first", max_number_of_frames=1)
>>> number_of_frames = 50  # Total number of frames of some arbitrary video
>>> selected_frames = frame_selector(number_of_frames)
Select all frames
.................
.. code-block:: python
>>> from bob.bio.video import select_frames
>>> from functools import partial
>>> frame_selector = partial(select_frames, selection_style="all", max_number_of_frames=None)
>>> number_of_frames = 50  # Total number of frames of some arbitrary video
>>> selected_frames = frame_selector(number_of_frames)
Select all frames, but with an upper-bound of 100 frames
........................................................
.. code-block:: python
>>> from bob.bio.video import select_frames
>>> from functools import partial
>>> frame_selector = partial(select_frames, selection_style="all", max_number_of_frames=100)
>>> number_of_frames = 50  # Total number of frames of some arbitrary video
>>> selected_frames = frame_selector(number_of_frames)
Select 10 frames equally spread from the whole video
....................................................
.. code-block:: python
>>> from bob.bio.video import select_frames
>>> from functools import partial
>>> frame_selector = partial(select_frames, selection_style="spread", max_number_of_frames=10)
>>> number_of_frames = 50  # Total number of frames of some arbitrary video
>>> selected_frames = frame_selector(number_of_frames)
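Under the hood, the selection styles behave roughly as in the sketch below. This is an illustrative
reimplementation written for this FAQ (an approximation based on the examples above, not the actual
code of :any:`bob.bio.video.select_frames`):

.. code-block:: python

    def select_frames_sketch(
        count, max_number_of_frames=None, selection_style="all", step_size=None
    ):
        """Return the indices of the frames to keep, out of ``count`` frames."""
        bound = max_number_of_frames or count
        if selection_style == "first":
            # Keep the first ``bound`` frames
            return list(range(min(count, bound)))
        if selection_style == "step":
            # Keep every ``step_size``-th frame, capped at ``bound`` frames
            return list(range(0, count, step_size or 1))[:bound]
        if selection_style == "spread":
            # Keep ``bound`` frames evenly spread over the whole video
            n = min(count, bound)
            return [round(i * (count - 1) / max(n - 1, 1)) for i in range(n)]
        # "all": keep every frame, up to ``bound`` frames
        return list(range(count))[:bound]

    # For instance:
    #   select_frames_sketch(10, max_number_of_frames=3, selection_style="spread")
    #   -> [0, 4, 9]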
Now that I have customized my frame selector, so what?
......................................................
Once this frame selector is set, you can customize your experiment to use it.
The example below shows how to do this for the YouTube Faces dataset (using one of the examples above).
.. code-block:: python
>>> from bob.bio.video.database import YoutubeDatabase
>>> from functools import partial
>>> from bob.bio.video.utils import select_frames
>>> frame_selector = partial(select_frames, selection_style="spread", max_number_of_frames=10)
>>> database = YoutubeDatabase(protocol="fold0", frame_selector=frame_selector)
Once this is saved into a Python file (e.g. `my-dataset.py`), the vanilla-biometrics pipeline (:doc:`vanilla_biometrics_intro`) can be triggered as::
$ bob bio pipelines vanilla-biometrics my-dataset.py [BASELINE] video-wrapper
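Note that the order of these resources matters: ``video-wrapper`` is loaded last, so that it can fetch,
through chain loading, the ``pipeline`` object defined by the preceding resources and wrap its transformer.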
======================
Implementation Details
======================
.. todo::
Recover the old documentation and update it
@@ -17,6 +17,7 @@ Summary
bob.bio.video.annotator.Base
bob.bio.video.annotator.Wrapper
bob.bio.video.annotator.FailSafeVideo
bob.bio.video.video_wrap_skpipeline
Databases
......
@@ -10,11 +10,25 @@
This package is part of the ``bob.bio`` packages, which provide open source tools to run comparable and reproducible biometric recognition experiments.
In this package, tools to run video face recognition experiments are provided.
So far, a single set of tools is available: meta-classes that allow using other well-established face recognition algorithms on video data.
For more detailed information about the structure of the ``bob.bio`` packages, please refer to the documentation of :ref:`bob.bio.base <bob.bio.base>`.
In the following, we provide more detailed information about the particularities of this package only.
Get Started (TL;DR)
===================
To run biometric experiments using the :doc:`vanilla_biometrics_intro` with video databases, please use the `video-wrapper` `entry point <https://packaging.python.org/specifications/entry-points/>`_.
For instance, the example below uses the `video-wrapper` to run a face recognition experiment with one of the baselines from :ref:`bob.bio.face <bob.bio.face>` and the YouTube Faces database::
$ bob bio pipelines vanilla-biometrics youtube arcface-insightface video-wrapper
Please go through the documentation of this package and :ref:`bob.bio.base <bob.bio.base>` to see how these commands work.
Users Guide
===========
@@ -22,7 +36,7 @@ Users Guide
.. toctree::
:maxdepth: 2
implementation
faq
annotators
Reference Manual
......
@@ -67,7 +67,8 @@ setup(
    ],
    "bob.bio.config": [
        "mobio = bob.bio.video.config.database.mobio",
        "youtube = bob.bio.video.config.database.youtube",
+       "video-wrapper = bob.bio.video.config.video_wrapper",
],
},
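# Each "bob.bio.config" entry point above maps a short resource name (left-hand
# side) to a Python config module; those names are what gets passed on the
# ``bob bio pipelines`` command line and chain-loaded in order.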
# Classifiers are important if you plan to distribute this package through
......