# VideoToFrames.py
from sklearn.base import TransformerMixin, BaseEstimator
import bob.pipelines as mario
from bob.pipelines.wrappers import _frmt
import logging

logger = logging.getLogger(__name__)


class VideoToFrames(TransformerMixin, BaseEstimator):
    """Explode video samples into one sample per frame.

    The expansion is deferred: frame-level samples are only materialized
    when ``transform`` is called.
    """

    def transform(self, video_samples):
        """Flatten a list of video samples into a flat list of frame samples.

        Each produced sample wraps a single frame and carries its
        ``frame_id``, the per-frame annotations (looked up with the
        stringified frame id), and the originating video sample as
        ``parent``.
        """
        logger.debug(f"{_frmt(self)}.transform")
        frames = []
        for vid_sample in video_samples:
            # Annotations are keyed by str(frame_id); fall back to an
            # empty mapping when the sample carries none (or None).
            frame_annotations = getattr(vid_sample, "annotations", {}) or {}

            # vid_sample.data is an instance of VideoAsArray or
            # VideoLikeContainer; both iterate frames and expose `indices`.
            container = vid_sample.data
            frames.extend(
                mario.Sample(
                    frame_data,
                    frame_id=idx,
                    annotations=frame_annotations.get(str(idx)),
                    parent=vid_sample,
                )
                for frame_data, idx in zip(container, container.indices)
            )

        return frames

    def fit(self, X, y=None, **fit_params):
        """No-op: this transformer is stateless."""
        return self

    def _more_tags(self):
        """Advertise to sklearn/bob.pipelines that no fitting is required."""
        return {"stateless": True, "requires_fit": False}