diff --git a/bob/pad/face/test/dummy/__init__.py b/bob/pad/face/test/dummy/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/bob/pad/face/test/dummy/database.py b/bob/pad/face/test/dummy/database.py
new file mode 100644
index 0000000000000000000000000000000000000000..dfb5c7b3bb193397ac8f3165bcaddbc980fad17b
--- /dev/null
+++ b/bob/pad/face/test/dummy/database.py
@@ -0,0 +1,75 @@
+from bob.bio.base.test.utils import atnt_database_directory
+from bob.bio.video.utils import FrameContainer
+import bob.io.base
+import os
+from bob.pad.face.database import VideoPadFile
+from bob.pad.base.database import PadDatabase
+from bob.db.base.utils import check_parameters_for_validity, convert_names_to_lowlevel
+
+
+class DummyPadFile(VideoPadFile):
+    def load(self, directory=None, extension='.pgm', frame_selector=None):
+        file_name = self.make_path(directory, extension)
+        fc = FrameContainer()
+        fc.add(os.path.basename(file_name), bob.io.base.load(file_name))
+        return fc
+
+
+class DummyDatabase(PadDatabase):
+
+    def __init__(self):
+        # call base class constructor with useful parameters
+        super(DummyDatabase, self).__init__(
+            name='test',
+            original_directory=atnt_database_directory(),
+            original_extension='.pgm',
+            check_original_files_for_existence=True,
+            training_depends_on_protocol=False,
+            models_depend_on_protocol=False
+        )
+        import bob.db.atnt
+        self._db = bob.db.atnt.Database()
+        self.low_level_names = ('world', 'dev')
+        self.high_level_names = ('train', 'dev')
+
+    def _make_bio(self, files):
+        return [DummyPadFile(client_id=f.client_id, path=f.path, file_id=f.id,
+                             attack_type=None)
+                for f in files]
+
+    def objects(self, groups=None, protocol=None, purposes=None,
+                model_ids=None, **kwargs):
+        groups = check_parameters_for_validity(
+            groups, 'groups', self.high_level_names, default_parameters=None)
+        groups = convert_names_to_lowlevel(
+            groups, self.low_level_names, self.high_level_names)
+        purposes = list(check_parameters_for_validity(
+            purposes, 'purposes', ('real', 'attack'),
+            default_parameters=('real', 'attack')))
+        if 'real' in purposes:
+            purposes.remove('real')
+            purposes.append('enroll')
+        if 'attack' in purposes:
+            purposes.remove('attack')
+            purposes.append('probe')
+        return self._make_bio(self._db.objects(model_ids, groups, purposes,
+                                               protocol, **kwargs))
+
+    def annotations(self, file):
+        return None
+
+    def frames(self, padfile):
+        fc = padfile.load(self.original_directory)
+        for _, frame, _ in fc:
+            yield frame
+
+    def number_of_frames(self, padfile):
+        fc = padfile.load(self.original_directory)
+        return len(fc)
+
+    @property
+    def frame_shape(self):
+        return (112, 92)
+
+
+database = DummyDatabase()
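
For orientation, a minimal sketch of how this dummy database is exercised (it mirrors the test module added below); the only assumption is that the AT&T data resolved by atnt_database_directory() is available locally:

from bob.pad.face.test.dummy.database import DummyDatabase

db = DummyDatabase()
# all_files() returns a (real, attack) pair of file lists; take the first real file
padfile = db.all_files(('train', 'dev'))[0][0]
# DummyPadFile.load() wraps the single AT&T still image in a FrameContainer
fc = padfile.load(db.original_directory, db.original_extension)
assert db.number_of_frames(padfile) == 1
assert db.frame_shape == (112, 92)
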
diff --git a/bob/pad/face/test/test_utils.py b/bob/pad/face/test/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..539aba19ea0216a6ed61c8fa05bced35a5a0cafa
--- /dev/null
+++ b/bob/pad/face/test/test_utils.py
@@ -0,0 +1,80 @@
+from bob.pad.face.test.dummy.database import DummyDatabase as Database
+from bob.pad.face.utils import yield_frames, yield_faces, scale_face, blocks
+from types import MethodType
+from nose.tools import raises
+import numpy
+
+padfile = Database().all_files(('train', 'dev'))[0][0]
+image = padfile.load(Database().original_directory,
+                     Database().original_extension)[0][1]
+
+
+def _annotations(self, padfile):
+    return {'0': {'topleft': (0, 0), 'bottomright': self.frame_shape}}
+
+
+def test_yield_frames():
+    database = Database()
+    assert database.number_of_frames(padfile) == 1
+    for frame in yield_frames(database, padfile):
+        assert frame.ndim == 2
+        assert frame.shape == database.frame_shape
+
+
+@raises(ValueError)
+def test_yield_faces_1():
+    database = Database()
+    for face in yield_faces(database, padfile):
+        pass
+
+
+def test_yield_faces_2():
+    database = Database()
+    database.annotations = MethodType(
+        _annotations, database)
+    for face in yield_faces(database, padfile):
+        assert face.ndim == 2
+        assert face.shape == database.frame_shape
+
+
+def test_scale_face():
+    # gray-scale image
+    face = image
+    scaled_face = scale_face(face, 64)
+    assert scaled_face.dtype == 'float64'
+    assert scaled_face.shape == (64, 64)
+    # color image
+    scaled_face = scale_face(numpy.array([face, face, face]), 64)
+    assert scaled_face.dtype == 'float64'
+    assert scaled_face.shape == (3, 64, 64)
+    assert (scaled_face[0] == scaled_face[1]).all()
+    assert (scaled_face[0] == scaled_face[2]).all()
+
+
+def test_blocks():
+    # gray-scale image
+    patches = blocks(image, (28, 28))
+    assert patches.shape == (12, 28, 28), patches.shape
+    # color image
+    patches_gray = patches
+    patches = blocks([image, image, image], (28, 28))
+    assert patches.shape == (12, 3, 28, 28), patches.shape
+    assert (patches_gray == patches[:, 0, ...]).all()
+    assert (patches_gray == patches[:, 1, ...]).all()
+    assert (patches_gray == patches[:, 2, ...]).all()
+    # color video
+    patches = blocks([[image, image, image]], (28, 28))
+    assert patches.shape == (12, 3, 28, 28), patches.shape
+    assert (patches_gray == patches[:, 0, ...]).all()
+    assert (patches_gray == patches[:, 1, ...]).all()
+    assert (patches_gray == patches[:, 2, ...]).all()
+
+
+@raises(ValueError)
+def test_block_raises1():
+    blocks(image[0], (28, 28))
+
+
+@raises(ValueError)
+def test_block_raises2():
+    blocks([[[image]]], (28, 28))
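
A side note on the two yield_faces tests above: DummyDatabase.annotations() returns None, so yield_faces() (added in bob/pad/face/utils/load_utils.py below) raises a ValueError unless per-instance annotations are patched in, which is what the MethodType call does. A self-contained sketch of that pattern, with _full_frame_annotations being an illustrative name only:

from types import MethodType
from bob.pad.face.test.dummy.database import DummyDatabase
from bob.pad.face.utils import yield_faces

db = DummyDatabase()
padfile = db.all_files(('train', 'dev'))[0][0]

# declare the whole frame to be the face for frame '0'
def _full_frame_annotations(self, padfile):
    return {'0': {'topleft': (0, 0), 'bottomright': self.frame_shape}}

db.annotations = MethodType(_full_frame_annotations, db)
faces = list(yield_faces(db, padfile))  # a single (112, 92) crop
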
diff --git a/bob/pad/face/utils/__init__.py b/bob/pad/face/utils/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..4444a68057622b8ebef09f5f12fd586aeca7fb82 100644
--- a/bob/pad/face/utils/__init__.py
+++ b/bob/pad/face/utils/__init__.py
@@ -0,0 +1,8 @@
+from .face_detection_utils import (detect_face_in_image, detect_faces_in_video,
+                                   detect_face_landmarks_in_image,
+                                   detect_face_landmarks_in_video, get_eye_pos)
+from .load_utils import (frames, number_of_frames, yield_frames,
+                         normalize_detections, yield_faces, scale_face, blocks)
+
+# gets sphinx autodoc done right - don't remove it
+__all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/pad/face/utils/face_detection_utils.py b/bob/pad/face/utils/face_detection_utils.py
index 164b153be8a2dbe5c594e3971f188fdda5e523fd..034ca9abe1d3d56f880c173f78f7043bc8238c67 100644
--- a/bob/pad/face/utils/face_detection_utils.py
+++ b/bob/pad/face/utils/face_detection_utils.py
@@ -17,7 +17,7 @@ def get_eye_pos(lm):
 
     **Parameters:**
 
-    ``lm`` : :py:class:`array`
+    ``lm`` : :py:class:`numpy.ndarray`
         A numpy array containing the coordinates of facial landmarks, (68X2)
 
     **Returns:**
diff --git a/bob/pad/face/utils/load_utils.py b/bob/pad/face/utils/load_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..64e42e964da4e0f47d8c2780538fd6427f9e41af
--- /dev/null
+++ b/bob/pad/face/utils/load_utils.py
@@ -0,0 +1,216 @@
+from bob.io.video import reader
+from bob.ip.base import scale, block, block_output_shape
+from bob.ip.facedetect import bounding_box_from_annotation
+import numpy
+import six
+
+
+def frames(path):
+    """Yields the frames of a video file.
+
+    Parameters
+    ----------
+    path : str
+        Path to the video file.
+
+    Yields
+    ------
+    :any:`numpy.array`
+        A frame of the video, e.g. a (3, 240, 320) array for a color video.
+    """
+    video = reader(path)
+    for frame in video:
+        yield frame
+
+
+def number_of_frames(path):
+    """returns the number of frames of a video file.
+
+    Parameters
+    ----------
+    path : str
+        Path to the video file.
+
+    Returns
+    -------
+    int
+        The number of frames in the video.
+    """
+    video = reader(path)
+    return video.number_of_frames
+
+
+def yield_frames(paddb, padfile):
+    """Loads the frames of a video PAD database.
+
+    Parameters
+    ----------
+    paddb : :any:`bob.pad.base.database.PadDatabase`
+        The video PAD database. The database must implement the `frames`
+        method.
+    padfile : :any:`bob.pad.face.database.VideoPadFile`
+        The PAD file.
+
+    Yields
+    ------
+    :any:`numpy.array`
+        Frames of the PAD file one by one.
+    """
+    frames = paddb.frames(padfile)
+    for image in frames:
+        yield image
+
+
+def normalize_detections(detections, nframes, max_age=-1, faceSizeFilter=0):
+    """Calculates a list of "nframes" with the best possible detections taking
+    into consideration the ages of the last valid detection on the detections
+    list.
+
+    Parameters
+    ----------
+    detections : dict
+        A dictionary containing keys that indicate the frame number of the
+        detection and a value which is a BoundingBox object.
+
+    nframes : int
+        An integer indicating how many frames has the video that will be
+        analyzed.
+
+    max_age : :obj:`int`, optional
+        An integer indicating for a how many frames a detected face is valid if
+        no detection occurs after such frame. A value of -1 == forever
+
+    faceSizeFilter : :obj:`int`, optional
+        The minimum required size of face height (in pixels)
+
+    Yields
+    ------
+    object
+        The bounding box or None.
+    """
+    curr = None
+    age = 0
+
+    for k in range(nframes):
+        if detections and k in detections and \
+                (detections[k].size[0] > faceSizeFilter):
+            curr = detections[k]
+            age = 0
+        elif max_age < 0 or age < max_age:
+            age += 1
+        else:  # no detections and age is larger than maximum allowed
+            curr = None
+
+        yield curr
+
+
+def yield_faces(database, padfile, **kwargs):
+    """Yields face images of a padfile. It uses the annotations from the
+    database. The annotations are further normalized.
+
+    Parameters
+    ----------
+    database : :any:`bob.pad.base.database.PadDatabase`
+        A face PAD database. This database must implement the `frames`,
+        `number_of_frames`, and `annotations` methods.
+    padfile : :any:`bob.pad.base.database.PadFile`
+        The padfile whose faces will be returned.
+    **kwargs
+        They are passed to :any:`normalize_detections`.
+
+    Yields
+    ------
+    :any:`numpy.array`
+        The face images, one per frame with a valid detection.
+
+    Raises
+    ------
+    ValueError
+        If the database returns None for annotations.
+    """
+    frames_gen = database.frames(padfile)
+    nframes = database.number_of_frames(padfile)
+    # read annotation
+    annots = database.annotations(padfile)
+    if annots is None:
+        raise ValueError("No annotations were returned.")
+    # normalize annotations
+    annots = {int(k): bounding_box_from_annotation(**v)
+              for k, v in six.iteritems(annots)}
+    bounding_boxes = normalize_detections(annots, nframes, **kwargs)
+    for frame, bbx in six.moves.zip(frames_gen, bounding_boxes):
+        if bbx is None:
+            continue
+        face = frame[..., bbx.top:bbx.bottom, bbx.left:bbx.right]
+        yield face
+
+
+def scale_face(face, face_height, face_width=None):
+    """Scales a face image to the given size.
+
+    Parameters
+    ----------
+    face : :any:`numpy.array`
+        The face image. It can be 2D or 3D in bob image format.
+    face_height : int
+        The height of the scaled face.
+    face_width : :obj:`int`, optional
+        The width of the scaled face. If None, face_height is used.
+
+    Returns
+    -------
+    :any:`numpy.array`
+        The scaled face.
+    """
+    face_width = face_height if face_width is None else face_width
+    shape = list(face.shape)
+    shape[-2:] = (face_height, face_width)
+    scaled_face = numpy.empty(shape, dtype='float64')
+    scale(face, scaled_face)
+    return scaled_face
+
+
+def blocks(data, block_size, block_overlap=(0, 0)):
+    """Extracts patches of an image
+
+    Parameters
+    ----------
+    data : :any:`numpy.array`
+        The image in gray-scale, color, or color video format.
+    block_size : (int, int)
+        The size of the patches.
+    block_overlap : (:obj:`int`, :obj:`int`), optional
+        The overlap of the patches.
+
+    Returns
+    -------
+    :any:`numpy.array`
+        The patches.
+
+    Raises
+    ------
+    ValueError
+        If data dimension is not between 2 and 4 (inclusive).
+    """
+    data = numpy.asarray(data)
+    # if a gray scale image:
+    if data.ndim == 2:
+        output = block(data, block_size, block_overlap,
+                       flat=True)
+    # if a color image:
+    elif data.ndim == 3:
+        out_shape = list(data.shape[0:1]) + list(block_output_shape(
+            data[0], block_size, block_overlap, flat=True))
+
+        output = numpy.empty(out_shape, dtype=data.dtype)
+        for i, img2d in enumerate(data):
+            block(img2d, block_size, block_overlap, output[i], flat=True)
+        output = numpy.moveaxis(output, 0, 1)
+    # if a color video:
+    elif data.ndim == 4:
+        output = [blocks(img3d, block_size, block_overlap)
+                  for img3d in data]
+        output = numpy.concatenate(output, axis=0)
+    else:
+        raise ValueError("Unknown data dimension {}".format(data.ndim))
+    return output
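
A hedged sketch (not part of the patch) of how these helpers are meant to chain for patch-based feature extraction: crop faces with yield_faces(), resize them with scale_face(), and cut each scaled face into patches with blocks(). face_patches and its arguments are illustrative names only:

from bob.pad.face.utils import yield_faces, scale_face, blocks


def face_patches(database, padfile, face_size=64, patch_size=(28, 28)):
    """Yields fixed-size patches from every annotated face of a padfile."""
    for face in yield_faces(database, padfile):
        # scale_face returns float64; (64, 64) for gray, (3, 64, 64) for color
        scaled = scale_face(face, face_size)
        # blocks() flattens the patches along the first axis
        for patch in blocks(scaled, patch_size):
            yield patch
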
diff --git a/doc/api.rst b/doc/api.rst
index d1dde96805b2dc695f9ba42f30a4d856d008d75a..573c695d2335e1a611c9364e5dfbf145dc7ba894 100644
--- a/doc/api.rst
+++ b/doc/api.rst
@@ -65,3 +65,23 @@ Matching Algorithms
 ------------------------------
 
 .. automodule:: bob.pad.face.algorithm
+
+
+Utilities
+---------
+
+.. autosummary::
+   bob.pad.face.utils.blocks
+   bob.pad.face.utils.detect_face_in_image
+   bob.pad.face.utils.detect_face_landmarks_in_image
+   bob.pad.face.utils.detect_face_landmarks_in_video
+   bob.pad.face.utils.detect_faces_in_video
+   bob.pad.face.utils.frames
+   bob.pad.face.utils.get_eye_pos
+   bob.pad.face.utils.normalize_detections
+   bob.pad.face.utils.number_of_frames
+   bob.pad.face.utils.scale_face
+   bob.pad.face.utils.yield_faces
+   bob.pad.face.utils.yield_frames
+
+.. automodule:: bob.pad.face.utils
diff --git a/doc/nitpick-exceptions.txt b/doc/nitpick-exceptions.txt
new file mode 100644
index 0000000000000000000000000000000000000000..14c3632ef2dc0c19608a93a7af18fdac75d55333
--- /dev/null
+++ b/doc/nitpick-exceptions.txt
@@ -0,0 +1 @@
+py:exc ValueError