diff --git a/bob/pad/face/preprocessor/VideoFaceCropAlignBlockPatch.py b/bob/pad/face/preprocessor/VideoFaceCropAlignBlockPatch.py
index 4aa913090e9272739d879a184f077637511a0884..9ca1e5cfa1fc124e55d59f78cb24b0d1bc0ce83c 100644
--- a/bob/pad/face/preprocessor/VideoFaceCropAlignBlockPatch.py
+++ b/bob/pad/face/preprocessor/VideoFaceCropAlignBlockPatch.py
@@ -240,6 +240,8 @@ class VideoFaceCropAlignBlockPatch(Preprocessor, object):
             facial region. ROI is annotated as follows:
             ``annotations['face_roi'][0] = [x_top_left, y_top_left]``
             ``annotations['face_roi'][1] = [x_bottom_right, y_bottom_right]``
+            If ``face_roi`` annotations are undefined, the patches will be
+            extracted from the entire cropped facial image.
 
         **Returns:**
 
diff --git a/bob/pad/face/test/test.py b/bob/pad/face/test/test.py
index cf10dcab76da3c08d8f8e27954b8a60babf8bc77..ed6c8d2c9fca05e9e24424daa2e55d2ebae31d9d 100644
--- a/bob/pad/face/test/test.py
+++ b/bob/pad/face/test/test.py
@@ -52,6 +52,8 @@ from bob.pad.face.config.preprocessor.face_feature_crop_quality_check import fac
 
 from bob.pad.face.utils.patch_utils import reshape_flat_patches
 
+from bob.pad.face.config.preprocessor.video_face_crop_align_block_patch import video_face_crop_align_bw_ir_d_channels_3x128x128 as mc_preprocessor
+
 
 def test_detect_face_landmarks_in_image_mtcnn():
 
@@ -335,6 +337,42 @@ def test_preproc_with_quality_check():
     assert data_preprocessed is None
 
 
+# =============================================================================
+def test_multi_channel_preprocessing():
+    """
+    Test video_face_crop_align_bw_ir_d_channels_3x128x128 preprocessor.
+    """
+
+    # =========================================================================
+    # prepare the test data:
+
+    image = load(datafile('test_image.png', 'bob.pad.face.test'))
+
+    # annotations must be known for this preprocessor, so compute them:
+    annotations = detect_face_landmarks_in_image(image, method="mtcnn")
+
+    video_color, annotations = convert_image_to_video_data(image, annotations, 2)
+
+    video_bw, _ = convert_image_to_video_data(image[0], annotations, 2)
+
+    mc_video = {}
+    mc_video["color"] = video_color
+    mc_video["infrared"] = video_bw
+    mc_video["depth"] = video_bw
+
+    # =========================================================================
+    # test the preprocessor:
+
+    data_preprocessed = mc_preprocessor(mc_video, annotations)
+
+    assert len(data_preprocessed) == 2
+    assert data_preprocessed[0][1].shape == (3, 128, 128)
+
+    # channels are preprocessed differently, thus the following should hold:
+    assert np.any(data_preprocessed[0][1][0] != data_preprocessed[0][1][1])
+    assert np.any(data_preprocessed[0][1][0] != data_preprocessed[0][1][2])
+
+
 # =============================================================================
 def test_reshape_flat_patches():
     """