diff --git a/bob/pad/face/test/test.py b/bob/pad/face/test/test.py
index ac1078e42c5fb5509af2a2ea0d7a124bb6a3afeb..799d8efb2b6a0506776dfd07e4b10516b6fd8c83 100644
--- a/bob/pad/face/test/test.py
+++ b/bob/pad/face/test/test.py
@@ -35,8 +35,40 @@ from ..algorithm import VideoSvmPadAlgorithm
 
 from ..algorithm import VideoGmmPadAlgorithm
 
+from ..utils import face_detection_utils
+
 import random
 
+
+
+def test_detect_face_landmarks_in_image():
+
+    img = load(datafile('testimage.jpg', 'bob.bio.face.test'))
+    assert len(img) == 3  # color image: 3 planes in the first dimension
+    annotations = face_detection_utils.detect_face_landmarks_in_image(img)
+    assert len(annotations['landmarks']) == 68
+    assert len(annotations['left_eye']) == 2
+    assert len(annotations['right_eye']) == 2
+    assert len(annotations['topleft']) == 2
+    assert len(annotations['bottomright']) == 2
+
+    # assert annotations['left_eye'] == (176, 220)
+
+
+def test_detect_face_landmarks_in_video():
+
+    img = load(datafile('testimage.jpg', 'bob.bio.face.test'))
+    assert len(img) == 3  # color image: 3 planes in the first dimension
+    frame_container = bob.bio.video.FrameContainer()
+    frame_container.add(1, img)
+    frame_container.add(2, img)
+
+    annotations = face_detection_utils.detect_face_landmarks_in_video(frame_container)
+    assert len(annotations) == 2
+    assert len(annotations['1']['landmarks']) == 68
+
+
 #==============================================================================
 def test_lbp_histogram():
     lbp = LBPHistogram()
diff --git a/bob/pad/face/utils/face_detection_utils.py b/bob/pad/face/utils/face_detection_utils.py
index 66b16de7dbdf996f5b26a6addb3da08667894f44..0ba1f2308344618aaba65a889722d90acc2731ed 100644
--- a/bob/pad/face/utils/face_detection_utils.py
+++ b/bob/pad/face/utils/face_detection_utils.py
@@ -8,6 +8,7 @@ This file contains face detection utils.
 # Import here:
 
 import importlib
+import numpy as np
 
 
 #==============================================================================
@@ -96,8 +97,139 @@ def detect_faces_in_video(frame_container, method = "dlib"):
             annotations[str(idx)] = frame_annotations
 
     return annotations
+
 
+def getEyePos(lm):
 
+    """
+    This function returns the locations of the left and right eyes.
+
+    **Parameters:**
+
+    ``lm`` : :py:class:`numpy.ndarray`
+        A numpy array of shape (68, 2) containing the coordinates of the
+        facial landmarks.
+
+    **Returns:**
+
+    ``right_eye`` : :py:class:`tuple`
+        A tuple containing the location of the right eye.
+
+    ``left_eye`` : :py:class:`tuple`
+        A tuple containing the location of the left eye.
+
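+    **Example:**
+
+    A minimal sketch with synthetic landmarks (an all-zeros array), only to
+    illustrate the expected input shape and the output format:
+
+    .. code-block:: python
+
+        import numpy as np
+
+        lm = np.zeros((68, 2))  # placeholder landmark array, not real data
+        right_eye, left_eye = getEyePos(lm)
+        # both outputs are 2-element tuples of ints, here (0, 0) and (0, 0)
+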
+    """
+
+    # Mean position of the eye corners is used as the eye center, cast to int().
+    # In the 68-point landmark scheme, points 36 and 39 are the corners of the
+    # right eye and points 42 and 45 are the corners of the left eye.
+    right_eye_center = (lm[36, :] + lm[39, :]) / 2.0
+    left_eye_center = (lm[42, :] + lm[45, :]) / 2.0
+
+    # swap the coordinate order of the eye centers for the returned tuples
+    right_eye = (int(right_eye_center[1]), int(right_eye_center[0]))
+    left_eye = (int(left_eye_center[1]), int(left_eye_center[0]))
+
+    return right_eye, left_eye
+
+
+
+def detect_face_landmarks_in_image(image, method = "dlib"):
+    """
+    This function detects a face and facial landmarks in the input image.
+
+    **Parameters:**
+
+    ``image`` : 3D :py:class:`numpy.ndarray`
+        A color image to detect the face in.
+
+    ``method`` : :py:class:`str`
+        A package to be used for face detection. Options supported by this
+        package: "dlib" (dlib is a dependency of this package). 
+
+    **Returns:**
+
+    ``annotations`` : :py:class:`dict`
+        A dictionary containing the annotations for an image.
+        The dictionary is structured as follows: ``{'topleft': (row, col), 'bottomright': (row, col), 'left_eye': (row, col), 'right_eye': (row, col), 'landmarks': [(row1,col1), (row2,col2), ...]}``,
+        where ``(rowK, colK)`` is the location of the Kth facial landmark (K = 0, ..., 67).
+        If no face is detected, an empty dictionary is returned.
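+
+    **Example:**
+
+    A minimal usage sketch; ``image`` is assumed to be a color image that was
+    already loaded elsewhere as a 3D numpy array:
+
+    .. code-block:: python
+
+        from bob.pad.face.utils import face_detection_utils
+
+        # ``image`` is an assumed, pre-loaded 3D numpy array (color image)
+        annotations = face_detection_utils.detect_face_landmarks_in_image(image, method="dlib")
+
+        if annotations:  # empty dict means no face was detected
+            print(annotations['left_eye'], annotations['right_eye'])
+            print(len(annotations['landmarks']))  # 68
+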
+    """
+
+    try:
+        face_landmark_detection_module = importlib.import_module("bob.ip.facelandmarks")
+    except ImportError:
+        raise ImportError("No module named bob.ip.facelandmarks")
+
+    if not hasattr(face_landmark_detection_module, 'detect_landmarks'):
+        raise AttributeError("bob.ip.facelandmarks module has no attribute detect_landmarks")
+
+    key_points = face_landmark_detection_module.detect_landmarks(image, 1)
+
+    annotations = {}
+
+    # take the first detected face, if any (detect_landmarks may find none)
+    kp = key_points[0] if key_points else None
+
+    if kp is not None:
+
+        # stack the two landmark coordinate columns in swapped order, as
+        # expected by getEyePos()
+        lm = np.vstack((kp.landmarks[:, 1], kp.landmarks[:, 0])).T
+
+        right_eye, left_eye = getEyePos(lm)
+
+        # convert the landmarks to a list of integer coordinate tuples
+        points = [(int(lm[i, 0]), int(lm[i, 1])) for i in range(lm.shape[0])]
+
+        annotations['topleft'] = kp.bounding_box.topleft
+        annotations['bottomright'] = kp.bounding_box.bottomright
+        annotations['landmarks'] = points  # list of landmarks
+        annotations['left_eye'] = left_eye
+        annotations['right_eye'] = right_eye
+
+    return annotations
+
+
+
+def detect_face_landmarks_in_video(frame_container, method = "dlib"):
+    """
+    This function detects a face and facial landmarks in each frame of the input video.
+
+    **Parameters:**
+
+    ``frame_container`` : FrameContainer
+        FrameContainer containing the frames data.
+
+    ``method`` : :py:class:`str`
+        A package to be used for face detection. Options supported by this
+        package: "dlib" (dlib is a dependency of this package). 
+
+    **Returns:**
+
+    ``annotations`` : :py:class:`dict`
+        A dictionary containing the annotations for each frame in the video.
+        Dictionary structure: ``annotations = {'1': frame1_dict, '2': frame2_dict, ...}``,
+        where ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col), 'left_eye': (row, col), 'right_eye': (row, col), 'landmarks': [(row1,col1), (row2,col2), ...]}``
+        contains the face bounding box, eye locations and facial landmarks detected in frame N,
+        and ``(rowK, colK)`` is the location of the Kth facial landmark (K = 0, ..., 67).
+        If no faces are detected in any frame, an empty dictionary is returned.
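+
+    **Example:**
+
+    A minimal usage sketch; ``image`` is assumed to be a pre-loaded color
+    image (3D numpy array):
+
+    .. code-block:: python
+
+        import bob.bio.video
+        from bob.pad.face.utils import face_detection_utils
+
+        frame_container = bob.bio.video.FrameContainer()
+        frame_container.add(1, image)  # ``image`` is an assumed color frame
+        frame_container.add(2, image)
+
+        annotations = face_detection_utils.detect_face_landmarks_in_video(frame_container)
+        # the result has one entry per frame in which a face was found
+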
+    """
+
+    annotations = {}
+
+    for idx, frame in enumerate(frame_container):
+
+        # the image data is the second element of the frame tuple
+        image = frame[1]
+
+        frame_annotations = detect_face_landmarks_in_image(image, method)
+
+        if frame_annotations:
+
+            annotations[str(idx)] = frame_annotations
+
+    return annotations
 
 
 
diff --git a/requirements.txt b/requirements.txt
index 6692bce0eb1efa0062578f7755eea27928de0e94..9518917ce953e255d508bf20116ce3eac9e5f92c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -13,6 +13,7 @@ bob.io.image
 bob.ip.color
 bob.ip.qualitymeasure
 bob.ip.dlib
+bob.ip.facelandmarks
 bob.learn.libsvm
 bob.learn.linear
 scikit-learn