Commit e42ba79a authored by Anjith GEORGE

WIP: Annotations mismatch

parent 1b0be28f
Pipeline #38665 failed
@@ -11,6 +11,9 @@ from bob.pad.face.preprocessor.FaceCropAlign import detect_face_landmarks_in_image
from bob.db.hqwmca.attack_dictionaries import idiap_type_id_config, idiap_subtype_id_config
from bob.io.stream import stream
import cv2
import bob.io.image
# def _color(f):
# return f.stream('color')
@@ -261,8 +264,29 @@ class HQWMCAPadDatabase_warp(PadDatabase):
frame_annotations = detect_face_landmarks_in_image(image, method='mtcnn')
if frame_annotations is None:
print('No frame annotations, trying CLAHE')
print('image',image.shape,type(image))
cv_image=bob.io.image.to_matplotlib(image)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
cv_image[:,:,0] = clahe.apply(cv_image[:,:,0])
cv_image[:,:,1] = clahe.apply(cv_image[:,:,1])
cv_image[:,:,2] = clahe.apply(cv_image[:,:,2])
print('cv_image',cv_image.shape,type(cv_image))
bob_image=bob.io.image.to_bob(cv_image)
print('bob_image',bob_image.shape,type(bob_image))
frame_annotations = detect_face_landmarks_in_image(bob_image, method='mtcnn')
if frame_annotations is not None:
print('CLAHE Succeeded................................................................................')
print('frame_annotations',frame_annotations)
# print('frame_annotations',frame_annotations)
if frame_annotations:
for key in frame_annotations.keys():
@@ -271,10 +295,10 @@ class HQWMCAPadDatabase_warp(PadDatabase):
if key!='quality':
frame_annotations[key]=(int(frame_annotations[key][0]),int(frame_annotations[key][1]))
else:
frame_annotations[key]=int(frame_annotations[key])
frame_annotations[key]=1#int(frame_annotations[key])
print('frame_annotations AFTER',frame_annotations)
# print('frame_annotations AFTER CHANGE',frame_annotations)
if frame_annotations:
annotations[str(idx)] = frame_annotations
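For context, the retry logic added above could be factored into a small helper. Below is a minimal sketch, not the commit's actual code: it assumes an 8-bit color image in Bob's (channels, height, width) layout and reuses detect_face_landmarks_in_image from bob.pad.face.preprocessor.FaceCropAlign; the helper name detect_landmarks_with_clahe_fallback is illustrative.

import cv2
import bob.io.image
from bob.pad.face.preprocessor.FaceCropAlign import detect_face_landmarks_in_image


def detect_landmarks_with_clahe_fallback(image, method='mtcnn'):
    """Try landmark detection; on failure, retry after per-channel CLAHE.

    `image` is expected in Bob's (channels, height, width) layout with uint8
    pixels, since OpenCV's CLAHE operates on 8-bit single-channel data.
    """
    annotations = detect_face_landmarks_in_image(image, method=method)
    if annotations is not None:
        return annotations

    # Reorder to (height, width, channels) so OpenCV can process each channel.
    cv_image = bob.io.image.to_matplotlib(image)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    for c in range(cv_image.shape[2]):
        cv_image[:, :, c] = clahe.apply(cv_image[:, :, c])

    # Back to Bob's (channels, height, width) layout before re-detecting.
    enhanced = bob.io.image.to_bob(cv_image)
    return detect_face_landmarks_in_image(enhanced, method=method)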
@@ -554,7 +554,7 @@ class FaceCropAlign(Preprocessor):
annotations['bottomright']=(int(annotations['bottomright'][0]),int(annotations['bottomright'][1]))
print('annotations',annotations)
# print('annotations',annotations)
if self.min_face_size is not None: # quality check
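The hunks above repeatedly coerce annotation values to plain Python ints before they are compared or stored. A hedged sketch of that normalization, assuming the usual Bob annotation dictionary with (y, x) point entries (e.g. 'topleft', 'bottomright', 'leye', 'reye') plus an optional scalar 'quality'; the helper name normalize_annotations is illustrative, not part of the commit.

def normalize_annotations(annotations):
    """Coerce annotation values (e.g. numpy scalars from MTCNN) to plain ints.

    Point-like entries become (int, int) tuples; a scalar 'quality' entry
    becomes a single int. Empty or missing annotations are returned unchanged.
    """
    if not annotations:
        return annotations
    normalized = {}
    for key, value in annotations.items():
        if key == 'quality':
            normalized[key] = int(value)
        else:
            normalized[key] = (int(value[0]), int(value[1]))
    return normalized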
@@ -268,9 +268,13 @@ class VideoFaceCropAlignBlockPatch(Preprocessor, object):
# Convert arrays of dimensionality 3 to 4 if necessary:
preprocessed_arrays = [np.expand_dims(item, axis=1) if len(item.shape)==3 else item for item in preprocessed_arrays]
for pa in preprocessed_arrays:
print('PASHAPE:',pa.shape)
# Concatenate streams channel-wise:
preprocessed_arrays = np.concatenate(preprocessed_arrays, axis=1)
print()
# Convert to frame container:
preprocessed_fc = bob.bio.video.FrameContainer() # initialize the FrameContainer
[preprocessed_fc.add(idx, item) for idx, item in enumerate(preprocessed_arrays)]
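The hunk above concatenates the per-stream preprocessed arrays channel-wise and wraps the result in a frame container. A minimal sketch of that shape handling, assuming each array is either (frames, height, width) or (frames, channels, height, width) with matching frame counts; the FrameContainer usage mirrors the original code, while the function name stack_streams is illustrative.

import numpy as np
import bob.bio.video


def stack_streams(preprocessed_arrays):
    """Concatenate per-stream arrays channel-wise into a FrameContainer."""
    # Promote (frames, h, w) arrays to (frames, 1, h, w) so every stream has
    # an explicit channel axis.
    arrays = [np.expand_dims(a, axis=1) if a.ndim == 3 else a
              for a in preprocessed_arrays]

    # Concatenate along the channel axis; frame counts must agree across streams.
    stacked = np.concatenate(arrays, axis=1)

    # Pack each stacked frame into a FrameContainer, indexed by frame number.
    fc = bob.bio.video.FrameContainer()
    for idx, frame in enumerate(stacked):
        fc.add(idx, frame)
    return fc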