Commit aab3cf8f authored by Zohreh MOSTAANI's avatar Zohreh MOSTAANI

first attempt to add the vulnerability analysis to the code

parent dcb83e0f
#!/usr/bin/env python
"""
AIM dataset for vulnerability analysis against makeup. Default configuration for the grandtest protocol.
"""
from bob.paper.makeup_aim.database import AIMVulunDataset
ORIGINAL_DIRECTORY = "[AIM_DIRECTORY]"
ORIGINAL_EXTENSION = ".h5"
ANNOTATION_DIRECTORY = "[AIM_ANNOTATION_DIRECTORY]"
PROTOCOL = "grandtest"
database = AIMVulunDataset(
    protocol=PROTOCOL,
    original_directory=ORIGINAL_DIRECTORY,
    original_extension=ORIGINAL_EXTENSION,
    annotation_directory=ANNOTATION_DIRECTORY,
    training_depends_on_protocol=True,
)
groups = ["dev"]
#----------------------------------------------------------
#!/usr/bin/env python
"""
This file contains the configuration to run vulnerability analysis experiments on the AIM dataset.
"""
#--------------------------------------------------------------------
# sub_directory where the results will be placed
sub_directory = 'aim_vulun'
#--------------------------------------------------------------------
# define preprocessor:
from bob.pad.face.preprocessor import FaceCropAlign
from bob.bio.video.preprocessor import Wrapper
from bob.bio.video.utils import FrameSelector
# parameters and constants
FACE_SIZE = 128
RGB_OUTPUT_FLAG = False
USE_FACE_ALIGNMENT = True
ALIGNMENT_TYPE = "lightcnn"
MAX_IMAGE_SIZE = None
FACE_DETECTION_METHOD = None
MIN_FACE_SIZE = 50
_image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
                                    rgb_output_flag=RGB_OUTPUT_FLAG,
                                    use_face_alignment=USE_FACE_ALIGNMENT,
                                    alignment_type=ALIGNMENT_TYPE,
                                    max_image_size=MAX_IMAGE_SIZE,
                                    face_detection_method=FACE_DETECTION_METHOD,
                                    min_face_size=MIN_FACE_SIZE,
                                    )
_frame_selector = FrameSelector(selection_style="spread")
preprocessor = Wrapper(preprocessor=_image_preprocessor, frame_selector=_frame_selector)
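# Note on the settings above: since FACE_DETECTION_METHOD is None the
# preprocessor relies on the annotations provided by the database, aligns the
# face in each selected frame for LightCNN ("lightcnn" alignment), and outputs
# 128x128 grayscale crops (RGB_OUTPUT_FLAG = False); the "spread" selector
# samples frames uniformly across the video.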
#--------------------------------------------------------------------
# define extractor:
from bob.paper.makeup_aim.extractor import FICNN
from bob.bio.video.extractor import Wrapper
from bob.extension import rc
import os
_model_dir = rc.get("LIGHTCNN9_MODEL_DIRECTORY")
_model_name = "LightCNN_9Layers_checkpoint.pth.tar"
_model_file = os.path.join(_model_dir, _model_name)
if not os.path.exists(_model_file):
    print("Error: Could not find the LightCNN-9 model at [{}].\n"
          "Please follow the download instructions from the README.".format(_model_dir))
    exit(1)
extractor = Wrapper(FICNN(model_file=_model_file))
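# The wrapped FICNN extractor computes a LightCNN-9 embedding for every
# preprocessed frame; the video Wrapper simply applies it frame by frame.
# (The commonly cited 256-dimensional output of LightCNN-9 is stated here as
# an assumption, not taken from this package.)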
#--------------------------------------------------------------------
# define algorithm:
from bob.bio.base.config.algorithm.distance_cosine import algorithm
from bob.bio.video.algorithm import Wrapper
algorithm = Wrapper(algorithm)
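# The algorithm compares enrollment and probe features with the cosine
# distance; the video Wrapper handles the per-frame feature containers.
# Hedged usage sketch: a configuration like this one is typically passed,
# together with a database configuration, to the ``verify.py`` script of
# bob.bio.base, e.g.
#
#   verify.py <database_config>.py <this_config>.py
#
# (the exact command line depends on the bob.bio.base version installed).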
#--------------------------------------------------------------------
\ No newline at end of file
@@ -161,7 +161,7 @@ class AIMDatabase(FileListPadDatabase):
         NOTE: you can pre-compute annotation in your first experiment
         and then reuse them in other experiments setting
-        ``self.annotations_temp_dir`` path of this class, where
+        ``self.annotations_directory`` path of this class, where
         precomputed annotations will be saved.
         **Parameters:**
"""
Implementation of the AIM dataset interface for face recognition using RGB images.
@author: Zohreh Mostaani
"""
# Imports
from bob.bio.base.database import FileListBioDatabase
from bob.bio.video.database import VideoBioFile
from bob.bio.video import FrameSelector
from bob.io.base import HDF5File, create_directories_safe
from bob.extension import rc
from bob.pad.face.preprocessor.FaceCropAlign import detect_face_landmarks_in_image
import numpy as np
import json
import os
import pkg_resources
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
#--------------------------------------------------------------------
class File(VideoBioFile):
    """
    The file interface for RGB data.
    """

    def load(self, directory=None, extension=None,
             frame_selector=FrameSelector()):
        filepath = self.make_path(directory, extension)
        with HDF5File(filepath) as f:
            data = f['/intelsr300_bgr']
            # bgr to rgb
            data = data[..., ::-1]
            # to bob format
            data = np.moveaxis(data, -1, 1)
            return frame_selector(data)
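# Note on the data layout (an assumption based on the conversion above): the
# HDF5 file stores BGR frames as (frames, height, width, channels); after the
# channel flip and np.moveaxis the frames are RGB in Bob's
# (frames, channels, height, width) ordering, and the frame selector then
# picks the subset of frames to be used.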
class AIMVulunDataset(FileListBioDatabase):
    """
    A high-level implementation of a database class for the AIM dataset used
    for vulnerability analysis.
    """

    def __init__(
            self,
            name="AIM_vulun",
            original_directory=None,
            original_extension='.h5',
            protocol="grandtest",
            annotation_directory=None,
            annotation_extension='.json',
            annotation_type='json',
            models_depend_on_protocol=True,
            bio_file_class=File,
            use_dense_probe_file_list=False,
            groups="dev",
            **kwargs):
        """
        **Parameters:**
        ``original_directory`` : str or None
            The location of the AIM_vulun parent directory.
        ``original_extension`` : str or None
            The extension of the original data.
        ``groups`` : str or [str]
            The groups for which the clients should be returned.
            Usually, groups are one or more elements of ['train', 'dev', 'eval'].
            Default: 'dev'.
        ``protocol`` : str
            The protocol for which the clients should be retrieved.
            Default: 'grandtest'.
        """
        filelists_directory = pkg_resources.resource_filename(
            __name__, "/lists/aim_vulun/")
        self.filelists_directory = filelists_directory

        # init the parent class using super.
        super(AIMVulunDataset, self).__init__(
            filelists_directory=filelists_directory,
            name=name,
            protocol=protocol,
            original_directory=original_directory,
            original_extension=original_extension,
            annotation_directory=annotation_directory,
            annotation_extension=annotation_extension,
            annotation_type=annotation_type,
            bio_file_class=bio_file_class,
            models_depend_on_protocol=models_depend_on_protocol,
            **kwargs)

        self.bio_file_class = bio_file_class
        self.annotation_directory = annotation_directory
        self.landmark_detect_method = "mtcnn"
        self.protocol = protocol
        logger.info("Dataset: {}".format(self.name))
        logger.info("Original directory: {}; Annotation directory: {}".format(
            self.original_directory, self.annotation_directory))
#--------------------------------------------------------------------
    def annotations(self, f):
        """
        Computes annotations for a given file object ``f``, which
        is an instance of the ``File`` class defined above.
        NOTE: you can pre-compute annotations in your first experiment
        and then reuse them in other experiments by setting the
        ``self.annotation_directory`` path of this class, where
        precomputed annotations will be saved.
        **Parameters:**
        ``f`` : :py:class:`object`
            An instance of ``File`` defined above.
        **Returns:**
        ``annotations`` : :py:class:`dict`
            A dictionary containing annotations for
            each frame in the video.
            Dictionary structure:
            ``annotations = {'1': frame1_dict, '2': frame2_dict, ...}``.
            Where
            ``frameN_dict`` contains coordinates of the
            face bounding box and landmarks in frame N.
        """
        file_path = os.path.join(self.annotation_directory, f.path + ".json")
        # If annotations do not exist, then generate them.
        if not os.path.isfile(file_path):
            video = f.load(directory=self.original_directory,
                           extension=self.original_extension)
            annotations = {}
            for idx, image in enumerate(video.as_array()):
                frame_annotations = detect_face_landmarks_in_image(
                    image, method=self.landmark_detect_method)
                if frame_annotations:
                    annotations[str(idx)] = frame_annotations
            # if directory is not an empty string
            if self.annotation_directory:
                create_directories_safe(
                    directory=os.path.split(file_path)[0], dryrun=False)
                with open(file_path, 'w+') as json_file:
                    json_file.write(json.dumps(annotations))
        # if a file with annotations exists, load them from it
        else:
            with open(file_path, 'r') as json_file:
                annotations = json.load(json_file)
        # if the dictionary is empty
        if not annotations:
            logger.warning("Empty annotations for %s", f.path)
            return None
        return annotations
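# Sketch of the per-frame annotation structure (an assumption about what
# detect_face_landmarks_in_image returns for the "mtcnn" method, not something
# defined in this file): each ``frameN_dict`` is expected to contain at least
# the bounding-box corners and the detected landmarks, e.g.
#
#   annotations["0"] = {"topleft": (y1, x1), "bottomright": (y2, x2),
#                       "landmarks": [...], ...}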
#--------------------------------------------------------------------
\ No newline at end of file
face-station/24.01.18/001_01_000_0_00 001 001
face-station/01.02.18/002_01_000_0_00 002 002
face-station/17.01.18/003_01_000_0_00 003 003
face-station/17.01.18/004_01_000_0_00 004 004
face-station/18.01.18/005_01_000_0_00 005 005
face-station/22.01.18/006_01_000_0_00 006 006
face-station/22.01.18/007_01_000_0_00 007 007
face-station/26.01.18/008_01_000_0_00 008 008
face-station/23.01.18/009_01_000_0_00 009 009
face-station/24.01.18/010_01_000_0_00 010 010
face-station/24.01.18/011_01_000_0_00 011 011
face-station/25.01.18/012_01_000_0_00 012 012
face-station/24.01.18/013_01_000_0_00 013 013
face-station/24.01.18/014_01_000_0_00 014 014
face-station/25.01.18/015_01_000_0_00 015 015
face-station/26.01.18/017_01_000_0_00 017 017
face-station/18.01.18/018_01_000_0_00 018 018
face-station/31.01.18/019_01_000_0_00 019 019
face-station/29.01.18/020_01_000_0_00 020 020
face-station/29.01.18/021_01_000_0_00 021 021
face-station/29.01.18/022_01_000_0_00 022 022
face-station/29.01.18/023_01_000_0_00 023 023
face-station/29.01.18/024_01_000_0_00 024 024
face-station/29.01.18/025_01_000_0_00 025 025
face-station/30.01.18/026_01_000_0_00 026 026
face-station/30.01.18/027_01_000_0_00 027 027
face-station/30.01.18/028_01_000_0_00 028 028
face-station/30.01.18/029_01_000_0_00 029 029
face-station/30.01.18/030_01_000_0_00 030 030
face-station/30.01.18/031_01_000_0_00 031 031
face-station/31.01.18/032_01_000_0_00 032 032
face-station/31.01.18/033_01_000_0_00 033 033
face-station/31.01.18/034_01_000_0_00 034 034
face-station/31.01.18/035_01_000_0_00 035 035
face-station/31.01.18/036_01_000_0_00 036 036
face-station/07.02.18/037_02_000_0_00 037 037
face-station/01.02.18/038_01_000_0_00 038 038
face-station/01.02.18/039_01_000_0_00 039 039
face-station/01.02.18/040_01_000_0_00 040 040
face-station/01.02.18/041_01_000_0_00 041 041
face-station/07.02.18/042_02_000_0_00 042 042
face-station/01.02.18/043_01_000_0_00 043 043
face-station/07.02.18/044_02_000_0_00 044 044
face-station/07.02.18/045_02_000_0_00 045 045
face-station/01.02.18/046_01_000_0_00 046 046
face-station/02.02.18/047_01_000_0_00 047 047
face-station/02.02.18/048_01_000_0_00 048 048
face-station/02.02.18/049_01_000_0_00 049 049
face-station/02.02.18/050_01_000_0_00 050 050
face-station/02.02.18/051_01_000_0_00 051 051
face-station/02.02.18/052_01_000_0_00 052 052
face-station/02.02.18/053_01_000_0_00 053 053
face-station/02.02.18/054_01_000_0_00 054 054
face-station/02.02.18/055_01_000_0_00 055 055
face-station/07.02.18/056_02_000_0_00 056 056
face-station/02.02.18/057_01_000_0_00 057 057
face-station/02.02.18/058_01_000_0_00 058 058
face-station/07.02.18/059_02_000_0_00 059 059
face-station/02.02.18/060_01_000_0_00 060 060
face-station/02.02.18/061_01_000_0_00 061 061
face-station/06.02.18/062_02_000_0_00 062 062
face-station/07.02.18/063_02_000_0_00 063 063
face-station/30.04.18/064_05_000_0_00 064 064
face-station/01.05.18/065_07_000_0_00 065 065
face-station/23.04.18/066_05_000_0_00 066 066
face-station/23.04.18/067_05_000_0_00 067 067
face-station/22.05.18/068_05_000_0_00 068 068
face-station/15.05.18/069_05_000_0_00 069 069
face-station/04.05.18/070_05_000_0_00 070 070
face-station/04.05.18/071_05_000_0_00 071 071
face-station/23.05.18/072_07_000_0_00 072 072
face-station/23.05.18/073_05_000_0_00 073 073