...
 
Commits (5)
#!/usr/bin/env python
"""
AIM dataset for vulnerability analysis against makeup.
Default configuration for the 'grandtest' protocol.
"""
from bob.paper.makeup_aim.database.aim_vuln import AIMVulnDataset

# Placeholder paths; resolved to real locations via the bob configuration
# system (see the package README for the download/setup instructions).
ORIGINAL_DIRECTORY = "[AIM_DIRECTORY]"
ORIGINAL_EXTENSION = ".h5"
ANNOTATION_DIRECTORY = "[AIM_ANNOTATION_DIRECTORY]"
PROTOCOL = "grandtest"

# Database instance picked up by the bob.bio experiment runner.
database = AIMVulnDataset(
    protocol=PROTOCOL,
    original_directory=ORIGINAL_DIRECTORY,
    original_extension=ORIGINAL_EXTENSION,
    annotation_directory=ANNOTATION_DIRECTORY,
    training_depends_on_protocol=True,
)

# Evaluate on the development group only.
groups = ["dev"]
#----------------------------------------------------------
#!/usr/bin/env python
"""
Configuration to run vulnerability analysis experiments for the AIM dataset.

Defines the sub-directory for results, the face-crop/align preprocessor,
the LightCNN-9 feature extractor and the cosine-distance algorithm, all
wrapped for video input.
"""
#--------------------------------------------------------------------
# sub_directory where the results will be placed
sub_directory = 'aim_vuln'
#--------------------------------------------------------------------
# define preprocessor:
from bob.pad.face.preprocessor import FaceCropAlign
from bob.bio.video.preprocessor import Wrapper
from bob.bio.video.utils import FrameSelector

# parameters and constants
FACE_SIZE = 128
RGB_OUTPUT_FLAG = False          # grayscale output, as expected by LightCNN
USE_FACE_ALIGNMENT = True
ALIGNMENT_TYPE = "lightcnn"
MAX_IMAGE_SIZE = None
FACE_DETECTION_METHOD = None     # None: rely on precomputed annotations
MIN_FACE_SIZE = 50

_image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
                                    rgb_output_flag=RGB_OUTPUT_FLAG,
                                    use_face_alignment=USE_FACE_ALIGNMENT,
                                    alignment_type=ALIGNMENT_TYPE,
                                    max_image_size=MAX_IMAGE_SIZE,
                                    face_detection_method=FACE_DETECTION_METHOD,
                                    min_face_size=MIN_FACE_SIZE,
                                    )
_frame_selector = FrameSelector(selection_style="spread")
preprocessor = Wrapper(preprocessor=_image_preprocessor,
                       frame_selector=_frame_selector)
#--------------------------------------------------------------------
# define extractor:
from bob.paper.makeup_aim.extractor import FICNN
from bob.bio.video.extractor import Wrapper
from bob.extension import rc
import os
import sys

_model_dir = rc.get("LIGHTCNN9_MODEL_DIRECTORY")
_model_name = "LightCNN_9Layers_checkpoint.pth.tar"
# rc.get() returns None when the key is not configured; guard before joining,
# otherwise os.path.join raises TypeError with a confusing traceback.
if _model_dir is None:
    print("Error: LIGHTCNN9_MODEL_DIRECTORY is not configured.\nPlease follow the download instructions from README")
    sys.exit(1)
_model_file = os.path.join(_model_dir, _model_name)
if not os.path.exists(_model_file):
    print("Error: Could not find the LightCNN-9 model at [{}].\nPlease follow the download instructions from README".format(_model_dir))
    # bug fix: was exit(0), which reports success to the calling shell
    sys.exit(1)
extractor = Wrapper(FICNN(model_file=_model_file))
#--------------------------------------------------------------------
# define algorithm:
from bob.bio.base.config.algorithm.distance_cosine import algorithm
from bob.bio.video.algorithm import Wrapper
algorithm = Wrapper(algorithm)
#--------------------------------------------------------------------
\ No newline at end of file
......@@ -161,7 +161,7 @@ class AIMDatabase(FileListPadDatabase):
NOTE: you can pre-compute annotation in your first experiment
and then reuse them in other experiments setting
``self.annotations_temp_dir`` path of this class, where
``self.annotations_directory`` path of this class, where
precomputed annotations will be saved.
**Parameters:**
......
"""
Implementation of dataset interface of AIM for Face Recognition using RGB images.
@author: Zohreh Mostaani
"""
# Imports
from bob.bio.base.database import FileListBioDatabase
from bob.bio.video.database import VideoBioFile
from bob.bio.video import FrameSelector
from bob.io.base import HDF5File, create_directories_safe
from bob.pad.face.preprocessor.FaceCropAlign import detect_face_landmarks_in_image
import numpy as np
import json
import os
import pkg_resources
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
#--------------------------------------------------------------------
class File(VideoBioFile):
    """
    The file interface for RGB data.
    """

    def load(self, directory=None, extension=None,
             frame_selector=FrameSelector()):
        """Read the video from HDF5 and return it through the frame selector.

        The raw data is stored as BGR frames under '/intelsr300_bgr'; it is
        converted to RGB and rearranged into Bob's (frames, channels, h, w)
        layout before frame selection.
        """
        video_path = self.make_path(directory, extension)
        with HDF5File(video_path) as h5:
            frames = h5['/intelsr300_bgr']
        # reverse the channel axis: BGR -> RGB
        frames = frames[..., ::-1]
        # move channels from last axis to second axis (Bob video format)
        frames = np.moveaxis(frames, -1, 1)
        return frame_selector(frames)
class AIMVulnDataset(FileListBioDatabase):
    """
    A high-level implementation of a Database class for the AIM dataset,
    used for vulnerability analysis of face recognition against makeup.
    """

    def __init__(
            self,
            name="AIM_vuln",
            original_directory=None,
            original_extension='.h5',
            protocol="grandtest",
            annotation_directory=None,
            annotation_extension='.json',
            annotation_type='json',
            models_depend_on_protocol=True,
            bio_file_class=File,
            use_dense_probe_file_list=False,
            groups="dev",
            **kwargs):
        """
        **Parameters:**

        ``original_directory`` : str or None
            original directory refers to the location of AIM_vuln parent directory

        ``original_extension`` : str or None
            extension of original data

        ``groups`` : str or [str]
            The groups for which the clients should be returned.
            Usually, groups are one or more elements of ['train', 'dev', 'eval'].
            Default: 'dev'.

        ``protocol`` : str
            The protocol for which the clients should be retrieved.
            Default: 'grandtest'.

        NOTE(review): ``use_dense_probe_file_list`` and ``groups`` are accepted
        here but not forwarded to the parent class or used below — confirm
        whether they are consumed via ``**kwargs`` by callers.
        """
        # File lists shipped inside the package define the protocols.
        filelists_directory = pkg_resources.resource_filename( __name__, "/lists/aim_vuln/")
        self.filelists_directory = filelists_directory
        # init the parent class using super.
        super(AIMVulnDataset, self).__init__(
            filelists_directory=filelists_directory,
            name=name,
            protocol=protocol,
            original_directory=original_directory,
            original_extension=original_extension,
            annotation_directory=annotation_directory,
            annotation_extension=annotation_extension,
            annotation_type=annotation_type,
            bio_file_class=bio_file_class,
            models_depend_on_protocol=models_depend_on_protocol,
            **kwargs)
        self.bio_file_class=bio_file_class
        self.annotation_directory = annotation_directory
        # Landmark detector used when annotations must be computed on the fly.
        self.landmark_detect_method = "mtcnn"
        self.protocol = protocol
        logger.info("Dataset: {}".format(self.name))
        logger.info("Original directory: {}; Annotation directory: {}".format(self.original_directory, self.annotation_directory))

    #--------------------------------------------------------------------
    def annotations(self, f):
        """
        Computes annotations for a given file object ``f``, which
        is an instance of the ``File`` class.

        NOTE: you can pre-compute annotations in your first experiment
        and then reuse them in other experiments setting the
        ``self.annotation_directory`` path of this class, where
        precomputed annotations will be saved.

        **Parameters:**

        ``f`` : :py:class:`object`
            An instance of ``File`` defined above.

        **Returns:**

        ``annotations`` : :py:class:`dict`
            A dictionary containing annotations for
            each frame in the video.
            Dictionary structure:
            ``annotations = {'1': frame1_dict, '2': frame2_dict, ...}``.
            Where
            ``frameN_dict`` contains coordinates of the
            face bounding box and landmarks in frame N.
            Returns ``None`` when no face was detected in any frame.
        """
        # NOTE(review): assumes annotation_directory is a string; if it is
        # None this join raises TypeError — confirm callers always set it.
        file_path = os.path.join(self.annotation_directory, f.path + ".json")
        # If annotations do not exist, then generate.
        if not os.path.isfile(file_path):
            video = f.load(directory=self.original_directory,
                           extension=self.original_extension)
            annotations = {}
            for idx, image in enumerate(video.as_array()):
                frame_annotations = detect_face_landmarks_in_image(
                    image, method=self.landmark_detect_method)
                # frames where detection failed are skipped entirely
                if frame_annotations:
                    annotations[str(idx)] = frame_annotations
            # if directory is not an empty string, cache the result to disk
            if self.annotation_directory:
                create_directories_safe(
                    directory=os.path.split(file_path)[0], dryrun=False)
                with open(file_path, 'w+') as json_file:
                    json_file.write(json.dumps(annotations))
        # if file with annotations exists load them from file
        else:
            with open(file_path, 'r') as json_file:
                annotations = json.load(json_file)
        # if dictionary is empty, signal "no annotations" with None
        if not annotations:
            logger.warning("Empty annotations for %s", f.path)
            return None
        return annotations
#--------------------------------------------------------------------
\ No newline at end of file
face-station/01.02.18/002_01_000_0_00 002 002
face-station/17.01.18/003_01_000_0_00 003 003
face-station/17.01.18/004_01_000_0_00 004 004
face-station/18.01.18/005_01_000_0_00 005 005
face-station/22.01.18/006_01_000_0_00 006 006
face-station/22.01.18/007_01_000_0_00 007 007
face-station/26.01.18/008_01_000_0_00 008 008
face-station/25.01.18/012_01_000_0_00 012 012
face-station/18.01.18/018_01_000_0_00 018 018
face-station/31.01.18/019_01_000_0_00 019 019
face-station/01.02.18/038_01_000_0_00 038 038
face-station/30.04.18/064_05_000_0_00 064 064
face-station/01.05.18/065_07_000_0_00 065 065
face-station/23.04.18/066_05_000_0_00 066 066
face-station/23.04.18/067_05_000_0_00 067 067
face-station/22.05.18/068_05_000_0_00 068 068
face-station/15.05.18/069_05_000_0_00 069 069
face-station/04.05.18/070_05_000_0_00 070 070
face-station/04.05.18/071_05_000_0_00 071 071
face-station/23.05.18/072_07_000_0_00 072 072
#!/usr/bin/env python
"""
Plots box-plot and histogram for similarity scores for face recognition system.
Usage:
plot_vuln.py [options] <scores>
plot_vuln.py --help
Options:
-h --help Show this screen.
-o <path>, --output=<path> The path to which the plots will be saved [default: ]
"""
import os
from docopt import docopt
import bob.measure
import matplotlib.pyplot as plt
import numpy as np
from bob.bio.base.score.load import load_score
from matplotlib import gridspec
#--------------------------------------------------------------------
# Here are the global names used for the plots
plot_title = "LightCNN FR"
genuine_label = 'Genuine'
makeup_label = 'Makeup'
# box-plot output file name (extension added by matplotlib backend)
box_plot_filename = 'box-plot'
box_plot_ylabel = 'Similarity Scores'
# histogram plot
# NOTE(review): "historgram" is a typo, but the name is referenced in main();
# rename both together if cleaning up.
historgram_filename = 'hist-plot'
line_label = 'FNMR threshold'
hist_plot_xlabel = 'Similarity Scores'
hist_plot_ylabel = 'Normalized Count'
# separating genuine and makeup scores
def return_similarity_scores(scores):
    '''
    Separates the necessary scores from the score list for genuine and makeup.

    - scores: structured array with at least 'claimed_id', 'real_id' and
      'score' fields (as produced by bob.bio.base.score.load.load_score).
    Returns a (genuine, makeup) pair of contiguous arrays, each shifted by +1
    so cosine-distance scores become similarity-like values.
    '''
    genuine_mask = scores['real_id'] == scores['claimed_id']
    makeup_mask = scores['real_id'] == 'makeup'
    genuine_sim = 1 + np.ascontiguousarray(scores[genuine_mask]['score'])
    makeup_sim = 1 + np.ascontiguousarray(scores[makeup_mask]['score'])
    return genuine_sim, makeup_sim
#--------------------------------------------------------------------
# plotting the box-plot
def plot_box_plot(gen_scores, makeup_scores, output):
    '''
    The box plots to show the effect of makeup on FR system
    - gen_scores: the scores for genuine faces without makeup
    - makeup_scores: the scores for faces with makeup
    - output: The file to which the plot will be saved.
    '''
    # Small figure with constrained layout; default matplotlib style.
    plt.style.use('default')
    plt.rcParams['figure.figsize'] = (4, 3)
    plt.rcParams['figure.constrained_layout.use'] = True
    fig = plt.figure()
    # 9-row grid; the axes take rows 1-8, leaving headroom for the title.
    gs = gridspec.GridSpec(9, 1, figure=fig)
    ax = plt.gcf().add_subplot(gs[1:9])
    bbox_props = dict(color="b", alpha=0.9)
    flier_props = dict(marker="+", markersize=4, markeredgecolor="g")
    # Whiskers at the 5th/95th percentiles; points outside become fliers.
    whis_props = [5, 95]
    whisker_props = dict(linestyle='--', dashes=(5, 5))
    median_props = dict(color="r")
    bp = ax.boxplot([gen_scores, makeup_scores], labels=[genuine_label, makeup_label],
                    patch_artist=False, autorange=True, flierprops=flier_props,
                    boxprops=bbox_props,
                    whiskerprops=whisker_props,
                    medianprops=median_props,
                    whis=whis_props,
                    widths=0.25,
                    )
    # Similarity scores are displayed on a fixed [0, 1] axis.
    top = 1
    bottom = 0
    ax.set_ylim(bottom, top)
    ax.set_aspect(1.5)
    for line in bp['medians']:
        # get position data for median line
        x, y = line.get_xydata()[1]  # top of median line
        # overlay median value as text next to the line
        ax.annotate(f"{y:.2f}", (x, y))
    plt.ylabel(box_plot_ylabel)
    plt.title(plot_title)
    plt.savefig(output)
    plt.close()
#--------------------------------------------------------------------
# plotting the histogram
def plot_histogram(gen_scores, makeup_scores, output):
    '''
    The histogram to show the effect of makeup on FR system
    - gen_scores: the scores for genuine faces without makeup
    - makeup_scores: the scores for faces with makeup
    - output: The file to which the plot will be saved.
    '''
    # Small figure with constrained layout; default matplotlib style.
    plt.style.use('default')
    plt.rcParams['figure.figsize'] = (4, 3)
    plt.rcParams['figure.constrained_layout.use'] = True
    fig = plt.figure()
    # 9-row grid; the axes take rows 1-8, leaving headroom for the legend.
    gs = gridspec.GridSpec(9, 1, figure=fig)
    ax = plt.gcf().add_subplot(gs[1:9])
    # Decision threshold from genuine scores only (negatives list is empty);
    # presumably this is the 10% FNMR operating point — confirm against
    # bob.measure.frr_threshold semantics.
    th = bob.measure.frr_threshold([], gen_scores, 0.1)
    color_scheme = {'genuine': '#2ca02c',
                    'line': '#d4257b', 'makeup': '#ff7f0e'}
    alpha_scheme = {'genuine': 0.9, 'makeup': 0.6}
    hatch_scheme = {'genuine': '//', 'makeup': None}
    # NOTE(review): `lines` is collected but never used afterwards; the
    # legend is built from handles/labels below instead.
    lines = []
    # genuine histogram (density-normalized)
    line = plt.hist(gen_scores, bins=10, color=color_scheme['genuine'],
                    alpha=alpha_scheme['genuine'],
                    hatch=hatch_scheme['genuine'],
                    label=genuine_label, density=True)
    lines.append(line[-1][0])
    # vertical threshold marker across the full axes height
    line = plt.axvline(x=th, ymin=0, ymax=1, linewidth=2,
                       color=color_scheme['line'], linestyle='--',
                       label=line_label)
    lines.append(line)
    # makeup histogram (density-normalized)
    line = plt.hist(makeup_scores, bins=10, color=color_scheme['makeup'],
                    alpha=alpha_scheme['makeup'],
                    hatch=hatch_scheme['makeup'],
                    density=True, label=makeup_label)
    lines.append(line[-1][0])
    hs, ls = plt.gca().get_legend_handles_labels()
    ax.grid(True)
    plt.xlabel(hist_plot_xlabel)
    plt.ylabel(hist_plot_ylabel)
    plt.title(plot_title)
    # Deduplicate legend entries by label before drawing the figure legend.
    by_label = dict(zip(ls, hs))
    fig.legend(by_label.values(), by_label.keys(),
               loc='upper center', ncol=3, framealpha=0.5)
    plt.savefig(output)
    plt.close()
def main():
    """Entry point: parse CLI arguments, load scores, produce both plots."""
    arguments = docopt(__doc__)
    out_dir = arguments['--output']
    scores = load_score(arguments['<scores>'])
    gen_sim, makeup_sim = return_similarity_scores(scores)
    box_path = os.path.join(out_dir, box_plot_filename)
    plot_box_plot(gen_scores=gen_sim,
                  makeup_scores=makeup_sim, output=box_path)
    hist_path = os.path.join(out_dir, historgram_filename)
    plot_histogram(gen_scores=gen_sim,
                   makeup_scores=makeup_sim, output=hist_path)
    print("The plots are saved in {} and {}".format(box_path, hist_path))


if __name__ == "__main__":
    main()
......@@ -89,6 +89,7 @@ setup(
# scripts should be declared using this entry:
'console_scripts' : [
'annotate_db.py = bob.paper.makeup_aim.script.annotate_db:main',
'plot_vuln.py = bob.paper.makeup_aim.script.plot_vuln:main'
],
'bob.bio.config': [
'ymu = bob.paper.makeup_aim.config.ymu',
......