From d81b3a4bb26eb6f012ccaecd861838a0c31bb685 Mon Sep 17 00:00:00 2001
From: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
Date: Tue, 4 May 2021 18:34:00 +0200
Subject: [PATCH] 50 shades of face

---
 notebooks/50-shades-of-face.ipynb | 359 ++++++++++++++++++++++++++++++
 1 file changed, 359 insertions(+)
 create mode 100644 notebooks/50-shades-of-face.ipynb

diff --git a/notebooks/50-shades-of-face.ipynb b/notebooks/50-shades-of-face.ipynb
new file mode 100644
index 00000000..0e987722
--- /dev/null
+++ b/notebooks/50-shades-of-face.ipynb
@@ -0,0 +1,359 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# 50 Shades of face\n",
+    "\n",
+    "In this notebook we aim to evalute the impact of different face crops in FR baselines.\n",
+    "For that we are relying on the MOBIO dataset, which is not ideal, but it's short enough to run a bunch of experiments."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Fetching resources\n",
+    "import bob.bio.base\n",
+    "from bob.bio.base.pipelines.vanilla_biometrics import execute_vanilla_biometrics\n",
+    "from bob.bio.base.pipelines.vanilla_biometrics import Distance\n",
+    "from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline\n",
+    "\n",
+    "from bob.bio.face.database import MobioDatabase\n",
+    "from bob.bio.face.preprocessor import FaceCrop\n",
+    "from bob.extension import rc\n",
+    "from bob.pipelines import wrap\n",
+    "import os\n",
+    "import scipy.spatial\n",
+    "import bob.measure\n",
+    "dask_client = None\n",
+    "\n",
+    "###\n",
+    "image_size = 112\n",
+    "\n",
+    "# eyes position in the vertical axis\n",
+    "# final position will be image_size/height_denominators\n",
+    "height_denominators = [4.5,4,3.5,3,2.8]\n",
+    "\n",
+    "# Eyes distance to be explored\n",
+    "eyes_distances = [30, 35, 40, 42,45,48]\n",
+    "\n",
+    "\n",
+    "\n",
+    "output_path = \"./50-shades\"\n",
+    "\n",
+    "\n",
+    "######## CHANGE YOUR FEATURE EXTRACTOR HERE\n",
+    "from bob.bio.face.embeddings.mxnet_models import ArcFaceInsightFace\n",
+    "extractor_transformer = wrap([\"sample\"],ArcFaceInsightFace())\n",
+    "\n",
+    "### CHANGE YOUR MATCHER HERE\n",
+    "algorithm = Distance(distance_function = scipy.spatial.distance.cosine,is_distance_function = True)\n",
+    "\n",
+    "\n",
+    "##### CHANGE YOUR DATABASE HERE\n",
+    "database = MobioDatabase(protocol=\"mobile0-male\")\n",
+    "sample = database.references()[0][0]\n",
+    "import matplotlib.pyplot as plt\n",
+    "import bob.io.image\n",
+    "\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Setting up the grid\n",
+    "\n",
+    "If you want to run this on the cluster, don't forget to `SETSHELL grid` before running the cell below.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Starting dask client\n",
+    "\n",
+    "\n",
+    "from dask.distributed import Client\n",
+    "from bob.pipelines.distributed.sge import SGEMultipleQueuesCluster\n",
+    "\n",
+    "cluster = SGEMultipleQueuesCluster(min_jobs=1)\n",
+    "dask_client = Client(cluster)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Running different face crops\n",
+    "\n",
+    "Here we are varying the `eyes_distances` and the ration `fig_size/height_denominators`,\n",
+    "generating the transformers and plotting the outcome"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "365e408f16e04de7ba0f709639b4ee8d",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "%matplotlib widget\n",
+    "\n",
+    "annotation_type = \"eyes-center\"\n",
+    "fixed_positions = None\n",
+    "\n",
+    "\n",
+    "def get_cropers(final_size = 112,\n",
+    "                height_denominators = [5,4,3,2],\n",
+    "                eyes_distances = [30, 35, 40, 42,45]):\n",
+    "    \n",
+    "    left_eye_offset = 1.49\n",
+    "\n",
+    "    transformers = []\n",
+    "    for e in eyes_distances:\n",
+    "        for h in height_denominators:\n",
+    "\n",
+    "            right_eye_offset = (final_size*left_eye_offset)/(final_size-e*left_eye_offset)\n",
+    "        \n",
+    "            RIGHT_EYE_POS = (final_size / h, final_size/right_eye_offset)\n",
+    "            LEFT_EYE_POS = (final_size / h, final_size/left_eye_offset)\n",
+    "\n",
+    "\n",
+    "            #RIGHT_EYE_POS = (final_size / 3.44, final_size / 3.02)\n",
+    "            #LEFT_EYE_POS = (final_size / 3.44, final_size / 1.49)\n",
+    "            cropped_positions = {\n",
+    "                \"leye\": LEFT_EYE_POS,\n",
+    "                \"reye\": RIGHT_EYE_POS,\n",
+    "            }\n",
+    "            #print(cropped_positions)\n",
+    "\n",
+    "            preprocessor_transformer = FaceCrop(cropped_image_size=(112,112),\n",
+    "                                                cropped_positions=cropped_positions,\n",
+    "                                                color_channel='rgb',\n",
+    "                                                fixed_positions=fixed_positions)\n",
+    "\n",
+    "            transform_extra_arguments = (None if (cropped_positions is None or fixed_positions is not None) else ((\"annotations\", \"annotations\"),))\n",
+    "\n",
+    "            preprocessor_transformer = wrap([\"sample\"], preprocessor_transformer,transform_extra_arguments=transform_extra_arguments)\n",
+    "            \n",
+    "            transformers.append(preprocessor_transformer)\n",
+    "\n",
+    "    return transformers\n",
+    "\n",
+    "def plot_faces(transformers, database, subplot_shape, fnmrs=None):\n",
+    "\n",
+    "    fig, axis = plt.subplots(subplot_shape[0], subplot_shape[1])\n",
+    "    offset = 0\n",
+    "    for ax_h in axis:\n",
+    "        for ax_w in ax_h:\n",
+    "    \n",
+    "            # Picking the first sample\n",
+    "            sample = database.references()[0][0]\n",
+    "            preprocessor_transformer = transformers[offset]\n",
+    "\n",
+    "            cropped = preprocessor_transformer.transform([sample])[0]\n",
+    "            cropped = bob.io.image.to_matplotlib(cropped.data).astype(\"uint8\")\n",
+    "\n",
+    "            ax_w.imshow(cropped)\n",
+    "            reye_y = round(preprocessor_transformer.estimator.cropped_positions[\"reye\"][0],2)\n",
+    "            reye_x = round(preprocessor_transformer.estimator.cropped_positions[\"reye\"][1],2)\n",
+    "            leye_y = round(preprocessor_transformer.estimator.cropped_positions[\"leye\"][0],2)\n",
+    "            leye_x = round(preprocessor_transformer.estimator.cropped_positions[\"leye\"][1],2)\n",
+    "            \n",
+    "            if fnmrs is None:\n",
+    "                title = f\"({reye_y},{reye_x}) - ({leye_y},{leye_x})\"\n",
+    "            else:\n",
+    "                title = f\"({reye_y},{reye_x}) - ({leye_y},{leye_x}) = {fnmrs[offset]}\"\n",
+    "\n",
+    "            ax_w.set_title(f\"{title}\", fontsize=5)\n",
+    "            ax_w.axis('off')\n",
+    "            \n",
+    "            offset +=1\n",
+    "    \n",
+    "\n",
+    "subplot_shape = (int((len(eyes_distances)*len(height_denominators))/len(height_denominators)),len(height_denominators))\n",
+    "\n",
+    "transformers = get_cropers(final_size=image_size,\n",
+    "                           height_denominators=height_denominators,\n",
+    "                           eyes_distances=eyes_distances)\n",
+    "\n",
+    "plot_faces(transformers, database, subplot_shape)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Run vanilla biometrics\n",
+    "\n",
+    "Here we are running Vanilla Biometrics several times and collecting the `1-FNMR@FMR=0.001` and plotting."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
+      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n"
+     ]
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "6556949a548c4f1da86073f0a9351109",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "\"\"\"\n",
+    "vanilla_biometrics(\n",
+    "    pipeline,\n",
+    "    database,\n",
+    "    dask_client,\n",
+    "    groups,\n",
+    "    output,\n",
+    "    write_metadata_scores,\n",
+    "    checkpoint,\n",
+    "    dask_partition_size,\n",
+    "    dask_n_workers,\n",
+    ")\n",
+    "\"\"\"\n",
+    "from sklearn.pipeline import make_pipeline\n",
+    "write_metadata_scores = False\n",
+    "checkpoint = False\n",
+    "dask_partition_size = None\n",
+    "dask_n_workers = 15\n",
+    "\n",
+    "### Preparing the pipeline\n",
+    "\n",
+    "fnmrs = []\n",
+    "for t in transformers:\n",
+    "\n",
+    "    # Chain the Transformers together\n",
+    "    transformer = make_pipeline(t, extractor_transformer)\n",
+    "\n",
+    "    # Assemble the Vanilla Biometric pipeline and execute\n",
+    "    pipeline = VanillaBiometricsPipeline(transformer, algorithm)\n",
+    "\n",
+    "    execute_vanilla_biometrics(\n",
+    "        pipeline,\n",
+    "        database,\n",
+    "        dask_client,\n",
+    "        [\"dev\"],\n",
+    "        output_path,\n",
+    "        write_metadata_scores,\n",
+    "        checkpoint,\n",
+    "        dask_partition_size,\n",
+    "        dask_n_workers,\n",
+    "        allow_scoring_with_all_biometric_references=True\n",
+    "    )\n",
+    "    \n",
+    "    scores_dev = os.path.join(output_path, \"scores-dev\")\n",
+    "    \n",
+    "    # Picking FNMR@FAR=0.001\n",
+    "    neg, pos = bob.bio.base.score.load.split_four_column(scores_dev)\n",
+    "    far_thres = bob.measure.far_threshold(neg, pos, 0.001)\n",
+    "    fpr,fnr = bob.measure.fprfnr(neg, pos, far_thres)\n",
+    "    fnmr_1 = round(1-fnr,2)\n",
+    "    fnmrs.append(fnmr_1)\n",
+    "    \n",
+    "    \n",
+    "plot_faces(transformers, database, subplot_shape, fnmrs)        \n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Shutting down client\n",
+    "dask_client.shutdown()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
-- 
GitLab