From 9459871fe71cbb328f219572d0fa860881c7e14f Mon Sep 17 00:00:00 2001
From: Samuel Gaist <samuel.gaist@idiap.ch>
Date: Wed, 27 Jan 2021 17:47:02 +0100
Subject: [PATCH] [advanced][databases][cpqd] Add new version following the V2
 implementation

---
 advanced/databases/cpqd/5.json | 327 +++++++++++++++++++++++++
 advanced/databases/cpqd/5.py   | 424 +++++++++++++++++++++++++++++++++
 advanced/databases/cpqd/5.rst  |   1 +
 3 files changed, 752 insertions(+)
 create mode 100644 advanced/databases/cpqd/5.json
 create mode 100644 advanced/databases/cpqd/5.py
 create mode 100644 advanced/databases/cpqd/5.rst

diff --git a/advanced/databases/cpqd/5.json b/advanced/databases/cpqd/5.json
new file mode 100644
index 0000000..8226837
--- /dev/null
+++ b/advanced/databases/cpqd/5.json
@@ -0,0 +1,327 @@
+{
+    "description": "The CPqD database",
+    "root_folder": "/this/database/is/not/installed",
+    "protocols": [
+        {
+            "name": "laptop_male",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "laptop_male"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "laptop_male"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "laptop_male"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "laptop_male"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "laptop_male"
+                    }
+                }
+            }
+        },
+        {
+            "name": "laptop_female",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "laptop_female"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "laptop_female"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "laptop_female"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "laptop_female"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "laptop_female"
+                    }
+                }
+            }
+        },
+        {
+            "name": "smartphone_male",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "smartphone_male"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "smartphone_male"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "smartphone_male"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "smartphone_male"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "smartphone_male"
+                    }
+                }
+            }
+        },
+        {
+            "name": "smartphone_female",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "smartphone_female"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "smartphone_female"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "smartphone_female"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "smartphone_female"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "smartphone_female"
+                    }
+                }
+            }
+        },
+        {
+            "name": "l2s_male",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "l2s_male"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "l2s_male"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "l2s_male"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "l2s_male"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "l2s_male"
+                    }
+                }
+            }
+        },
+        {
+            "name": "l2s_female",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "l2s_female"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "l2s_female"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "l2s_female"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "l2s_female"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "l2s_female"
+                    }
+                }
+            }
+        },
+        {
+            "name": "s2l_male",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "s2l_male"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "s2l_male"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "s2l_male"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "s2l_male"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "s2l_male"
+                    }
+                }
+            }
+        },
+        {
+            "name": "s2l_female",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "s2l_female"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "s2l_female"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "s2l_female"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "s2l_female"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "s2l_female"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/cpqd/5.py b/advanced/databases/cpqd/5.py
new file mode 100644
index 0000000..0a09e7c
--- /dev/null
+++ b/advanced/databases/cpqd/5.py
@@ -0,0 +1,424 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software Foundation and   #
+# appearing in the file LICENSE.AGPL included in the packaging of this file.  #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import os
+import re
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View
+
+import bob.io.base
+import bob.io.image  # imported for its side effect of registering image codecs
+import bob.db.cpqd
+
+
+#----------------------------------------------------------
+
+
+class Train(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'file_id', 'eye_centers', 'image'])
+
+        image_folder       = os.path.join(root_folder, "images")
+        annotation_folder  = os.path.join(root_folder, "eye_positions")
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.cpqd.Database()
+
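+        # The 'world' group holds the training data of this protocol; sorting
+        # by client and file id gives a stable, reproducible order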
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups='world'),
+                      key=lambda x: (x.client_id, x.id))
+
+        entries = []
+
+        for obj in objs:
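+            # The numeric file id is built from the digits of the object id's
+            # basename (the part after the last '/')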
+            obj_id = obj.id.split('/')[-1]
+            digits = re.findall(r'\d+', obj_id)
+
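+            # Drop the leading character of the client id to keep only its
+            # numeric part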
+            entries.append(Entry(np.uint64(obj.client_id[1:]), np.uint64(''.join(digits)),
+                                 db.annotations(obj.make_path(annotation_folder, '.pos')),
+                                 obj.make_path(image_folder, '.jpg')))
+
+        return entries
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
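+        # Annotations are stored as (y, x) tuples, one per eye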
+        elif output == 'eye_centers':
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Templates(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - template_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "template_id".
+    Several "template_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                  template_id                | |                  template_id                |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                          client_id                                          |
+    -----------------------------------------------------------------------------------------------
+
+    Note: for this particular database, there is only one "template_id"
+    per "client_id".
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'eye_centers', 'image'])
+
+        image_folder       = os.path.join(root_folder, "images")
+        annotation_folder  = os.path.join(root_folder, "eye_positions")
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.cpqd.Database()
+
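+        # Model ids double as template ids for this database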
+        template_ids = db.model_ids(protocol=parameters['protocol'],
+                                    groups=parameters['group'])
+
+        entries = []
+
+        for template_id in template_ids:
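+            # Only the enrollment ('enroll') samples define the template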
+            objs = db.objects(protocol=parameters['protocol'],
+                              groups=parameters['group'],
+                              purposes='enroll',
+                              model_ids=[template_id])
+
+            for obj in objs:
+                obj_id = obj.id.split('/')[-1]
+                digits = re.findall(r'\d+', obj_id)
+
+                entries.append(Entry(np.uint64(obj.client_id[1:]), np.uint64(template_id[1:]),
+                                     np.uint64(''.join(digits)),
+                                     db.annotations(obj.make_path(annotation_folder, '.pos')),
+                                     obj.make_path(image_folder, '.jpg')))
+
+        return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'template_id':
+            return {
+                'value': np.uint64(obj.template_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'eye_centers':
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Probes(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - probe_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+        - template_ids: "{{ system_user.username }}/array_1d_uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    One "probe_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+    Several "client_id" are associated with a given "template_ids".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                        template_ids                                         |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id',
+                                     'eye_centers', 'image'])
+
+        image_folder       = os.path.join(root_folder, "images")
+        annotation_folder  = os.path.join(root_folder, "eye_positions")
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.cpqd.Database()
+
+        template_ids = sorted(db.model_ids(protocol=parameters['protocol'],
+                                           groups=parameters['group']))
+
+
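+        # Map each template to the ids of the probes that must be compared
+        # against it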
+        template_probes = {}
+        for template_id in template_ids:
+            objs = sorted(db.objects(protocol=parameters['protocol'],
+                                     groups=parameters['group'],
+                                     purposes='probe',
+                                     model_ids=[template_id]),
+                           key=lambda x: (x.client_id, x.id))
+
+            template_probes[template_id] = [ p.id for p in objs ]
+
+
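+        # All probe objects of the group, in a stable order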
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups=parameters['group'],
+                                 purposes='probe'),
+                      key=lambda x: (x.client_id, x.id))
+
+        entries = []
+        for obj in objs:
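+            # Templates this probe must be compared against, converted to
+            # numeric ids (same leading-character stripping as for client ids)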
+            templates = [ template_id for template_id in template_ids
+                                      if obj.id in template_probes[template_id] ]
+
+            templates = [ np.uint64(x[1:]) for x in templates ]
+
+            obj_id = obj.id.split('/')[-1]
+            digits = re.findall(r'\d+', obj_id)
+
+            entries.append(Entry(templates,
+                                 np.uint64(obj.client_id[1:]),
+                                 np.uint64(''.join(digits)),
+                                 np.uint64(''.join(digits)),
+                                 db.annotations(obj.make_path(annotation_folder, '.pos')),
+                                 obj.make_path(image_folder, '.jpg')))
+
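+        # Sort so that probes sharing the same template list end up in
+        # contiguous blocks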
+        return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids,
+                                              x.client_id, x.probe_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'template_ids':
+            return {
+                'value': obj.template_ids
+            }
+
+        elif output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'probe_id':
+            return {
+                'value': np.uint64(obj.probe_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'eye_centers':
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    # Install a mock load function for the images
+    def mock_load(path):
+        # the actual path is ignored; return a fixed-size fake image
+        return np.zeros((3, 10, 20), dtype=np.uint8)
+
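+    # Fake eye annotations, consistent with the mock image size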
+    def mock_annotations(obj, path):
+        return dict(
+            leye=(5, 4),
+            reye=(7, 4),
+        )
+
+    bob.io.base.load = mock_load
+    bob.db.cpqd.Database.annotations = mock_annotations
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()
+
+    view = Train()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='laptop_male'
+        )
+    )
+    view.get('client_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
+
+
+    view = Templates()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='laptop_male',
+            group='dev',
+        )
+    )
+    view.get('client_id', 0)
+    view.get('template_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
+
+
+    view = Probes()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='s2l_female',
+            group='dev',
+        )
+    )
+    view.get('template_ids', 0)
+    view.get('client_id', 0)
+    view.get('probe_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
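+
+    # Optional sanity checks on the mocked data; the expected values follow
+    # from the mocks installed by setup_tests() above
+    assert view.get('image', 0)['value'].shape == (3, 10, 20)
+    assert view.get('eye_centers', 0)['left']['y'] == 5
+    assert view.get('eye_centers', 0)['right']['y'] == 7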
diff --git a/advanced/databases/cpqd/5.rst b/advanced/databases/cpqd/5.rst
new file mode 100644
index 0000000..1013b98
--- /dev/null
+++ b/advanced/databases/cpqd/5.rst
@@ -0,0 +1 @@
+The CPqD database
\ No newline at end of file
-- 
GitLab