From 37c73d6f54be65fbbdb4c8c79c7329af3e9c1a38 Mon Sep 17 00:00:00 2001 From: Philip ABBET <philip.abbet@idiap.ch> Date: Wed, 21 Feb 2018 09:41:16 +0100 Subject: [PATCH] Update to beat.backend.python 1.5.x --- advanced/databases/atnt/5.json | 124 ++ advanced/databases/atnt/5.py | 572 ++++++ advanced/databases/atnt/5.rst | 74 + advanced/databases/atvskeystroke/4.json | 41 + advanced/databases/atvskeystroke/4.py | 266 +++ advanced/databases/atvskeystroke/4.rst | 83 + advanced/databases/avspoof/4.json | 950 ++++++++++ advanced/databases/avspoof/4.py | 534 ++++++ advanced/databases/avspoof/4.rst | 193 ++ advanced/databases/banca/4.json | 608 ++++++ advanced/databases/banca/4.py | 382 ++++ advanced/databases/banca/4.rst | 70 + advanced/databases/biosecurid_face/3.json | 87 + advanced/databases/biosecurid_face/3.py | 324 ++++ advanced/databases/biosecurid_face/3.rst | 41 + advanced/databases/biowave/3.json | 1579 ++++++++++++++++ advanced/databases/biowave/3.py | 499 +++++ advanced/databases/biowave/3.rst | 212 +++ advanced/databases/casme2/4.json | 1010 ++++++++++ advanced/databases/casme2/4.py | 148 ++ advanced/databases/casme2/4.rst | 65 + advanced/databases/cbsr_nir_vis_2/4.json | 866 +++++++++ advanced/databases/cbsr_nir_vis_2/4.py | 413 +++++ advanced/databases/cbsr_nir_vis_2/4.rst | 64 + advanced/databases/cpqd/4.json | 694 +++++++ advanced/databases/cpqd/4.py | 424 +++++ advanced/databases/cpqd/4.rst | 144 ++ advanced/databases/frgc/4.json | 330 ++++ advanced/databases/frgc/4.py | 412 +++++ advanced/databases/frgc/4.rst | 67 + advanced/databases/gbu/4.json | 159 ++ advanced/databases/gbu/4.py | 381 ++++ advanced/databases/gbu/4.rst | 61 + advanced/databases/kboc16/4.json | 76 + advanced/databases/kboc16/4.py | 259 +++ advanced/databases/kboc16/4.rst | 84 + advanced/databases/lfw/4.json | 54 + advanced/databases/lfw/4.py | 285 +++ advanced/databases/lfw/4.rst | 72 + advanced/databases/livdet2013/4.json | 164 ++ advanced/databases/livdet2013/4.py | 109 ++ 
advanced/databases/livdet2013/4.rst | 75 + advanced/databases/mnist/4.json | 38 + advanced/databases/mnist/4.py | 111 ++ advanced/databases/mnist/4.rst | 93 + advanced/databases/mobio/4.json | 188 ++ advanced/databases/mobio/4.py | 402 ++++ advanced/databases/mobio/4.rst | 74 + advanced/databases/putvein/4.json | 1646 +++++++++++++++++ advanced/databases/putvein/4.py | 222 +++ advanced/databases/putvein/4.rst | 138 ++ advanced/databases/replay/4.json | 1122 +++++++++++ advanced/databases/replay/4.py | 571 ++++++ advanced/databases/replay/4.rst | 225 +++ advanced/databases/utfvp/4.json | 623 +++++++ advanced/databases/utfvp/4.py | 308 +++ advanced/databases/utfvp/4.rst | 108 ++ advanced/databases/voxforge/4.json | 80 + advanced/databases/voxforge/4.py | 323 ++++ advanced/databases/voxforge/4.rst | 56 + advanced/databases/xm2vts/4.json | 350 ++++ advanced/databases/xm2vts/4.py | 382 ++++ advanced/databases/xm2vts/4.rst | 103 ++ test/algorithms/username/integers_sum/1.json | 26 + test/algorithms/username/integers_sum/1.py | 37 + test/databases/simple/1.json | 29 + test/databases/simple/1.py | 200 +- .../username/username/duo/1/split_2.json | 37 + .../username/username/single/1/single.json | 4 +- .../username/single/1/single_add.json | 6 +- .../username/single/1/single_error.json | 2 +- .../single/1/single_error_split_2.json | 2 +- .../username/single/1/single_split_10.json | 2 +- .../username/single/1/single_split_2.json | 4 +- test/toolchains/username/duo/1.json | 60 + 75 files changed, 20533 insertions(+), 94 deletions(-) create mode 100644 advanced/databases/atnt/5.json create mode 100644 advanced/databases/atnt/5.py create mode 100644 advanced/databases/atnt/5.rst create mode 100644 advanced/databases/atvskeystroke/4.json create mode 100644 advanced/databases/atvskeystroke/4.py create mode 100644 advanced/databases/atvskeystroke/4.rst create mode 100644 advanced/databases/avspoof/4.json create mode 100644 advanced/databases/avspoof/4.py create mode 100644 
advanced/databases/avspoof/4.rst create mode 100644 advanced/databases/banca/4.json create mode 100644 advanced/databases/banca/4.py create mode 100644 advanced/databases/banca/4.rst create mode 100644 advanced/databases/biosecurid_face/3.json create mode 100644 advanced/databases/biosecurid_face/3.py create mode 100644 advanced/databases/biosecurid_face/3.rst create mode 100644 advanced/databases/biowave/3.json create mode 100644 advanced/databases/biowave/3.py create mode 100644 advanced/databases/biowave/3.rst create mode 100644 advanced/databases/casme2/4.json create mode 100644 advanced/databases/casme2/4.py create mode 100644 advanced/databases/casme2/4.rst create mode 100644 advanced/databases/cbsr_nir_vis_2/4.json create mode 100644 advanced/databases/cbsr_nir_vis_2/4.py create mode 100644 advanced/databases/cbsr_nir_vis_2/4.rst create mode 100644 advanced/databases/cpqd/4.json create mode 100644 advanced/databases/cpqd/4.py create mode 100644 advanced/databases/cpqd/4.rst create mode 100644 advanced/databases/frgc/4.json create mode 100644 advanced/databases/frgc/4.py create mode 100644 advanced/databases/frgc/4.rst create mode 100644 advanced/databases/gbu/4.json create mode 100644 advanced/databases/gbu/4.py create mode 100644 advanced/databases/gbu/4.rst create mode 100644 advanced/databases/kboc16/4.json create mode 100644 advanced/databases/kboc16/4.py create mode 100644 advanced/databases/kboc16/4.rst create mode 100644 advanced/databases/lfw/4.json create mode 100644 advanced/databases/lfw/4.py create mode 100644 advanced/databases/lfw/4.rst create mode 100644 advanced/databases/livdet2013/4.json create mode 100644 advanced/databases/livdet2013/4.py create mode 100644 advanced/databases/livdet2013/4.rst create mode 100644 advanced/databases/mnist/4.json create mode 100644 advanced/databases/mnist/4.py create mode 100644 advanced/databases/mnist/4.rst create mode 100644 advanced/databases/mobio/4.json create mode 100644 advanced/databases/mobio/4.py 
create mode 100644 advanced/databases/mobio/4.rst create mode 100644 advanced/databases/putvein/4.json create mode 100644 advanced/databases/putvein/4.py create mode 100644 advanced/databases/putvein/4.rst create mode 100644 advanced/databases/replay/4.json create mode 100644 advanced/databases/replay/4.py create mode 100644 advanced/databases/replay/4.rst create mode 100644 advanced/databases/utfvp/4.json create mode 100644 advanced/databases/utfvp/4.py create mode 100644 advanced/databases/utfvp/4.rst create mode 100644 advanced/databases/voxforge/4.json create mode 100644 advanced/databases/voxforge/4.py create mode 100644 advanced/databases/voxforge/4.rst create mode 100644 advanced/databases/xm2vts/4.json create mode 100644 advanced/databases/xm2vts/4.py create mode 100644 advanced/databases/xm2vts/4.rst create mode 100644 test/algorithms/username/integers_sum/1.json create mode 100644 test/algorithms/username/integers_sum/1.py create mode 100644 test/experiments/username/username/duo/1/split_2.json create mode 100644 test/toolchains/username/duo/1.json diff --git a/advanced/databases/atnt/5.json b/advanced/databases/atnt/5.json new file mode 100644 index 0000000..25459fc --- /dev/null +++ b/advanced/databases/atnt/5.json @@ -0,0 +1,124 @@ +{ + "description": "The AT&T Database of Faces", + "root_folder": "/idiap/group/biometric/databases/orl", + "protocols": [ + { + "name": "idiap", + "template": "simple_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_2d_uint8/1" + } + }, + { + "name": "templates", + "template": "templates", + "view": "Templates", + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ 
system_user.username }}/array_2d_uint8/1" + } + }, + { + "name": "probes", + "template": "probes", + "view": "Probes", + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_2d_uint8/1" + } + } + ] + }, + { + "name": "idiap_test_eyepos", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "TrainEyePositions", + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "TemplatesEyePositions", + "parameters": { + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "ProbesEyePositions", + "parameters": { + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "TemplatesEyePositions", + "parameters": { + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": 
"{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "ProbesEyePositions", + "parameters": { + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + } + ] +} diff --git a/advanced/databases/atnt/5.py b/advanced/databases/atnt/5.py new file mode 100644 index 0000000..9056066 --- /dev/null +++ b/advanced/databases/atnt/5.py @@ -0,0 +1,572 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. 
If not, see http://www.gnu.org/licenses/. # +# # +############################################################################### + +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View + +import bob.io.base +import bob.io.image +import bob.ip.color +import bob.db.atnt + + +#---------------------------------------------------------- + + +class Train(View): + """Outputs: + - image: "{{ system_user.username }}/array_2d_uint8/1" + - file_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + + One "file_id" is associated with a given "image". + Several "image" are associated with a given "client_id". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'file_id', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.atnt.Database() + objs = sorted(db.objects(groups='world', purposes=None), + key=lambda x: (x.client_id, x.id)) + + return [ Entry(x.client_id, x.id, x.make_path(root_folder, '.pgm')) for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } 
+ + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Templates(View): + """Outputs: + - image: "{{ system_user.username }}/array_2d_uint8/1" + - file_id: "{{ system_user.username }}/uint64/1" + - template_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + + One "file_id" is associated with a given "image". + Several "image" are associated with a given "template_id". + Several "template_id" are associated with a given "client_id". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | template_id | | template_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | client_id | + ----------------------------------------------------------------------------------------------- + + Note: for this particular database, there is only one "template_id" + per "client_id". 
+ """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.atnt.Database() + + template_ids = db.model_ids(groups='dev') + + entries = [] + + for template_id in template_ids: + objs = db.objects(groups='dev', purposes='enroll', + model_ids=[template_id]) + + entries.extend([ Entry(x.client_id, template_id, x.id, x.make_path(root_folder, '.pgm')) + for x in objs ]) + + return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'template_id': + return { + 'value': np.uint64(obj.template_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Probes(View): + """Outputs: + - image: "{{ system_user.username }}/array_2d_uint8/1" + - file_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + - probe_id: "{{ system_user.username }}/uint64/1" + - template_ids: "{{ system_user.username }}/array_1d_uint64/1" + + One "file_id" is associated with a given "image". + One "probe_id" is associated with a given "image". + Several "image" are associated with a given "client_id". + Several "client_id" are associated with a given "template_ids". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | template_ids | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.atnt.Database() + + template_ids = np.array(sorted(db.model_ids(groups='dev'), + key=lambda x: int(x)), + dtype='uint64') + + objs = sorted(db.objects(groups='dev', purposes='probe'), + key=lambda x: (x.client_id, x.id)) + + return [ Entry(template_ids, x.client_id, x.id, x.id, x.make_path(root_folder, '.pgm')) + for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'template_ids': + return { + 'value': obj.template_ids + } + + elif output == 'client_id': + return { + 'value': 
np.uint64(obj.client_id) + } + + elif output == 'probe_id': + return { + 'value': np.uint64(obj.probe_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class TrainEyePositions(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - file_id: "{{ system_user.username }}/uint64/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - client_id: "{{ system_user.username }}/uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + Several "image" are associated with a given "client_id". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'eye_centers', 'file_id', 'image']) + + # Open the database and load the 
objects to provide via the outputs + db = bob.db.atnt.Database() + objs = sorted(db.objects(groups='world', purposes=None), + key=lambda x: (x.client_id, x.id)) + + eye_centers = { + 'left': { + 'y': np.int32(48), + 'x': np.int32(63), + }, + 'right': { + 'y': np.int32(48), + 'x': np.int32(27), + } + } + + return [ Entry(x.client_id, eye_centers, x.id, x.make_path(root_folder, '.pgm')) + for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'eye_centers': + return obj.eye_centers + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class TemplatesEyePositions(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - file_id: "{{ system_user.username }}/uint64/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - template_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + Several "image" are associated with a given "template_id". + Several "template_id" are associated with a given "client_id". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | template_id | | template_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | client_id | + ----------------------------------------------------------------------------------------------- + + Note: for this particular database, there is only one "template_id" + per "client_id". 
+ """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'eye_centers', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.atnt.Database() + + eye_centers = { + 'left': { + 'y': np.int32(48), + 'x': np.int32(63), + }, + 'right': { + 'y': np.int32(48), + 'x': np.int32(27), + } + } + + template_ids = db.model_ids(groups='dev') + + entries = [] + + for template_id in template_ids: + objs = db.objects(groups='dev', purposes='enroll', + model_ids=[template_id]) + + entries.extend([ Entry(x.client_id, template_id, x.id, eye_centers, x.make_path(root_folder, '.pgm')) + for x in objs ]) + + return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'template_id': + return { + 'value': np.uint64(obj.template_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'eye_centers': + return obj.eye_centers + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class ProbesEyePositions(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - file_id: "{{ system_user.username }}/uint64/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - client_id: "{{ system_user.username }}/uint64/1" + - probe_id: "{{ system_user.username }}/uint64/1" + - template_ids: "{{ system_user.username }}/array_1d_uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + One "probe_id" is associated with a given "image". + Several "image" are associated with a given "client_id". + Several "client_id" are associated with a given "template_ids". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | template_ids | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', + 'eye_centers', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.atnt.Database() + + eye_centers = { + 'left': { + 'y': np.int32(48), + 'x': np.int32(63), + }, + 'right': { + 'y': np.int32(48), + 'x': np.int32(27), + } + } + + template_ids = 
np.array(sorted(db.model_ids(groups='dev'), + key=lambda x: int(x)), + dtype='uint64') + + objs = sorted(db.objects(groups='dev', purposes='probe'), + key=lambda x: (x.client_id, x.id)) + + return [ Entry(template_ids, x.client_id, x.id, x.id, eye_centers, + x.make_path(root_folder, '.pgm')) + for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'template_ids': + return { + 'value': obj.template_ids + } + + elif output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'probe_id': + return { + 'value': np.uint64(obj.probe_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'eye_centers': + return obj.eye_centers + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +def setup_tests(): + # Install a mock load function for the images + def mock_load(root_folder): + return np.ndarray((92, 112), dtype=np.uint8) + + bob.io.base.load = mock_load + + +#---------------------------------------------------------- + + +# Test the behavior of the views (on fake data) +if __name__ == '__main__': + + setup_tests() + + view = Train() + view.objs = view.index(root_folder='', parameters=dict()) + view.get('client_id', 0) + view.get('file_id', 0) + view.get('image', 0) + + view = Templates() + view.objs = view.index(root_folder='', parameters=dict()) + view.get('client_id', 0) + view.get('template_id', 0) + view.get('file_id', 0) + view.get('image', 0) + + view = Probes() + view.objs = view.index(root_folder='', parameters=dict()) + view.get('template_ids', 0) + view.get('client_id', 0) + view.get('probe_id', 0) + view.get('file_id', 0) + view.get('image', 0) + + view = TrainEyePositions() + view.objs = view.index(root_folder='', parameters=dict()) + view.get('client_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) + + view 
= TemplatesEyePositions() + view.objs = view.index(root_folder='', parameters=dict()) + view.get('client_id', 0) + view.get('template_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) + + view = ProbesEyePositions() + view.objs = view.index(root_folder='', parameters=dict()) + view.get('template_ids', 0) + view.get('client_id', 0) + view.get('probe_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) diff --git a/advanced/databases/atnt/5.rst b/advanced/databases/atnt/5.rst new file mode 100644 index 0000000..49cd0e1 --- /dev/null +++ b/advanced/databases/atnt/5.rst @@ -0,0 +1,74 @@ +.. Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. 
+ + +The AT&T Database of Faces +-------------------------- + +Changelog +========= + +* **Version 5**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 4**, 30/Oct/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 3**, 20/Jan/2016: + + - Port to Bob v2 + +* **Version 2**, 03/Sep/2015: + + - Fix ``idiap_eye_pos`` protocol that was returning the **wrong** eye + positions and was generating rotated crops instead. + +* **Version 1**, 01/Apr/2014: + + - Initial release + + +Description +=========== + +The `AT&T Database of Faces +<http://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html>`_, +(formerly 'The ORL Database of Faces'), contains a set of face images taken +between April 1992 and April 1994. The database was used in the context of a +face recognition project carried out at `Cambridge University +<http://www.cam.ac.uk/>`. + +There are ten different images of each of 40 distinct subjects. For some +subjects, the images were taken at different times, varying the lighting, +facial expressions (open / closed eyes, smiling / not smiling) and +facial details (glasses / no glasses). All the images were taken against a +dark homogeneous background with the subjects in an upright, frontal position +(with tolerance for some side movement). + +A preview image of the Database of Faces is shown below: + +.. image:: http://www.cl.cam.ac.uk/research/dtg/attarchive/images/pictures/faces.gif + :width: 700 px + +The files are in PGM format. The size of each image is 92x112 pixels, with +256 grey levels per pixel. There are 40 subjects, each subject having 10 +different images. 
diff --git a/advanced/databases/atvskeystroke/4.json b/advanced/databases/atvskeystroke/4.json new file mode 100644 index 0000000..7cf1002 --- /dev/null +++ b/advanced/databases/atvskeystroke/4.json @@ -0,0 +1,41 @@ +{ + "description": "The ATVS Keystroke database", + "root_folder": "/idiap/group/biometric/databases/atvs_keystroke", + "protocols": [ + { + "name": "A", + "template": "simple_keystroke_recognition", + "sets": [ + { + "name": "templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "A" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "keystroke": "{{ user.username }}/atvs_keystroke/1" + } + }, + { + "name": "probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "A" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "keystroke": "{{ user.username }}/atvs_keystroke/1" + } + } + ] + } + ] +} \ No newline at end of file diff --git a/advanced/databases/atvskeystroke/4.py b/advanced/databases/atvskeystroke/4.py new file mode 100644 index 0000000..c9a4339 --- /dev/null +++ b/advanced/databases/atvskeystroke/4.py @@ -0,0 +1,266 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. 
For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. # +# # +############################################################################### + +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View + +import bob.db.atvskeystroke + + +keystroke_feature = ['', 'holdtime', 'rplatency', 'pplatency', 'rrlatency', 'prlatency'] +keystroke_type = ['', 'given_name', 'family_name', 'email', 'nationality', 'id_number'] + + +#---------------------------------------------------------- + + +def keystroke_reader(filename): + counter = 0 + feat = 0 + + data = {} + for line in open(filename, 'r').readlines(): + if not line.strip(): continue + if counter % 6 == 0: + feat += 1 + label = line.strip() + data[keystroke_feature[feat]] = {} + else: + values = [np.int32(v) for v in line.strip().split(' ')] + data[keystroke_feature[feat]][keystroke_type[counter % 6]] = np.array(values) + counter += 1 + + return data + + +#---------------------------------------------------------- + + +class Templates(View): + """Outputs: + - keystroke: "{{ user.username }}/atvs_keystroke/1 + - file_id: "{{ system_user.username }}/uint64/1" + - template_id: "{{ system_user.username }}/text/1" + - client_id: "{{ system_user.username }}/text/1" + + One "file_id" is associated with a given "keystroke". + Several "keystroke" are associated with a given "template_id". 
+ Several "template_id" are associated with a given "client_id". + + --------------- --------------- --------------- --------------- --------------- --------------- + | keystroke | | keystroke | | keystroke | | keystroke | | keystroke | | keystroke | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | template_id | | template_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | client_id | + ----------------------------------------------------------------------------------------------- + + Note: for this particular database, there is only one "template_id" + per "client_id". 
+ """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'keystroke']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.atvskeystroke.Database() + + template_ids = db.model_ids(groups='eval', + protocol=parameters['protocol']) + + entries = [] + + for template_id in template_ids: + objs = db.objects(groups='eval', + protocol=parameters['protocol'], + purposes='enrol', + model_ids=[template_id]) + + entries.extend([ Entry(x.client_id, template_id, x.id, x.make_path(root_folder, '.txt')) + for x in objs ]) + + return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'text': str(obj.client_id) + } + + elif output == 'template_id': + return { + 'text': str(obj.template_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'keystroke': + return keystroke_reader(obj.keystroke) + + +#---------------------------------------------------------- + + +class Probes(View): + """Outputs: + - keystroke: "{{ user.username }}/atvs_keystroke/1 + - file_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/text/1" + - probe_id: "{{ system_user.username }}/uint64/1", + - template_ids: "{{ system_user.username }}/array_1d_text/1", + + One "file_id" is associated with a given "keystroke". + One "probe_id" is associated with a given "keystroke". + Several "keystroke" are associated with a given "client_id". + Several "client_id" are associated with a given "template_ids". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | keystroke | | keystroke | | keystroke | | keystroke | | keystroke | | keystroke | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | template_ids | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', + 'keystroke']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.atvskeystroke.Database() + + template_ids = sorted(db.model_ids(protocol=parameters['protocol'], + groups='eval'), + key=lambda x: int(x)) + + template_probes = {} + for template_id in template_ids: + objs = sorted(db.objects(protocol=parameters['protocol'], + groups='eval', + purposes='probe', + model_ids=[template_id]), + key=lambda x: (x.client_id, x.id)) + + template_probes[template_id] = [ p.id for p in objs ] + + + objs = sorted(db.objects(protocol=parameters['protocol'], + 
groups='eval', + purposes='probe'), + key=lambda x: (x.client_id, x.id)) + + entries = [] + for obj in objs: + templates = [ template_id for template_id in template_ids + if obj.id in template_probes[template_id] ] + entries.append( Entry(templates, obj.client_id, obj.id, obj.id, + obj.make_path(root_folder, '.txt')) ) + + return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids, + x.client_id, x.probe_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'template_ids': + return { + 'text': [ str(x) for x in obj.template_ids ] + } + + elif output == 'client_id': + return { + 'text': str(obj.client_id) + } + + elif output == 'probe_id': + return { + 'value': np.uint64(obj.probe_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'keystroke': + return keystroke_reader(obj.keystroke) + + +#---------------------------------------------------------- + + +def setup_tests(): + # Install a mock load function for the keystrokes + def mock_keystroke_reader(filename): + return {} + + global keystroke_reader + keystroke_reader = mock_keystroke_reader + + +#---------------------------------------------------------- + + +# Test the behavior of the views (on fake data) +if __name__ == '__main__': + + setup_tests() + + view = Templates() + view.objs = view.index(root_folder='', parameters=dict(protocol = 'A')) + view.get('client_id', 0) + view.get('template_id', 0) + view.get('file_id', 0) + view.get('keystroke', 0) + + view = Probes() + view.objs = view.index(root_folder='', parameters=dict(protocol = 'A')) + view.get('template_ids', 0) + view.get('client_id', 0) + view.get('probe_id', 0) + view.get('file_id', 0) + view.get('keystroke', 0) diff --git a/advanced/databases/atvskeystroke/4.rst b/advanced/databases/atvskeystroke/4.rst new file mode 100644 index 0000000..8e43bc3 --- /dev/null +++ b/advanced/databases/atvskeystroke/4.rst @@ -0,0 +1,83 @@ +.. 
Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. + + +The ATVS-Keystroke Database +--------------------------- + +Changelog +========= + +* **Version 4**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 3**, 30/Oct/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 2**, 26/Jan/2016: + + - Port to Bob v2 + +* **Version 1**, 09/Jul/2015: + + - Initial release + + +Description +=========== + +The `ATVS-Keystroke database <http://atvs.ii.uam.es/keystroke_db.html>`_ +is a dataset captured for performance evaluation of Keystroke Dynamics +recognition systems (see [BTAS15]_ for all the details). The database +comprises 63 users with 12 genuine access and 12 impostor access for +each user for a total number of samples equal to 7680 (63 users x 24 access +x 5 data). There are people from two different nationalities with 60% of males +and 40% females. The acquisition was made in two sessions according a +semi-supervised protocol: + +1. 
**First session**: the users were asked to introduce their personal data +in the platform. This process was repeated six times. + +2. **Second session**: after at least 24 hours, the users were asked to +introduce once again their personal data in the platform. The process was +repeated six times. In addition, in this second session, each user acted as an +impostor trying to spoof the system with the personal data of another user. +The personal data of three other users was showed to each of the impostor and +they introduced them four times for a total number of impostor access of +twelve per user. + +The information provided by the users includes sensitive data and therefore, +it has been post-processed to remove all the personal information (the +characters pressed) and to maintain the privacy of the users enrolled in the +database. The keystroke dynamic patterns were recorded using a key-logger +(programmed in Java). The key-logger detects two different types of events: +press and release. The timestamps for each of the detected events were +recorded in milliseconds. + +For further information on the database we refer the reader to (the following +article is publicly available in the publications section of the +`ATVS group webpage <http://atvs.ii.uam.es/listpublications.do>`_ .) + +.. [BTAS15] A. Morales, M. Falanga, J. Fierrez, C. Sansone and J. Ortega-Garcia, ''Keystroke Dynamics Recognition based on Personal Data: A Comparative Experimental Evaluation Implementing Reproducible Research'', in Proc. of the IEEE Seventh International Conference on Biometrics: Theory, Applications and Systems, Arlington, Virginia, USA, September 2015. + +Please remember to reference [BTAS15]_ on any work made public, whatever the +form, based directly or indirectly on any part of the ATVS-Keystroke DB. 
diff --git a/advanced/databases/avspoof/4.json b/advanced/databases/avspoof/4.json new file mode 100644 index 0000000..8164081 --- /dev/null +++ b/advanced/databases/avspoof/4.json @@ -0,0 +1,950 @@ +{ + "description": "The AVspoof Database", + "root_folder": "/idiap/resource/database/AVSpoof", + "protocols": [ + { + "name": "smalltest_verify_train", + "template": "verify_trainset_speech", + "sets": [ + { + "outputs": { + "speech": "{{ system_user.username }}/array_1d_floats/1", + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "train", + "protocol": "smalltest", + "purpose": "enroll" + }, + "name": "train_templates", + "template": "templates", + "view": "RecognitionTemplates" + }, + { + "outputs": { + "speech": "{{ system_user.username }}/array_1d_floats/1", + "client_id": "{{ system_user.username }}/text/1", + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "parameters": { + "group": "train", + "protocol": "smalltest" + }, + "name": "train_probes", + "template": "probes", + "view": "Probes" + } + ] + }, + { + "name": "smalltest_verify_train_spoof", + "template": "verify_trainset_speech_spoof", + "sets": [ + { + "outputs": { + "speech": "{{ system_user.username }}/array_1d_floats/1", + "client_id": "{{ system_user.username }}/text/1", + "file_id": "{{ system_user.username }}/text/1", + "attack_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "parameters": { + "group": "train", + "protocol": "smalltest" + }, + "name": "train_attacks", + "template": "attacks", + "view": "Attacks" + } + ] + }, + { + "name": "smalltest_verification", + "template": "advanced_speaker_recognition", + "sets": [ + { + "outputs": { + "speech": "{{ system_user.username 
}}/array_1d_floats/1", + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "train", + "protocol": "smalltest" + }, + "name": "train", + "template": "train", + "view": "RecognitionTraining" + }, + { + "outputs": { + "speech": "{{ system_user.username }}/array_1d_floats/1", + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "devel", + "protocol": "smalltest", + "purpose": "enroll" + }, + "name": "dev_templates", + "template": "templates", + "view": "RecognitionTemplates" + }, + { + "outputs": { + "speech": "{{ system_user.username }}/array_1d_floats/1", + "client_id": "{{ system_user.username }}/text/1", + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "parameters": { + "group": "devel", + "protocol": "smalltest" + }, + "name": "dev_probes", + "template": "probes", + "view": "Probes" + }, + { + "outputs": { + "speech": "{{ system_user.username }}/array_1d_floats/1", + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "test", + "protocol": "smalltest", + "purpose": "enroll" + }, + "name": "test_templates", + "template": "templates", + "view": "RecognitionTemplates" + }, + { + "outputs": { + "speech": "{{ system_user.username }}/array_1d_floats/1", + "client_id": "{{ system_user.username }}/text/1", + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "parameters": { + "group": "test", + "protocol": "smalltest" + }, + "name": "test_probes", + "template": "probes", + "view": "Probes" 
+ } + ] + }, + { + "name": "smalltest_verification_spoof", + "template": "speaker_recognition_spoof", + "sets": [ + { + "outputs": { + "speech": "{{ system_user.username }}/array_1d_floats/1", + "client_id": "{{ system_user.username }}/text/1", + "file_id": "{{ system_user.username }}/text/1", + "attack_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "parameters": { + "group": "devel", + "protocol": "smalltest" + }, + "name": "dev_attacks", + "template": "attacks", + "view": "Attacks" + }, + { + "outputs": { + "speech": "{{ system_user.username }}/array_1d_floats/1", + "client_id": "{{ system_user.username }}/text/1", + "file_id": "{{ system_user.username }}/text/1", + "attack_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "parameters": { + "group": "test", + "protocol": "smalltest" + }, + "name": "test_attacks", + "template": "attacks", + "view": "Attacks" + } + ] + }, + { + "name": "grandtest_verify_train", + "template": "verify_trainset_speech", + "sets": [ + { + "outputs": { + "speech": "{{ system_user.username }}/array_1d_floats/1", + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "train", + "protocol": "grandtest", + "purpose": "enroll" + }, + "name": "train_templates", + "template": "templates", + "view": "RecognitionTemplates" + }, + { + "outputs": { + "speech": "{{ system_user.username }}/array_1d_floats/1", + "client_id": "{{ system_user.username }}/text/1", + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "parameters": { + "group": "train", + "protocol": "grandtest" + }, + "name": "train_probes", + "template": "probes", + "view": "Probes" + } + ] + }, + { + "name": 
"grandtest_verify_train_spoof", + "template": "verify_trainset_speech_spoof", + "sets": [ + { + "outputs": { + "speech": "{{ system_user.username }}/array_1d_floats/1", + "client_id": "{{ system_user.username }}/text/1", + "file_id": "{{ system_user.username }}/text/1", + "attack_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "parameters": { + "group": "train", + "protocol": "grandtest" + }, + "name": "train_attacks", + "template": "attacks", + "view": "Attacks" + } + ] + }, + { + "name": "grandtest_verification", + "template": "advanced_speaker_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "RecognitionTraining", + "parameters": { + "protocol": "grandtest", + "group": "train" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "RecognitionTemplates", + "parameters": { + "protocol": "grandtest", + "purpose": "enroll", + "group": "devel" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "grandtest", + "group": "devel" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "RecognitionTemplates", + "parameters": { + "protocol": "grandtest", + "purpose": 
"enroll", + "group": "test" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "grandtest", + "group": "test" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + } + ] + }, + { + "name": "grandtest_verification_spoof", + "template": "speaker_recognition_spoof", + "sets": [ + { + "name": "dev_attacks", + "template": "attacks", + "view": "Attacks", + "parameters": { + "protocol": "grandtest", + "group": "devel" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "attack_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "test_attacks", + "template": "attacks", + "view": "Attacks", + "parameters": { + "protocol": "grandtest", + "group": "test" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "attack_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + } + ] + }, + { + "name": "physicalaccess_verify_train", + "template": "verify_trainset_speech", + "sets": [ + { + "outputs": { + "speech": "{{ system_user.username }}/array_1d_floats/1", + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + 
"template_id": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "train", + "protocol": "physical_access", + "purpose": "enroll" + }, + "name": "train_templates", + "template": "templates", + "view": "RecognitionTemplates" + }, + { + "outputs": { + "speech": "{{ system_user.username }}/array_1d_floats/1", + "client_id": "{{ system_user.username }}/text/1", + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "parameters": { + "group": "train", + "protocol": "physical_access" + }, + "name": "train_probes", + "template": "probes", + "view": "Probes" + } + ] + }, + { + "name": "physicalaccess_verify_train_spoof", + "template": "verify_trainset_speech_spoof", + "sets": [ + { + "outputs": { + "speech": "{{ system_user.username }}/array_1d_floats/1", + "client_id": "{{ system_user.username }}/text/1", + "file_id": "{{ system_user.username }}/text/1", + "attack_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "parameters": { + "group": "train", + "protocol": "physical_access" + }, + "name": "train_attacks", + "template": "attacks", + "view": "Attacks" + } + ] + }, + { + "name": "physicalaccess_verification", + "template": "advanced_speaker_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "RecognitionTraining", + "parameters": { + "protocol": "physical_access", + "group": "train" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "RecognitionTemplates", + "parameters": { + "protocol": "physical_access", + "purpose": "enroll", + "group": "devel" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ 
system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "physical_access", + "group": "devel" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "RecognitionTemplates", + "parameters": { + "protocol": "physical_access", + "purpose": "enroll", + "group": "test" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "physical_access", + "group": "test" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + } + ] + }, + { + "name": "physicalaccess_verification_spoof", + "template": "speaker_recognition_spoof", + "sets": [ + { + "name": "dev_attacks", + "template": "attacks", + "view": "Attacks", + "parameters": { + "protocol": "physical_access", + "group": "devel" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "attack_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "speech": "{{ 
system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "test_attacks", + "template": "attacks", + "view": "Attacks", + "parameters": { + "protocol": "physical_access", + "group": "test" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "attack_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + } + ] + }, + { + "name": "logicalaccess_verify_train", + "template": "verify_trainset_speech", + "sets": [ + { + "outputs": { + "speech": "{{ system_user.username }}/array_1d_floats/1", + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "train", + "protocol": "logical_access", + "purpose": "enroll" + }, + "name": "train_templates", + "template": "templates", + "view": "RecognitionTemplates" + }, + { + "outputs": { + "speech": "{{ system_user.username }}/array_1d_floats/1", + "client_id": "{{ system_user.username }}/text/1", + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "parameters": { + "group": "train", + "protocol": "logical_access" + }, + "name": "train_probes", + "template": "probes", + "view": "Probes" + } + ] + }, + { + "name": "logicalaccess_verify_train_spoof", + "template": "verify_trainset_speech_spoof", + "sets": [ + { + "outputs": { + "speech": "{{ system_user.username }}/array_1d_floats/1", + "client_id": "{{ system_user.username }}/text/1", + "file_id": "{{ system_user.username }}/text/1", + "attack_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "parameters": { + "group": "train", + "protocol": "logical_access" + }, + "name": 
"train_attacks", + "template": "attacks", + "view": "Attacks" + } + ] + }, + { + "name": "logicalaccess_verification", + "template": "advanced_speaker_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "RecognitionTraining", + "parameters": { + "protocol": "logical_access", + "group": "train" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "RecognitionTemplates", + "parameters": { + "protocol": "logical_access", + "purpose": "enroll", + "group": "devel" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "logical_access", + "group": "devel" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "RecognitionTemplates", + "parameters": { + "protocol": "logical_access", + "purpose": "enroll", + "group": "test" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "logical_access", + "group": "test" + }, + "outputs": { + "file_id": "{{ 
system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + } + ] + }, + { + "name": "logicalaccess_verification_spoof", + "template": "speaker_recognition_spoof", + "sets": [ + { + "name": "dev_attacks", + "template": "attacks", + "view": "Attacks", + "parameters": { + "protocol": "logical_access", + "group": "devel" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "attack_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "test_attacks", + "template": "attacks", + "view": "Attacks", + "parameters": { + "protocol": "logical_access", + "group": "test" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "attack_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + } + ] + }, + { + "name": "smalltest_antispoofing", + "template": "simple_speech_antispoofing", + "sets": [ + { + "outputs": { + "attack_type": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1", + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "train", + "protocol": "smalltest" + }, + "name": "train", + "template": "train", + "view": "SimpleAntispoofing" + }, + { + "outputs": { + "attack_type": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1", + "file_id": "{{ system_user.username 
}}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "devel", + "protocol": "smalltest" + }, + "name": "dev_probes", + "template": "probes", + "view": "SimpleAntispoofing" + }, + { + "outputs": { + "attack_type": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1", + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "test", + "protocol": "smalltest" + }, + "name": "eval_probes", + "template": "probes", + "view": "SimpleAntispoofing" + } + ] + }, + { + "name": "grandtest_antispoofing", + "template": "simple_speech_antispoofing", + "sets": [ + { + "name": "train", + "template": "train", + "view": "SimpleAntispoofing", + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1", + "attack_type": "{{ system_user.username }}/text/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "protocol": "grandtest", + "group": "train" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "SimpleAntispoofing", + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1", + "attack_type": "{{ system_user.username }}/text/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "protocol": "grandtest", + "group": "devel" + } + }, + { + "name": "eval_probes", + "template": "probes", + "view": "SimpleAntispoofing", + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1", + "attack_type": "{{ system_user.username 
}}/text/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "protocol": "grandtest", + "group": "test" + } + } + ] + }, + { + "name": "physicalaccess_antispoofing", + "template": "simple_speech_antispoofing", + "sets": [ + { + "name": "train", + "template": "train", + "view": "SimpleAntispoofing", + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1", + "attack_type": "{{ system_user.username }}/text/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "protocol": "physical_access", + "group": "train" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "SimpleAntispoofing", + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1", + "attack_type": "{{ system_user.username }}/text/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "protocol": "physical_access", + "group": "devel" + } + }, + { + "name": "eval_probes", + "template": "probes", + "view": "SimpleAntispoofing", + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1", + "attack_type": "{{ system_user.username }}/text/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "protocol": "physical_access", + "group": "test" + } + } + ] + }, + { + "name": "logicalaccess_antispoofing", + "template": "simple_speech_antispoofing", + "sets": [ + { + "name": "train", + "template": "train", + "view": "SimpleAntispoofing", + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1", + "attack_type": "{{ system_user.username }}/text/1", + 
"class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "protocol": "logical_access", + "group": "train" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "SimpleAntispoofing", + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1", + "attack_type": "{{ system_user.username }}/text/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "protocol": "logical_access", + "group": "devel" + } + }, + { + "name": "eval_probes", + "template": "probes", + "view": "SimpleAntispoofing", + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1", + "attack_type": "{{ system_user.username }}/text/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "protocol": "logical_access", + "group": "test" + } + } + ] + } + ] +} \ No newline at end of file diff --git a/advanced/databases/avspoof/4.py b/advanced/databases/avspoof/4.py new file mode 100644 index 0000000..1b5a45f --- /dev/null +++ b/advanced/databases/avspoof/4.py @@ -0,0 +1,534 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. 
# +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. # +# # +############################################################################### + +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View + +import bob.io.base +import bob.db.avspoof +import scipy.io.wavfile + +from bob.db.avspoof.driver import Interface + +INFO = Interface() +SQLITE_FILE = INFO.files()[0] + + +#---------------------------------------------------------- + + +class RecognitionTraining(View): + """Outputs: + - speech: "{{ system_user.username }}/array_1d_floats/1" + - file_id: "{{ system_user.username }}/text/1" + - client_id: "{{ system_user.username }}/text/1" + + One "file_id" is associated with a given "speech". + Several "speech" are associated with a given "client_id". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | speech | | speech | | speech | | speech | | speech | | speech | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'file_id', 'speech']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.avspoof.Database() + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + cls=('enroll', 'probe')), + key=lambda x: (x.client_id, x.id)) + + return [ Entry(x.client_id, x.id, x.make_path(root_folder, '.wav')) for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'text': str(obj.client_id) + } + + elif output == 'file_id': + return { + 'text': str(obj.file_id) + } + + elif output == 'speech': + rate, audio = scipy.io.wavfile.read(obj.speech) + + return { + 'value': np.cast['float'](audio) + } + + +#---------------------------------------------------------- + + +class RecognitionTemplates(View): + """Outputs: + - speech: "{{ system_user.username }}/array_1d_floats/1" + - file_id: "{{ system_user.username }}/text/1" + - template_id: "{{ system_user.username }}/text/1" + - client_id: "{{ system_user.username }}/text/1" + + One "file_id" is associated with a given "speech". 
+ Several "speech" are associated with a given "template_id". + Several "template_id" are associated with a given "client_id". + + --------------- --------------- --------------- --------------- --------------- --------------- + | speech | | speech | | speech | | speech | | speech | | speech | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | template_id | | template_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | client_id | + ----------------------------------------------------------------------------------------------- + + Note: for this particular database, there is only one "template_id" + per "client_id". 
+ """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'speech']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.avspoof.Database() + + if parameters['protocol'] == "smalltest": + if parameters['group'] == "train": + template_ids = sorted([1, 3]) + if parameters['group'] == "devel": + template_ids = sorted([15, 20]) + if parameters['group'] == "test": + template_ids = sorted([18, 33]) + else: + template_ids = [ client.id for client in db.clients(groups=parameters['group']) ] + + entries = [] + + for template_id in template_ids: + objs = db.objects(groups=parameters['group'], + protocol=parameters['protocol'], + cls=parameters['purpose'], + clients=(template_id,)) + + entries.extend([ Entry(x.client_id, template_id, x.id, x.make_path(root_folder, '.wav')) + for x in objs ]) + + return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'text': str(obj.client_id) + } + + elif output == 'template_id': + return { + 'text': str(obj.template_id) + } + + elif output == 'file_id': + return { + 'text': str(obj.file_id) + } + + elif output == 'speech': + rate, audio = scipy.io.wavfile.read(obj.speech) + + return { + 'value': np.cast['float'](audio) + } + + +#---------------------------------------------------------- + + +class Probes(View): + """Outputs: + - speech: "{{ system_user.username }}/array_1d_floats/1" + - file_id: "{{ system_user.username }}/text/1" + - probe_id: "{{ system_user.username }}/text/1", + - client_id: "{{ system_user.username }}/text/1" + - template_ids: "{{ system_user.username }}/array_1d_text/1", + + One "file_id" is associated with a given "speech". + One "probe_id" is associated with a given "speech". + Several "speech" are associated with a given "client_id". 
+ Several "client_id" are associated with a given "template_ids". + + Each probe must be matched against a number of templates defined by a list of + client identifiers. + + --------------- --------------- --------------- --------------- --------------- --------------- + | speech | | speech | | speech | | speech | | speech | | speech | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | template_ids | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', 'speech']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.avspoof.Database() + + if parameters['protocol'] == "smalltest": + if parameters['group'] == "train": + template_ids = sorted([1, 3]) + if parameters['group'] == "devel": + template_ids = sorted([15, 20]) + if parameters['group'] == "test": + template_ids = sorted([18, 33]) + else: + template_ids = sorted([ client.id for client in 
db.clients(groups=parameters['group']) ]) + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + cls='probe'), + key=lambda x: (x.client_id, x.id)) + + return [ Entry(template_ids, x.client_id, x.id, x.id, x.make_path(root_folder, '.wav')) for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'template_ids': + return { + 'text': [ str(x) for x in obj.template_ids ] + } + + elif output == 'client_id': + return { + 'text': str(obj.client_id) + } + + elif output == 'probe_id': + return { + 'text': str(obj.probe_id) + } + + elif output == 'file_id': + return { + 'text': str(obj.file_id) + } + + elif output == 'speech': + rate, audio = scipy.io.wavfile.read(obj.speech) + + return { + 'value': np.cast['float'](audio) + } + + +#---------------------------------------------------------- + + +class Attacks(View): + """Outputs: + - speech: "{{ system_user.username }}/array_1d_floats/1" + - file_id: "{{ system_user.username }}/text/1" + - attack_id: "{{ system_user.username }}/text/1", + - client_id: "{{ system_user.username }}/text/1" + - template_ids: "{{ system_user.username }}/array_1d_text/1", + + One "file_id" is associated with a given "speech". + One "probe_id" is associated with a given "speech". + Several "speech" are associated with a given "client_id". + Several "client_id" are associated with a given "template_ids". + + Each probe must be matched against a number of templates defined by a list of + client identifiers. 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | speech | | speech | | speech | | speech | | speech | | speech | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | attack_id | | attack_id | | attack_id | | attack_id | | attack_id | | attack_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | template_ids | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['template_ids', 'client_id', 'attack_id', 'file_id', 'speech']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.avspoof.Database() + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + cls='attack'), + key=lambda x: (x.client_id, x.id)) + + return [ Entry([ str(x.client_id) ], x.client_id, x.id, x.id, + x.make_path(root_folder, '.wav')) for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'template_ids': + return { + 'text': obj.template_ids + } + + elif output == 'client_id': + return { + 'text': str(obj.client_id) + } + + elif 
output == 'attack_id': + return { + 'text': str(obj.attack_id) + } + + elif output == 'file_id': + return { + 'text': str(obj.file_id) + } + + elif output == 'speech': + rate, audio = scipy.io.wavfile.read(obj.speech) + + return { + 'value': np.cast['float'](audio) + } + + +#---------------------------------------------------------- + + +class SimpleAntispoofing(View): + """Outputs: + - speech: "{{ system_user.username }}/array_1d_floats/1" + - file_id: "{{ system_user.username }}/text/1" + - client_id: "{{ system_user.username }}/text/1" + - attack_type: "{{ system_user.username }}/text/1" + - class: "{{ system_user.username }}/text/1" + + + One "file_id" is associated with a given "speech". + Several "speech" are associated with a given "client_id". + Several "client_id" are associated with a given "template_id". + + --------------- --------------- --------------- --------------- --------------- --------------- + | speech | | speech | | speech | | speech | | speech | | speech | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ------------------------------- ------------------------------- ------------------------------- + | attack_type | | attack_type | | attack_type | + ------------------------------- ------------------------------- ------------------------------- + --------------------------------------------------------------- ------------------------------- + | client_id | | client_id + --------------------------------------------------------------- ------------------------------- + ----------------------------------------------------------------------------------------------- + | class | + 
----------------------------------------------------------------------------------------------- + + Note: for this particular database, there is only one "client_id" + per "template_id". + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['cls', 'client_id', 'attack_type', 'file_id', 'speech']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.avspoof.Database() + + objs_real = sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + cls='real'), + key=lambda x: (x.client_id, x.id)) + + objs_attack = sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + cls='attack'), + key=lambda x: (x.client_id, x.get_attack(), x.id)) + + return [ Entry('real', x.client_id, 'human', x.id, x.make_path(root_folder, '.wav')) for x in objs_real ] + \ + [ Entry('attack', x.client_id, x.get_attack(), x.id, x.make_path(root_folder, '.wav')) for x in objs_attack ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'class': + return { + 'text': obj.cls + } + + elif output == 'client_id': + return { + 'text': str(obj.client_id) + } + + elif output == 'attack_type': + return { + 'text': obj.attack_type + } + + elif output == 'file_id': + return { + 'text': str(obj.file_id) + } + + elif output == 'speech': + rate, audio = scipy.io.wavfile.read(obj.speech) + + return { + 'value': np.cast['float'](audio) + } + + +#---------------------------------------------------------- + + +def setup_tests(): + # Install a mock read function for the audio files + def mock_read(filename): + return 44100, np.ndarray((128,)) + + scipy.io.wavfile.read = mock_read + + +#---------------------------------------------------------- + + +# Test the behavior of the views (on fake data) +if __name__ == '__main__': + + setup_tests() + + view = RecognitionTraining() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol = 'smalltest', + group = 'train', + 
) + ) + view.get('client_id', 0) + view.get('file_id', 0) + view.get('speech', 0) + + + view = RecognitionTemplates() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol = 'smalltest', + group = 'train', + purpose = 'enroll', + ) + ) + view.get('client_id', 0) + view.get('template_id', 0) + view.get('file_id', 0) + view.get('speech', 0) + + + view = Probes() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol = 'smalltest', + group = 'train', + ) + ) + view.get('template_ids', 0) + view.get('client_id', 0) + view.get('probe_id', 0) + view.get('file_id', 0) + view.get('speech', 0) + + + view = Attacks() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol = 'smalltest', + group = 'train', + ) + ) + view.get('template_ids', 0) + view.get('client_id', 0) + view.get('attack_id', 0) + view.get('file_id', 0) + view.get('speech', 0) + + + view = SimpleAntispoofing() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol = 'smalltest', + group = 'train', + ) + ) + view.get('class', 0) + view.get('client_id', 0) + view.get('attack_type', 0) + view.get('file_id', 0) + view.get('speech', 0) diff --git a/advanced/databases/avspoof/4.rst b/advanced/databases/avspoof/4.rst new file mode 100644 index 0000000..5ef2edf --- /dev/null +++ b/advanced/databases/avspoof/4.rst @@ -0,0 +1,193 @@ +.. Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. 
in the file LICENSE.AGPL included in the packaging of this file. .. +.. The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. + + +The AVspoof Database +-------------------- + +Changelog +========= + +* **Version 4**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 3**, 30/Oct/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 2**, 24/Mar/2016: + + - Added ``_verify_train`` protocols that allow using train set in the + verification evaluations. + +* **Version 1**, 09/Feb/2016: + + - Initial release + + +Description +=========== + +The `AVspoof Database <https://www.idiap.ch/dataset/avspoof>`_ provides +non-biased spoofing attacks in order for researchers to test both their +ASV systems and anti-spoofing algorithms. The attacks are created based on +newly acquired audio recordings. The data acquisition process lasted approximately +two months with 44 persons, each participating in several sessions configured in +different environmental conditions and setups. After the collection of the data, +the attacks, more precisely, replay, voice conversion and speech synthesis attacks +were generated. This Database was produced at the Idiap Research Institute, in Switzerland. 
+ + +Acknowledgements +================ + +If you use this database, please cite the following publication on your paper:: + + @INPROCEEDINGS{KucurErgunay_IEEEBTAS_2015, + author = {Kucur Ergunay, Serife and Khoury, Elie and Lazaridis, Alexandros and Marcel, S{\'{e}}bastien}, + projects = {Idiap, SNSF-LOBI, BEAT}, + month = sep, + title = {On the Vulnerability of Speaker Verification to Realistic Voice Spoofing}, + booktitle = {IEEE International Conference on Biometrics: Theory, Applications and Systems}, + year = {2015}, + pdf = {http://publications.idiap.ch/downloads/papers/2015/KucurErgunay_IEEEBTAS_2015.pdf} + } + + +Database Description +==================== + +The data acquisition process is divided into four different sessions, each scheduled several +days apart in different setups and environmental conditions (e.g. different in terms of +background noise, reverberation, etc.) for each of 31 male and 13 female participants. +The first session which is supposed to be used as training set while creating the attacks, +was performed in the most controlled conditions. Besides, the conditions for the last +three sessions dedicated to test trials were more relaxed in order to grasp the challenging +scenarios. The audio data were recorded by three different devices including (a) one +good-quality microphone, AT2020USB+, and two mobiles, (b) Samsung Galaxy S4 (phone1) +and (c) iPhone 3GS (phone2). The positioning of the devices was stabilized for each +session and each participant in order to standardize the recording settings. + +For each session, the participant was subjected to three different data acquisition protocols as in the following: +* **Reading part (read)**: 10/40 pre-defined sentences are read by the participant. +* **Pass-phrases part (pass)**: 5 short prompts are read by the participant. +* **Free speech part (free)**: The participant speaks freely about any topic for 3 to 10 minutes. 
+ +The number, the length, as well as the content of the sentences for the reading and +pass-phrases part are carefully selected in order to satisfy the constraints in terms +of readability, data acquisition and attack quality. Similarly, the minimum duration of +the free speech part is also determined according to our preliminary investigations +mostly on the voice conversion attacks for which the free speech data would be included +in the training set. + +Spoofing Attacks +================ + +In the spoofing attack creation phase, we considered creating spoofing trials for the +text-dependent utterances of the testing data, i.e. reading parts of sessions 2-4 +and the pass-phrases of all four sessions. As a preliminary step before the creation +of the attacks, the speech data originally recorded at 44.1 KHz sampling rate is +down-sampled to 16 KHz. + +There are four main spoofing attacks for ASV systems: Impersonation, replay, +speech synthesis and voice conversion. As the impersonation is known not to be a +serious threat for ASV systems, we did not include it in our database. For the +remaining three spoofing types, we designed ten different scenarios (see table below). +We gave special attention to physical access attacks. These attacks are more realistic +than logic access attacks considering the fact that the attacker often has no direct +access to the system. The acquisition devices (sensors) are open to anyone, therefore +more subjected to such attacks. + + ++-----------------------+--------------+-------------+--------------+-------------+ +| | Num. of trials per speaker | Total num. 
of trials | ++ Attacks +--------------+-------------+--------------+-------------+ +| | Male | Female | Male | Female | ++-----------------------+--------------+-------------+--------------+-------------+ +|Replay-phone1 | 50 | 50 | 1550 | 650 | ++-----------------------+--------------+-------------+--------------+-------------+ +|Replay-phone2 | 50 | 50 | 1550 | 650 | ++-----------------------+--------------+-------------+--------------+-------------+ +|Replay-laptop | 50 | 50 | 1550 | 650 | ++-----------------------+--------------+-------------+--------------+-------------+ +|Replay-laptop-HQ | 50 | 50 | 1550 | 650 | ++-----------------------+--------------+-------------+--------------+-------------+ +|Speech-Synthesis-LA | 35 | 35 | 1085 | 455 | ++-----------------------+--------------+-------------+--------------+-------------+ +|Speech-Synthesis-PA | 35 | 35 | 1085 | 455 | ++-----------------------+--------------+-------------+--------------+-------------+ +|Speech-Synthesis-PA-HQ | 35 | 35 | 1085 | 455 | ++-----------------------+--------------+-------------+--------------+-------------+ +|Voice-Conversion-LA | 1500 | 600 | 46500 | 7800 | ++-----------------------+--------------+-------------+--------------+-------------+ +|Voice-Conversion-PA | 1500 | 600 | 46500 | 7800 | ++-----------------------+--------------+-------------+--------------+-------------+ +|Voice-Conversion-PA-HQ | 1500 | 600 | 46500 | 7800 | ++-----------------------+--------------+-------------+--------------+-------------+ + + +Replay Attacks +-------------- + +A replay attack consists of replaying a pre-recorded speech to an ASV system. +We assume that the ASV system has a good quality microphone and the replay attack targets this sensor: + +* **Replay-phone1**: Replay attack using the data captured by the Samsung mobile. The speech recorded by this mobile is replayed using its own speakers and re-recorded by the microphone of the ASV system. 
+* **Replay-phone2**: Replay attack using the data captured by the iPhone mobile. The speech recorded by this mobile is replayed using its own speakers and re-recorded by the microphone of the ASV system. +* **Replay-laptop**: Replay attack using the data captured by the microphone of the ASV system. The speech recorded by this microphone is replayed using the laptop speakers and re-recorded again by the microphone of the system. +* **Replay-laptop-HQ**: Replay attack using the data captured by the microphone of the ASV system. The speech recorded by this microphone is replayed using external high-quality loudspeakers and re-recorded using the microphone of the ASV system. + +Speech Synthesis Attacks +------------------------ + +The speech synthesis attacks were based on statistical parametric speech synthesis (SPSS). +More specifically, a hidden Markov model (HMM)-based speech synthesis technique was used. + +* **Speech-Synthesis-LA**: Speech synthesis via logical access. The synthesized speech is directly presented to the ASV system without being re-recorded. +* **Speech-Synthesis-PA**: Speech synthesis via physical access. The synthesized speech is replayed using the laptop speakers and re-recorded by the microphone of the ASV system. +* **Speech-Synthesis-PA-HQ**: Speech synthesis via high-quality physical access. The synthesized speech is replayed using external high-quality loudspeakers and re-recorded by the microphone of the ASV system. + +Voice Conversion Attacks +------------------------ + +The voice conversion attacks were created using Festvox. A conversion function +for each pair of source-target speakers is found based on the learned GMM model/parameters +by using the source and target speakers' training data. We did not consider cross-gender +voice conversion attacks, that is, only male-to-male and female-to-female conversions +were taken into account. 
As in the case of speech synthesis, three possible scenarios are involved: + +* **Voice-Conversion-LA**: Voice conversion via logical access. The converted speech is directly presented to the system without being re-recorded. +* **Voice-Conversion-PA**: Voice conversion via physical access. The converted speech is replayed using the speakers of the laptop and re-recorded by the microphone of the ASV system. +* **Voice-Conversion-PA-HQ**: Voice conversion via high-quality physical access. The converted speech is replayed using external high-quality loudspeakers and re-recorded by the microphone of the ASV system. + +Specificities to the BEAT View +============================== + +Spoofing and genuine samples are each labelled with a text field that defines +the class of the sample: ``"attack"`` or ``"real"`` for simple anti-spoofing +binary classification systems. Code using these +database views may use the ``class`` field to differentiate samples. + +The view supports the following protocols, which are also available in the database: +``smalltest`` (for proof of concept experiments only, as a subset of only three clients is +provided for each set), ``grandtest`` (data of the whole database is provided), ``physical_access`` +(only replay/presentation attacks are provided), and ``logical_access`` (only logical access +attacks are provided with no replay attacks). 
diff --git a/advanced/databases/banca/4.json b/advanced/databases/banca/4.json new file mode 100644 index 0000000..a39a276 --- /dev/null +++ b/advanced/databases/banca/4.json @@ -0,0 +1,608 @@ +{ + "description": "The BANCA Database of Faces", + "root_folder": "/idiap/group/biometric/databases/banca/english/images/images", + "protocols": [ + { + "name": "P", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "P" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "P", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "P", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "P", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": 
"{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "P", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "G", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "G" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "G", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "G", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": 
"{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "G", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "G", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "Mc", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "Mc" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "Mc", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": 
"Mc", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "Mc", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "Mc", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "Md", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "Md" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "Md", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + 
"client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "Md", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "Md", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "Md", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "Ma", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "Ma" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username 
}}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "Ma", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "Ma", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "Ma", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "Ma", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ 
system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "Ud", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "Ud" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "Ud", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "Ud", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "Ud", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "Ud", + 
"group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "Ua", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "Ua" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "Ua", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "Ua", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "Ua", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + 
"client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "Ua", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + } + ] +} diff --git a/advanced/databases/banca/4.py b/advanced/databases/banca/4.py new file mode 100644 index 0000000..c90de39 --- /dev/null +++ b/advanced/databases/banca/4.py @@ -0,0 +1,382 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. 
If not, see http://www.gnu.org/licenses/. # +# # +############################################################################### + +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View + +import bob.io.base +import bob.io.image +import bob.db.banca + + +#---------------------------------------------------------- + + +class Train(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + Several "image" are associated with a given "client_id". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'file_id', 'eye_centers', 'image']) + + # Open the 
database and load the objects to provide via the outputs + db = bob.db.banca.Database() + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups='world'), + key=lambda x: (x.client_id, x.id)) + + return [ Entry(x.client_id, x.id, db.annotations(x), x.make_path(root_folder, '.ppm')) + for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Templates(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/uint64/1" + - template_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + Several "image" are associated with a given "template_id". + Several "template_id" are associated with a given "client_id". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | template_id | | template_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | client_id | + ----------------------------------------------------------------------------------------------- + + Note: for this particular database, there is only one "template_id" + per "client_id". 
+ """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'eye_centers', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.banca.Database() + + template_ids = db.model_ids(protocol=parameters['protocol'], + groups=parameters['group']) + + entries = [] + + for template_id in template_ids: + objs = db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + purposes='enroll', + model_ids=[template_id]) + + entries.extend([ Entry(x.client_id, template_id, x.id, db.annotations(x), + x.make_path(root_folder, '.ppm')) + for x in objs ]) + + return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'template_id': + return { + 'value': np.uint64(obj.template_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Probes(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/uint64/1" + - probe_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + - template_ids: "{{ system_user.username }}/array_1d_uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". 
+ One "probe_id" is associated with a given "image". + Several "image" are associated with a given "client_id". + Several "client_id" are associated with a given "template_ids". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | template_ids | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', + 'eye_centers', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.banca.Database() + + 
template_ids = sorted(db.model_ids(protocol=parameters['protocol'], + groups=parameters['group'])) + + + template_probes = {} + for template_id in template_ids: + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + purposes='probe', + model_ids=[template_id]), + key=lambda x: (x.client_id, x.id)) + + template_probes[template_id] = [ p.id for p in objs ] + + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + purposes='probe'), + key=lambda x: (x.client_id, x.id)) + + entries = [] + for obj in objs: + templates = [ template_id for template_id in template_ids + if obj.id in template_probes[template_id] ] + + entries.append( Entry(templates, obj.client_id, obj.id, obj.id, + db.annotations(obj), obj.make_path(root_folder, '.ppm')) ) + + return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids, + x.client_id, x.probe_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'template_ids': + return { + 'value': np.uint64(obj.template_ids) + } + + elif output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'probe_id': + return { + 'value': np.uint64(obj.probe_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +def setup_tests(): + # Install a mock load function for the images + def mock_load(root_folder): + return np.ndarray((3, 10, 20), dtype=np.uint8) + + bob.io.base.load = mock_load + + +#---------------------------------------------------------- + + +# Test the behavior 
of the views (on fake data) +if __name__ == '__main__': + + setup_tests() + + view = Train() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='P' + ) + ) + view.get('client_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) + + + view = Templates() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='P', + group='dev', + ) + ) + view.get('client_id', 0) + view.get('template_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) + + + view = Probes() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='P', + group='dev', + ) + ) + view.get('template_ids', 0) + view.get('client_id', 0) + view.get('probe_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) diff --git a/advanced/databases/banca/4.rst b/advanced/databases/banca/4.rst new file mode 100644 index 0000000..fe4593a --- /dev/null +++ b/advanced/databases/banca/4.rst @@ -0,0 +1,70 @@ +.. Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. 
with the BEAT platform. If not, see http://www.gnu.org/licenses/. ..
+
+
+The BANCA Database of Faces
+---------------------------
+
+Changelog
+=========
+
+* **Version 4**, 30/Jan/2018:
+
+  - Port to beat.backend.python v1.5.0
+
+* **Version 3**, 30/Oct/2017:
+
+  - Port to beat.backend.python v1.4.2
+
+* **Version 2**, 20/Jan/2016:
+
+  - Port to Bob v2
+
+* **Version 1**, 01/Apr/2014:
+
+  - Initial release
+
+
+Description
+===========
+
+The `BANCA database <http://www.ee.surrey.ac.uk/CVSSP/banca/>`_ is a large,
+realistic and challenging multi-modal database intended for training and
+testing multi-modal verification systems. The BANCA database was captured
+in four European languages in two modalities (face and voice). For recording,
+both high- and low-quality microphones and cameras were used. The subjects
+were recorded in three different scenarios (controlled, degraded and adverse)
+over 12 different sessions spanning three months. In total 208 people were
+captured, half men and half women.
+
+.. image:: http://www.ee.surrey.ac.uk/CVSSP/banca/samples/1021_1.jpg
+   :width: 200 px
+.. image:: http://www.ee.surrey.ac.uk/CVSSP/banca/samples/1029_1.jpg
+   :width: 200 px
+.. image:: http://www.ee.surrey.ac.uk/CVSSP/banca/samples/1015_1.jpg
+   :width: 200 px
+
+This deployment of the BANCA database only consists of the images of the
+English recordings.
+
+Associated with the database is the `BANCA protocol <http://www.ee.surrey.ac.uk/CVSSP/banca/documentation/messer-avbpa03.pdf>`_.
+The protocol defines which sets of data to use for training, evaluation and
+testing. Performing experiments according to the protocol allows institutions
+to easily compare their results with those of others.
diff --git a/advanced/databases/biosecurid_face/3.json b/advanced/databases/biosecurid_face/3.json new file mode 100644 index 0000000..f6e8c8c --- /dev/null +++ b/advanced/databases/biosecurid_face/3.json @@ -0,0 +1,87 @@ +{ + "description": "The biosecurid Database of Faces", + "root_folder": "/idiap/resource/database/BiosecurID_Definitiva", + "protocols": [ + { + "name": "A", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "A" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "A", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "A", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "A", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": 
"Probes", + "parameters": { + "protocol": "A", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1" + } + } + ] + } + ] +} diff --git a/advanced/databases/biosecurid_face/3.py b/advanced/databases/biosecurid_face/3.py new file mode 100644 index 0000000..9cba7ea --- /dev/null +++ b/advanced/databases/biosecurid_face/3.py @@ -0,0 +1,324 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. 
#                                                                             #
###############################################################################

import numpy as np
from collections import namedtuple

from beat.backend.python.database import View

import bob
import bob.io.base
import bob.io.image
import bob.db.biosecurid.face


#----------------------------------------------------------


class Train(View):
    """Training set of the BiosecurID face database.

    Outputs:
      - image: "{{ system_user.username }}/array_3d_uint8/1"
      - file_id: "{{ system_user.username }}/uint64/1"
      - client_id: "{{ system_user.username }}/uint64/1"

    One "file_id" is associated with a given "image".
    Several "image" are associated with a given "client_id".
    """

    def index(self, root_folder, parameters):
        """Build the flat list of entries served by this view.

        The 'world' group of the requested protocol is used for training.
        Entries are sorted by (client_id, file_id) so that all the images
        of a client are delivered contiguously.
        """
        Entry = namedtuple('Entry', ['client_id', 'file_id', 'image'])

        # Open the database and load the objects to provide via the outputs
        db = bob.db.biosecurid.face.Database()

        objs = sorted(db.objects(protocol=parameters['protocol'],
                                 groups='world'),
                      key=lambda x: (x.client_id, x.id))

        return [ Entry(x.client_id, x.id, x.make_path(root_folder, '.bmp'))
                 for x in objs ]


    def get(self, output, index):
        """Return the value of the requested output for the given entry."""
        obj = self.objs[index]

        if output == 'client_id':
            return {
                'value': np.uint64(obj.client_id)
            }

        elif output == 'file_id':
            return {
                'value': np.uint64(obj.file_id)
            }

        elif output == 'image':
            # The image file is only read when this output is requested
            return {
                'value': bob.io.base.load(obj.image)
            }


#----------------------------------------------------------


class Templates(View):
    """Enrollment templates of the BiosecurID face database.

    Outputs:
      - image: "{{ system_user.username }}/array_3d_uint8/1"
      - file_id: "{{ system_user.username }}/uint64/1"
      - template_id: "{{ system_user.username }}/uint64/1"
      - client_id: "{{ system_user.username }}/uint64/1"

    One "file_id" is associated with a given "image".
    Several "image" are associated with a given "template_id".
    Several "template_id" are associated with a given "client_id".

    Note: for this particular database, there is only one "template_id"
    per "client_id".
    """

    def index(self, root_folder, parameters):
        """Build the list of enrollment entries, sorted by
        (client_id, template_id, file_id)."""
        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'image'])

        # Open the database and load the objects to provide via the outputs
        db = bob.db.biosecurid.face.Database()

        template_ids = db.model_ids(protocol=parameters['protocol'],
                                    groups=parameters['group'])

        entries = []

        for template_id in template_ids:
            # Note: this database API spells the purpose 'enrol' (single 'l')
            objs = db.objects(protocol=parameters['protocol'],
                              groups=parameters['group'],
                              purposes='enrol',
                              model_ids=[template_id])

            entries.extend([ Entry(x.client_id, template_id, x.id,
                                   x.make_path(root_folder, '.bmp'))
                             for x in objs ])

        return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id))


    def get(self, output, index):
        """Return the value of the requested output for the given entry."""
        obj = self.objs[index]

        if output == 'client_id':
            return {
                'value': np.uint64(obj.client_id)
            }

        elif output == 'template_id':
            return {
                'value': np.uint64(obj.template_id)
            }

        elif output == 'file_id':
            return {
                'value': np.uint64(obj.file_id)
            }

        elif output == 'image':
            return {
                'value': bob.io.base.load(obj.image)
            }


#----------------------------------------------------------


class Probes(View):
    """Probe samples of the BiosecurID face database.

    Outputs:
      - image: "{{ system_user.username }}/array_3d_uint8/1"
      - file_id: "{{ system_user.username }}/uint64/1"
      - probe_id: "{{ system_user.username }}/uint64/1"
      - client_id: "{{ system_user.username }}/uint64/1"
      - template_ids: "{{ system_user.username }}/array_1d_uint64/1"

    One "file_id" is associated with a given "image".
    One "probe_id" is associated with a given "image".
    Several "image" are associated with a given "client_id".
    Several "client_id" are associated with a given "template_ids".
    """

    def index(self, root_folder, parameters):
        """Build the list of probe entries.

        Each probe carries the list of template ids it must be compared
        against. Entries are sorted so that probes sharing the same list
        of templates are delivered contiguously.
        """
        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id',
                                     'file_id', 'image'])

        # Open the database and load the objects to provide via the outputs
        db = bob.db.biosecurid.face.Database()

        template_ids = sorted(db.model_ids(protocol=parameters['protocol'],
                                           groups=parameters['group']))

        # For each template, the ids of the probe files compared against it
        template_probes = {}
        for template_id in template_ids:
            objs = sorted(db.objects(protocol=parameters['protocol'],
                                     groups=parameters['group'],
                                     purposes='probe',
                                     model_ids=[template_id]),
                          key=lambda x: (x.client_id, x.id))

            template_probes[template_id] = [ p.id for p in objs ]


        objs = sorted(db.objects(protocol=parameters['protocol'],
                                 groups=parameters['group'],
                                 purposes='probe'),
                      key=lambda x: (x.client_id, x.id))

        entries = []
        for obj in objs:
            templates = [ template_id for template_id in template_ids
                          if obj.id in template_probes[template_id] ]

            entries.append( Entry(templates, obj.client_id, obj.id, obj.id,
                                  obj.make_path(root_folder, '.bmp')) )

        return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids,
                                              x.client_id, x.file_id))


    def get(self, output, index):
        """Return the value of the requested output for the given entry."""
        obj = self.objs[index]

        if output == 'template_ids':
            # Fixed: wrap the plain Python list in np.uint64 so a 1D uint64
            # array is produced, matching the declared 'array_1d_uint64'
            # output type and the other database views in this patch (banca)
            return {
                'value': np.uint64(obj.template_ids)
            }

        elif output == 'client_id':
            return {
                'value': np.uint64(obj.client_id)
            }

        elif output == 'probe_id':
            return {
                'value': np.uint64(obj.probe_id)
            }

        elif output == 'file_id':
            return {
                'value': np.uint64(obj.file_id)
            }

        elif output == 'image':
            return {
                'value': bob.io.base.load(obj.image)
            }


#----------------------------------------------------------


def setup_tests():
    """Install a mock image-loading function so the self-tests below do not
    require the actual image files on disk."""
    def mock_load(root_folder):
        return np.ndarray((3, 10, 20), dtype=np.uint8)

    bob.io.base.load = mock_load


#----------------------------------------------------------


# Test the behavior of the views (on fake data)
if __name__ == '__main__':

    setup_tests()

    view = Train()
    view.objs = view.index(
        root_folder='',
        parameters=dict(
            protocol='A'
        )
    )
    view.get('client_id', 0)
    view.get('file_id', 0)
    view.get('image', 0)


    view = Templates()
    view.objs = view.index(
        root_folder='',
        parameters=dict(
            protocol='A',
            group='dev',
        )
    )
    view.get('client_id', 0)
    view.get('template_id', 0)
    view.get('file_id', 0)
    view.get('image', 0)


    view = Probes()
    view.objs = view.index(
        root_folder='',
        parameters=dict(
            protocol='A',
            group='dev',
        )
    )
    view.get('template_ids', 0)
    view.get('client_id', 0)
    view.get('probe_id', 0)
    view.get('file_id', 0)
    view.get('image', 0)
diff --git
a/advanced/databases/biosecurid_face/3.rst b/advanced/databases/biosecurid_face/3.rst new file mode 100644 index 0000000..bcfa016 --- /dev/null +++ b/advanced/databases/biosecurid_face/3.rst @@ -0,0 +1,41 @@ +.. Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. + + +The BiosecurID Database of Faces +-------------------------------- + +No documentation provided by author. 
+ + +Changelog +========= + +* **Version 3**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 2**, 30/Oct/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 1**, 26/Jan/2016: + + - initial release diff --git a/advanced/databases/biowave/3.json b/advanced/databases/biowave/3.json new file mode 100644 index 0000000..63a4586 --- /dev/null +++ b/advanced/databases/biowave/3.json @@ -0,0 +1,1579 @@ +{ + "root_folder": "/idiap/project/biowave/biowave_v1/data", + "description": "BIOWAVE Vein Database", + "protocols": [ + { + "name": "Idiap_1_1_R_BEAT_test", + "template": "advanced_vein_recognition_2", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "Idiap_1_1_R_BEAT_test", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_1_1_R_BEAT_test", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "Idiap_1_1_R_BEAT_test", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_1_1_R_BEAT_test", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + 
"image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_1_1_R_BEAT_test", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "Idiap_5_5_R_BEAT_test", + "template": "advanced_vein_recognition_2", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "Idiap_5_5_R_BEAT_test", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_5_5_R_BEAT_test", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "Idiap_5_5_R_BEAT_test", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_5_5_R_BEAT_test", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_5_5_R_BEAT_test", + "group": "eval", + "purpose": "probe" + }, + 
"template": "probe" + } + ] + }, + { + "name": "Idiap_1_5_R_BEAT_test", + "template": "advanced_vein_recognition_2", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "Idiap_1_5_R_BEAT_test", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_1_5_R_BEAT_test", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "Idiap_1_5_R_BEAT_test", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_1_5_R_BEAT_test", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_1_5_R_BEAT_test", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "Idiap_1_1_R", + "template": "advanced_vein_recognition_2", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": 
"View", + "template": "train", + "parameters": { + "protocol": "Idiap_1_1_R", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_1_1_R", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "Idiap_1_1_R", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_1_1_R", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_1_1_R", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "Idiap_1_5_R", + "template": "advanced_vein_recognition_2", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "Idiap_1_5_R", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": 
"TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_1_5_R", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "Idiap_1_5_R", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_1_5_R", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_1_5_R", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "Idiap_3_5_R", + "template": "advanced_vein_recognition_2", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "Idiap_3_5_R", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_3_5_R", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + 
"name": "dev_probes", + "parameters": { + "protocol": "Idiap_3_5_R", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_3_5_R", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_3_5_R", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "Idiap_5_5_R", + "template": "advanced_vein_recognition_2", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "Idiap_5_5_R", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_5_5_R", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "Idiap_5_5_R", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + 
}, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_5_5_R", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_5_5_R", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "Idiap_1_1_L", + "template": "advanced_vein_recognition_2", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "Idiap_1_1_L", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_1_1_L", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "Idiap_1_1_L", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_1_1_L", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": 
"View", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_1_1_L", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "Idiap_1_5_L", + "template": "advanced_vein_recognition_2", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "Idiap_1_5_L", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_1_5_L", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "Idiap_1_5_L", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_1_5_L", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_1_5_L", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "Idiap_5_5_L", + "template": "advanced_vein_recognition_2", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", 
+ "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "Idiap_5_5_L", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_5_5_L", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "Idiap_5_5_L", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_5_5_L", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_5_5_L", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "Idiap_1_1_R_less", + "template": "advanced_vein_recognition_2", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "Idiap_1_1_R_less", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username 
}}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_1_1_R_less", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "Idiap_1_1_R_less", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_1_1_R_less", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_1_1_R_less", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "Idiap_1_5_R_less", + "template": "advanced_vein_recognition_2", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "Idiap_1_5_R_less", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_1_5_R_less", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ 
system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "Idiap_1_5_R_less", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_1_5_R_less", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_1_5_R_less", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "Idiap_5_5_R_less", + "template": "advanced_vein_recognition_2", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "Idiap_5_5_R_less", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_5_5_R_less", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "Idiap_5_5_R_less", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + 
"model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_5_5_R_less", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_5_5_R_less", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "Idiap_1_1_R_annotations_BEAT_test", + "template": "advanced_vein_annotations", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "ViewAnnotations", + "template": "train", + "parameters": { + "protocol": "Idiap_1_1_R_BEAT_test", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateViewAnnotations", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_1_1_R_BEAT_test", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username 
}}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "ViewAnnotations", + "name": "dev_probes", + "parameters": { + "protocol": "Idiap_1_1_R_BEAT_test", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateViewAnnotations", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_1_1_R_BEAT_test", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateViewAnnotations", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_1_1_R_BEAT_test", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "Idiap_2_1_R_annotations_BEAT_test", + "template": "advanced_vein_annotations", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username 
}}/uint64/1" + }, + "view": "ViewAnnotations", + "template": "train", + "parameters": { + "protocol": "Idiap_5_5_R_BEAT_test", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateViewAnnotations", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_5_5_R_BEAT_test", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "ViewAnnotations", + "name": "dev_probes", + "parameters": { + "protocol": "Idiap_5_5_R_BEAT_test", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateViewAnnotations", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_5_5_R_BEAT_test", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + 
"vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateViewAnnotations", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_5_5_R_BEAT_test", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "Idiap_1_1_R_annotations", + "template": "advanced_vein_annotations", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "ViewAnnotations", + "template": "train", + "parameters": { + "protocol": "Idiap_1_1_R", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateViewAnnotations", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_1_1_R", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" 
+ }, + "view": "ViewAnnotations", + "name": "dev_probes", + "parameters": { + "protocol": "Idiap_1_1_R", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateViewAnnotations", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_1_1_R", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateViewAnnotations", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_1_1_R", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "Idiap_2_1_R_annotations", + "template": "advanced_vein_annotations", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "ViewAnnotations", + "template": "train", + "parameters": { + "protocol": "Idiap_5_5_R", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ 
system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateViewAnnotations", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_5_5_R", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "ViewAnnotations", + "name": "dev_probes", + "parameters": { + "protocol": "Idiap_5_5_R", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateViewAnnotations", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_5_5_R", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": 
"TemplateViewAnnotations", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_5_5_R", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "Idiap_1_1_L_annotations", + "template": "advanced_vein_annotations", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "ViewAnnotations", + "template": "train", + "parameters": { + "protocol": "Idiap_1_1_L", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateViewAnnotations", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_1_1_L", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "ViewAnnotations", + "name": "dev_probes", + "parameters": { + "protocol": "Idiap_1_1_L", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username 
}}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateViewAnnotations", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_1_1_L", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateViewAnnotations", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_1_1_L", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "Idiap_2_1_L_annotations", + "template": "advanced_vein_annotations", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "ViewAnnotations", + "template": "train", + "parameters": { + "protocol": "Idiap_5_5_L", + "group": "world" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ 
system_user.username }}/uint64/1" + }, + "view": "TemplateViewAnnotations", + "name": "dev_templates", + "parameters": { + "protocol": "Idiap_5_5_L", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "ViewAnnotations", + "name": "dev_probes", + "parameters": { + "protocol": "Idiap_5_5_L", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateViewAnnotations", + "name": "eval_templates", + "parameters": { + "protocol": "Idiap_5_5_L", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateViewAnnotations", + "name": "eval_probes", + "parameters": { + "protocol": "Idiap_5_5_L", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "annotation_benchmark", + "template": "advanced_annotation_benchmark", + "sets": [ + { + "outputs": { + 
"image": "{{ system_user.username }}/array_2d_uint8/1", + "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1", + "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1" + }, + "view": "AnnotationBenchmark", + "template": "annotation_benchmark", + "parameters": { + "protocol": "Idiap_1_1_R" + }, + "name": "annotations" + } + ] + } + ] +} diff --git a/advanced/databases/biowave/3.py b/advanced/databases/biowave/3.py new file mode 100644 index 0000000..87a638d --- /dev/null +++ b/advanced/databases/biowave/3.py @@ -0,0 +1,499 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. 
# +# # +############################################################################### + +import os +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View as BaseView + +import bob.io.base +import bob.io.image +import bob.db.biowave_v1 + +from PIL import Image, ImageDraw, ImageFilter + + +#---------------------------------------------------------- + + +def construct_ROI_image(annotations): + """Adapted from bob.db.biowave_v1, because we want to separate it in two steps: + indexing and image construction""" + + if len(annotations) > 0: + return bob.db.biowave_v1.utils.ManualRoiCut(annotations).roi_mask() + else: + return np.array([], np.uint8) + + +#---------------------------------------------------------- + + +def construct_vein_image(annotations, center=False): + """Adapted from bob.db.biowave_v1, because we want to separate it in two steps: + indexing and image construction""" + + if len(annotations) > 0: + im = Image.new('L', (480, 480), (0)) + draw = ImageDraw.Draw(im) + if center: + xes_all = [point[1] for line in annotations for point in line] + yes_all = [point[0] for line in annotations for point in line] + for line in annotations: + xes = [point[1] - np.round(np.mean(xes_all)) + 239 for point in line] + yes = [point[0] - np.round(np.mean(yes_all)) + 239 for point in line] + for point in range(len(line) - 1): + draw.line((xes[point],yes[point], xes[point+1], yes[point+1]), fill=255, width = 5) + else: + for line in annotations: + xes = [point[1] for point in line] + yes = [point[0] for point in line] + for point in range(len(line) - 1): + draw.line((xes[point],yes[point], xes[point+1], yes[point+1]), fill=255, width = 5) + im = im.filter(ImageFilter.MedianFilter(5)) + return np.array(np.array(im, dtype = bool), dtype = np.uint8) + else: + return np.array([], np.uint8) + + +#---------------------------------------------------------- + + +def construct_alignment_annotations(annotations): + return [ dict( + x 
= np.int32(annotation[1]), + y = np.int32(annotation[0]) + ) for annotation in annotations ] + + +#---------------------------------------------------------- + + +class View(BaseView): + """Outputs: + - image: "{{ system_user.username }}/array_2d_uint8/1" + - client_id: "{{ system_user.username }}/uint64/1" + + Several "image" are associated with a given "client_id". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.biowave_v1.Database() + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=[parameters['group']], + purposes=parameters.get('purpose', None), + annotated_images=False, + imagedir=root_folder), + key=lambda x: (x.client_id, x.id)) + + return [ Entry(x.client_id, x.make_path(root_folder, '.png')) for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class TemplateView(BaseView): + """Outputs: + - image: "{{ system_user.username }}/array_2d_uint8/1" + - model_id: "{{ system_user.username }}/text/1" + - client_id: "{{ system_user.username }}/uint64/1" + + Several "image" are associated with a given "model_id". + Several "model_id" are associated with a given "client_id". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | model_id | | model_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | client_id | + ----------------------------------------------------------------------------------------------- + + Note: for this particular database, there is only one "image" + per "model_id". + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'model_id', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.biowave_v1.Database() + + objs = sorted(db.objects(protocol=parameters['protocol'], + purposes=parameters['purpose'], + groups=[parameters['group']], + annotated_images=False, + imagedir=root_folder), + key=lambda x: (x.client_id, x.id)) + + return [ Entry(x.client_id, x.model_id.encode('utf-8'), + x.make_path(root_folder, '.png')) + for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'model_id': + return { + 'text': str(obj.model_id) + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class ViewAnnotations(BaseView): + """Outputs: + - image: "{{ system_user.username }}/array_2d_uint8/1" + - vein_annotations: "{{ system_user.username }}/array_2d_uint8/1" + - ROI_annotations: "{{ system_user.username }}/array_2d_uint8/1" + - alignment_annotations: "{{ system_user.username 
}}/array_1d_coordinates/1" + - client_id: "{{ system_user.username }}/uint64/1" + + One "vein_annotations" are associated with a given "image". + One "ROI_annotations" are associated with a given "image". + One "alignment_annotations" are associated with a given "image". + Several "image" are associated with a given "client_id". + + ------------------------- ------------------------- ------------------------- ------------------------- + | image | | image | | image | | image | + ------------------------- ------------------------- ------------------------- ------------------------- + ------------------------- ------------------------- ------------------------- ------------------------- + | vein_annotations | | vein_annotations | | vein_annotations | | vein_annotations | + ------------------------- ------------------------- ------------------------- ------------------------- + ------------------------- ------------------------- ------------------------- ------------------------- + | ROI_annotations | | ROI_annotations | | ROI_annotations | | ROI_annotations | + ------------------------- ------------------------- ------------------------- ------------------------- + ------------------------- ------------------------- ------------------------- ------------------------- + | alignment_annotations | | alignment_annotations | | alignment_annotations | | alignment_annotations | + ------------------------- ------------------------- ------------------------- ------------------------- + --------------------------------------------------- --------------------------------------------------- + | client_id | | client_id | + --------------------------------------------------- --------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'alignment_annotations', 'ROI_annotations', + 'vein_annotations', 'image']) + + # Open the database and load the objects to provide via the outputs + db = 
bob.db.biowave_v1.Database() + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=[parameters['group']], + purposes=parameters.get('purpose', None), + annotated_images=True, + imagedir=root_folder), + key=lambda x: (x.client_id, x.id)) + + return [ Entry(x.client_id, + x.alignment_annotations(directory=root_folder), + x.roi_annotations(directory=root_folder), + x.vein_annotations(directory=root_folder), + x.make_path(root_folder, '.png')) + for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'alignment_annotations': + return { + 'value': construct_alignment_annotations(obj.alignment_annotations) + } + + elif output == 'ROI_annotations': + return { + 'value': construct_ROI_image(obj.ROI_annotations) + } + + elif output == 'vein_annotations': + return { + 'value': construct_vein_image(obj.vein_annotations) + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class TemplateViewAnnotations(BaseView): + """Outputs: + - image: "{{ system_user.username }}/array_2d_uint8/1" + - vein_annotations: "{{ system_user.username }}/array_2d_uint8/1" + - ROI_annotations: "{{ system_user.username }}/array_2d_uint8/1" + - alignment_annotations: "{{ system_user.username }}/array_1d_coordinates/1" + - model_id: "{{ system_user.username }}/model_id/1" + - client_id: "{{ system_user.username }}/uint64/1" + + One "vein_annotations" are associated with a given "image". + One "ROI_annotations" are associated with a given "image". + One "alignment_annotations" are associated with a given "image". + Several "image" are associated with a given "model_id". + Several "model_id" are associated with a given "client_id". 
+ + ------------------------- ------------------------- ------------------------- ------------------------- + | image | | image | | image | | image | + ------------------------- ------------------------- ------------------------- ------------------------- + ------------------------- ------------------------- ------------------------- ------------------------- + | vein_annotations | | vein_annotations | | vein_annotations | | vein_annotations | + ------------------------- ------------------------- ------------------------- ------------------------- + ------------------------- ------------------------- ------------------------- ------------------------- + | ROI_annotations | | ROI_annotations | | ROI_annotations | | ROI_annotations | + ------------------------- ------------------------- ------------------------- ------------------------- + ------------------------- ------------------------- ------------------------- ------------------------- + | alignment_annotations | | alignment_annotations | | alignment_annotations | | alignment_annotations | + ------------------------- ------------------------- ------------------------- ------------------------- + --------------------------------------------------- --------------------------------------------------- + | model_id | | model_id | + --------------------------------------------------- --------------------------------------------------- + ------------------------------------------------------------------------------------------------------- + | client_id | + ------------------------------------------------------------------------------------------------------- + + Note: for this particular database, there is only one "image" + per "model_id". 
+ """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'model_id', 'alignment_annotations', + 'ROI_annotations', 'vein_annotations', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.biowave_v1.Database() + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=[parameters['group']], + purposes=parameters['purpose'], + annotated_images=True, + imagedir=root_folder), + key=lambda x: (x.client_id, x.model_id, x.id)) + + return [ Entry(x.client_id, + x.model_id.encode('utf-8'), + x.alignment_annotations(directory=root_folder), + x.roi_annotations(directory=root_folder), + x.vein_annotations(directory=root_folder), + x.make_path(root_folder, '.png')) + for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'model_id': + return { + 'text': obj.model_id + } + + elif output == 'alignment_annotations': + return { + 'value': construct_alignment_annotations(obj.alignment_annotations) + } + + elif output == 'ROI_annotations': + return { + 'value': construct_ROI_image(obj.ROI_annotations) + } + + elif output == 'vein_annotations': + return { + 'value': construct_vein_image(obj.vein_annotations) + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class AnnotationBenchmark(BaseView): + """Outputs: + - image: "{{ system_user.username }}/array_2d_uint8/1" + - vein_annotations: "{{ system_user.username }}/array_2d_uint8/1" + - ROI_annotations: "{{ system_user.username }}/array_2d_uint8/1" + - alignment_annotations: "{{ system_user.username }}/array_1d_coordinates/1" + + One "vein_annotations" are associated with a given "image". + One "ROI_annotations" are associated with a given "image". + One "alignment_annotations" are associated with a given "image". 
+ + ------------------------- ------------------------- ------------------------- ------------------------- + | image | | image | | image | | image | + ------------------------- ------------------------- ------------------------- ------------------------- + ------------------------- ------------------------- ------------------------- ------------------------- + | vein_annotations | | vein_annotations | | vein_annotations | | vein_annotations | + ------------------------- ------------------------- ------------------------- ------------------------- + ------------------------- ------------------------- ------------------------- ------------------------- + | ROI_annotations | | ROI_annotations | | ROI_annotations | | ROI_annotations | + ------------------------- ------------------------- ------------------------- ------------------------- + ------------------------- ------------------------- ------------------------- ------------------------- + | alignment_annotations | | alignment_annotations | | alignment_annotations | | alignment_annotations | + ------------------------- ------------------------- ------------------------- ------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['alignment_annotations', 'ROI_annotations', + 'vein_annotations', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.biowave_v1.Database() + + objs = sorted(db.objects(protocol=parameters['protocol'], + annotated_images=True, + imagedir=root_folder), + key=lambda x: x.id) + + return [ Entry(x.alignment_annotations(directory=root_folder), + x.roi_annotations(directory=root_folder), + x.vein_annotations(directory=root_folder), + x.make_path(root_folder, '.png')) + for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'alignment_annotations': + return { + 'value': construct_alignment_annotations(obj.alignment_annotations) + } + + elif output == 
'ROI_annotations': + return { + 'value': construct_ROI_image(obj.ROI_annotations) + } + + elif output == 'vein_annotations': + return { + 'value': construct_vein_image(obj.vein_annotations) + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +def setup_tests(): + # Install a mock methods + def mock_load(root_folder): + return np.ndarray((10, 20), dtype=np.uint8) + + bob.io.base.load = mock_load + + +#---------------------------------------------------------- + + +# Test the behavior of the views (on fake data) +if __name__ == '__main__': + + setup_tests() + + view = View() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol="Idiap_1_1_R_BEAT_test", + group="world", + ) + ) + view.get('client_id', 0) + view.get('image', 0) + + + view = TemplateView() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol="Idiap_1_1_R_BEAT_test", + group="dev", + purpose="enroll", + ) + ) + view.get('client_id', 0) + view.get('model_id', 0) + view.get('image', 0) diff --git a/advanced/databases/biowave/3.rst b/advanced/databases/biowave/3.rst new file mode 100644 index 0000000..f6a9154 --- /dev/null +++ b/advanced/databases/biowave/3.rst @@ -0,0 +1,212 @@ +.. Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. 
The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. + +BIOWAVE Vein Database +--------------------- + +Changelog +========= + +* **Version 3**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 2**, 17/Nov/2017: + + - Add protocol 'Idiap_3_5_R' + +* **Version 1**, 30/Oct/2017: + + - Initial release + + +Description +=========== + + +The ``BIOWAVE`` database contains wrist vein images that are acquired using +BIOWATCH biometric sensor. For each subject of the database there are 3 session +images (sessions were held at least 24 hours apart). Each session consists of 5 +attempts, in each attempt 5 images were acquired, meaning, that there are +``3 sessions x 5 attempts x 5 images = 75`` images per each person's hand, +``75 x 2 images`` per person. + +Images were previously manually evaluated, and if any of the 75 one hand's +images were unusable (too blurred, veins couldn't be seen, etc), then all hand +data were discarded. That is why some persons have only 1 hand's images in the +database. 
+ +Statistics of the data - in total 111 hands: + +1) Users with both hands images - 53 +2) Users with only R hand images - 4 +3) Users with only L hand images - 1 + + + +Data division in ``groups`` and ``purposes`` +############################################ + + +In the BIOWAVE database there are following groups and purposes: + + ++-------------+-----------+-----------------------+------------------------+ +| **groups** | ``world`` | ``dev`` | ``eval`` | ++-------------+-----------+-----------------------+------------------------+ +|**purposes** | ``train`` |``enroll`` / ``probe`` | ``enroll`` / ``probe`` | ++-------------+-----------+-----------------------+------------------------+ + + +Data are divided into training, development and evaluation data sets: + +1) ``dev`` data set -- ``20`` persons with both hand images; +2) ``eval`` data set -- ``20`` persons with both hand images; +3) ``world`` data set -- the rest of ``13`` persons with both hand images + ``4`` persons with only ``right`` hand and the ``1`` person with only ``left`` hand images. + +Data are divided as follows (the number means original persons ``ID`` (as in +data collection)): + +1) ``dev`` data - [1, 3, 5, 6, 12, 15, 22, 23, 25, 45, 69, 71, 74, 76, 77, 78, 84, 129, 132, 134] +2) ``eval`` data - [4, 7, 21, 42, 43, 48, 54, 57, 58, 72, 73, 75, 83, 90, 99, 130, 131, 138, 141, 143] +3) ``world`` data - [13, 14, 17, 20, 24, 28, 31, 39, 51, 60, 68, 128, 133] + [37] + [2, 11, 46, 70] + +In the ``enroll`` data sets are images only from session ``1`` whereas in +``probe`` data sets -- session's ``2`` and ``3`` data. + + +Protocols +######### + +There currently are 6 *standard* protocols: + +- ``Idiap_1_1_R`` +- ``Idiap_1_5_R`` +- ``Idiap_5_5_R`` +- ``Idiap_1_1_L`` +- ``Idiap_1_5_L`` +- ``Idiap_5_5_L`` + +The general form:: + + Idiap_<a>_<b>_<c> + +* a -- number of attempts used to construct model; +* b -- number of attempt images used to construct model; +* c -- hand (``L`` or ``R``). 
Always all images are used, the opposite hand's images are mirrored. + +E.g. in protocol ``Idiap_5_5_R`` per one hand only one enroll model is +constructed using all session ``1`` images (``5 attempt x 5 images``). In this +protocol ``right`` hands are included unchanged, but ``left`` hands are +mirrored. + +After data alignment were annotated, that included common point finding between +each person images, we noted that some of the person data were almost impossible +to annotate. Hands were: + +- Person_002/Right +- Person_005/Left +- Person_015/Right +- Person_046/Right +- Person_068/Right +- Person_068/Left +- Person_129/Right +- Person_138/Left + +For protocols: + +- ``Idiap_1_1_R_less`` +- ``Idiap_1_5_R_less`` +- ``Idiap_5_5_R_less`` + +also listed hand data were removed. + + +Protocol ``Idiap_3_5_R`` is not *standard* - in this protocol enroll model is +constructed using all 3 session 1st attempt images (thus each model consists of +``3x5=15`` images). This protocol is made to understand the enroll image +variabilities impact on the recognition rate. + +Finally, the protocols: + +- ``Idiap_1_1_R_BEAT_test`` +- ``Idiap_1_5_R_BEAT_test`` +- ``Idiap_5_5_R_BEAT_test`` + +are made as test protocols mainly for the BEAT platform. Again, the numbers in +the name indicates number of images per attempt and attempt count per each +enroll model. Each of the protocol groups consists of only 2 person data: + +- train -- [128, 133], +- dev -- [132, 134], +- eval -- [141, 143]. + +All these protocols uses the ``advanced_vein_recognition_2`` template. + +Annotations +########### + + +8% of database files have annotations (2 images per session, 6 images per hand, +altogether 666 images). There are: + +- vein pattern binary annotations; +- ROI binary annotations; +- alignment point annotations (for each hand 3-5 points marked). 
+ + +Annotations to replace extractors +################################# + + +Special protocols that consists only the files that have annotations are made. +Meaning, these protocols can be used to run !!!!! TBD + + + + + +The organisation of the database is the same as before, only now, because there +are only 2 annotated images per session, protocols are renamed accordingly (also +note, that now protocols ``Idiap_1_5_R`` and ``Idiap_1_5_L`` doesn't make any +sense and are removed, because there are only 1 image per attempt that is +annotated): + +- ``Idiap_1_1_R`` -> renamed as ``Idiap_1_1_R_annotations``; +- ``Idiap_5_5_R`` -> renamed as ``Idiap_2_1_R_annotations``; +- ``Idiap_1_1_L`` -> renamed as ``Idiap_1_1_L_annotations``; +- ``Idiap_5_5_L`` -> renamed as ``Idiap_2_1_L_annotations``; + + +Also, the same as ``Idiap_<a>_<b>_<c>_BEAT_test`` protocols were created to +speedy test BEAT implementation and compare results with ``bob.bio.vein``, also +test protocols for annotations are implemented: + +- ``Idiap_1_1_R_annotations_BEAT_test``; +- ``Idiap_2_1_R_annotations_BEAT_test``; +- ``Idiap_1_1_L_annotations_BEAT_test``; +- ``Idiap_2_1_L_annotations_BEAT_test``; + +These protocols uses the ``advanced_vein_annotations`` template. + +Also, to benchmark different algorithms against **all** image annotations, there +are implemented protocol ``annotations_benchmark`` that is implemented using +``advanced_annotation_benchmark`` template. 
+ diff --git a/advanced/databases/casme2/4.json b/advanced/databases/casme2/4.json new file mode 100644 index 0000000..c165d1a --- /dev/null +++ b/advanced/databases/casme2/4.json @@ -0,0 +1,1010 @@ +{ + "description": "CASME 2 Spotaneous Subtle Expression Database", + "root_folder": "/idiap/resource/database/CASME2", + "protocols": [ + { + "name": "fold_1", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_1" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_1" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + { + "name": "fold_2", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_2" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_2" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + { + 
"name": "fold_3", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_3" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_3" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + { + "name": "fold_4", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_4" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_4" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + + { + "name": "fold_5", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_5" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ 
system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_5" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + + { + "name": "fold_6", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_6" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_6" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + + { + "name": "fold_7", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_7" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_7" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username 
}}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + + { + "name": "fold_8", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_8" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_8" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + { + "name": "fold_9", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_9" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_9" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + + + { + "name": "fold_10", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": 
"fold_10" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_10" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + + { + "name": "fold_11", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_11" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_11" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + + { + "name": "fold_12", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_12" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + 
"group": "test", + "protocol": "fold_12" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + + { + "name": "fold_13", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_13" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_13" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + + { + "name": "fold_14", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_14" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_14" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + + { + "name": "fold_15", + "template": 
"simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_15" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_15" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + { + "name": "fold_16", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_16" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_16" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + { + "name": "fold_17", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_17" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username 
}}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_17" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + + { + "name": "fold_18", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_18" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_18" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + + { + "name": "fold_19", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_19" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_19" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + 
"image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + { + "name": "fold_20", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_20" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_20" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + + { + "name": "fold_21", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_21" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_21" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + + { + "name": "fold_22", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_22" + 
}, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_22" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + + { + "name": "fold_23", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_23" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_23" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + + { + "name": "fold_24", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_24" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": 
"test", + "protocol": "fold_24" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + { + "name": "fold_25", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_25" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_25" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + }, + + + + { + "name": "fold_26", + "template": "simple_expression_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train", + "protocol": "fold_26" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test", + "protocol": "fold_26" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_4d_uint8/1", + "emotion": "{{ system_user.username }}/text/1" + } + } + ] + } + + + ] +} diff --git a/advanced/databases/casme2/4.py 
b/advanced/databases/casme2/4.py new file mode 100644 index 0000000..8f2ef31 --- /dev/null +++ b/advanced/databases/casme2/4.py @@ -0,0 +1,148 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. # +# # +############################################################################### + +import os +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View as BaseView + +import bob.io.base +import bob.io.image +import bob.db.casme2 + + +#---------------------------------------------------------- + + +class View(BaseView): + """Outputs: + - image: "{{ system_user.username }}/array_4d_uint8/1" + - file_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + - emotion: "{{ system_user.username }}/text/1" + + One "file_id" is associated with a given "image". + Several "image" are associated with a given "client_id". 
+ Several "client_id" are associated with a given "emotion". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | emotion | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['emotion', 'client_id', 'file_id', 'frames']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.casme2.Database() + + objs = sorted(db.objects(protocol=str(parameters['protocol']), + groups=parameters['group']), + key=lambda x: (x.emotion, x.client_id, x.id)) + + entries = [] + + for obj in objs: + frames = [ str(os.path.join(obj.make_path(), x.filename)). 
+ replace('/idiap/resource/database/CASME2/Cropped', root_folder) + for x in obj.frames ] + + entries.append(Entry(obj.emotion, obj.client_id, obj.id, frames)) + + return entries + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'emotion': + return { + 'value': obj.emotion + } + + elif output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'image': + frame = bob.io.base.load(obj.frames[0]) + + data = np.zeros(shape=(len(obj.frames), frame.shape[0], frame.shape[1], frame.shape[2]), dtype="uint8") + data[0] = frame + + for i in range(1, len(obj.frames)): + data[i] = bob.io.base.load(obj.frames[i]) + + return { + 'value': data + } + + +#---------------------------------------------------------- + + +def setup_tests(): + # Install a mock load function for the images + def mock_load(root_folder): + return np.ndarray((3, 10, 20), dtype=np.uint8) + + bob.io.base.load = mock_load + + +#---------------------------------------------------------- + + +# Test the behavior of the views (on fake data) +if __name__ == '__main__': + + setup_tests() + + view = View() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='fold_1', + group='train', + ) + ) + view.get('emotion', 0) + view.get('client_id', 0) + view.get('file_id', 0) + view.get('image', 0) diff --git a/advanced/databases/casme2/4.rst b/advanced/databases/casme2/4.rst new file mode 100644 index 0000000..0fa5dc8 --- /dev/null +++ b/advanced/databases/casme2/4.rst @@ -0,0 +1,65 @@ +.. Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. 
+.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. + + +----------------------------------------------- + CASME 2 Spontaneous Subtle Expression Database +----------------------------------------------- + +Changelog +========= + +* **Version 4**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 3**, 30/Oct/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 2**, 26/Jan/2016: + + - Port to Bob v2 + +* **Version 1**, 13/May/2015: + + - Initial release + + +Description +=========== + +The CASME II database (http://fu.psych.ac.cn/CASME/casme2-en.php) has the +following characteristics: + +- The samples are spontaneous and dynamic micro-expressions. Baseline (usually + neutral) frames are kept before and after each micro-expression, making it + possible to evaluate different detection algorithms. +- The recordings have high temporal resolution (200 fps) and relatively higher + face resolution at 280x340 pixels. +- Micro-expression labeling is based on FACS investigator's guide and Yan et + al. findings (Yan et al., 2013) that is different from the traditional 6 + categories on ordinary facial expression. +- The recordings have proper illumination without lighting flickers and with + reduced highlight regions of the face.
+- Some types of facial expressions are difficult to elicit in laboratory + situations, thus the samples in different categories distributed unequally, + e.g., there are 60 disgust samples but only 7 sadness samples. In CASME II, + we provide 5 classes of micro-expressions. diff --git a/advanced/databases/cbsr_nir_vis_2/4.json b/advanced/databases/cbsr_nir_vis_2/4.json new file mode 100644 index 0000000..4910f19 --- /dev/null +++ b/advanced/databases/cbsr_nir_vis_2/4.json @@ -0,0 +1,866 @@ +{ + "description": "CASIA NIR-VIS 2.0 Face Database", + "root_folder": "/idiap/resource/database/cbsr_nir_vis_2", + "protocols": [ + { + "name": "view2_1", + "template": "advanced_face_recognition_textid", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "view2_1" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_1", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_1", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username 
}}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_1", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_1", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "view2_2", + "template": "advanced_face_recognition_textid", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "view2_2" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_2", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_2", + 
"group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_2", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_2", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "view2_3", + "template": "advanced_face_recognition_textid", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "view2_3" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_3", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + 
"client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_3", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_3", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_3", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "view2_4", + "template": "advanced_face_recognition_textid", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "view2_4" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username 
}}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_4", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_4", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_4", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_4", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ 
system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "view2_5", + "template": "advanced_face_recognition_textid", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "view2_5" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_5", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_5", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_5", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": 
"view2_5", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "view2_6", + "template": "advanced_face_recognition_textid", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "view2_6" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_6", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_6", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_6", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username 
}}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_6", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "view2_7", + "template": "advanced_face_recognition_textid", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "view2_7" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_7", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_7", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username 
}}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_7", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_7", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "view2_8", + "template": "advanced_face_recognition_textid", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "view2_8" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_8", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + 
{ + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_8", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_8", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_8", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "view2_9", + "template": "advanced_face_recognition_textid", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "view2_9" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + 
"protocol": "view2_9", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_9", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_9", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_9", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "view2_10", + "template": "advanced_face_recognition_textid", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "view2_10" + }, 
+ "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_10", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_10", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view2_10", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view2_10", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username 
}}/array_1d_text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + } + ] +} diff --git a/advanced/databases/cbsr_nir_vis_2/4.py b/advanced/databases/cbsr_nir_vis_2/4.py new file mode 100644 index 0000000..ef3c42a --- /dev/null +++ b/advanced/databases/cbsr_nir_vis_2/4.py @@ -0,0 +1,413 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. 
# +# # +############################################################################### + +import os +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View + +import bob.io.base +import bob.io.image +import bob.db.cbsr_nir_vis_2 + + +#---------------------------------------------------------- + + +class Train(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/text/1" + - client_id: "{{ system_user.username }}/text/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + Several "image" are associated with a given "client_id". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'file_id', 'eye_centers', 'image']) + + # Open the database and load the objects to 
provide via the outputs + db = bob.db.cbsr_nir_vis_2.Database( + annotation_directory=os.path.join(root_folder, 'annotations') + ) + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups='world'), + key=lambda x: (x.client_id, x.id)) + + entries = [] + + for obj in objs: + if os.path.exists(obj.make_path(root_folder, '.jpg')): + filename = obj.make_path(root_folder, '.jpg') + else: + filename = obj.make_path(root_folder, '.bmp') + + entries.append(Entry(obj.client_id, obj.id, db.annotations(obj), filename)) + + return entries + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'text': obj.client_id + } + + elif output == 'file_id': + return { + 'text': obj.file_id + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Templates(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/text/1" + - template_id: "{{ system_user.username }}/text/1" + - client_id: "{{ system_user.username }}/text/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + Several "image" are associated with a given "template_id". + Several "template_id" are associated with a given "client_id". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | template_id | | template_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | client_id | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'eye_centers', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.cbsr_nir_vis_2.Database( + annotation_directory=os.path.join(root_folder, 'annotations') + ) + + template_ids = db.model_ids(protocol=parameters['protocol'], + groups=parameters['group']) + + entries = [] + + for template_id in template_ids: + objs = db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + purposes='enroll', + model_ids=[template_id]) + + for obj in objs: + if os.path.exists(obj.make_path(root_folder, '.jpg')): + filename = obj.make_path(root_folder, '.jpg') + 
else: + filename = obj.make_path(root_folder, '.bmp') + + entries.append(Entry(obj.client_id, template_id, obj.id, + db.annotations(obj), filename)) + + return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'text': obj.client_id + } + + elif output == 'template_id': + return { + 'text': obj.template_id + } + + elif output == 'file_id': + return { + 'text': obj.file_id + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Probes(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/text/1" + - probe_id: "{{ system_user.username }}/text/1" + - client_id: "{{ system_user.username }}/text/1" + - template_ids: "{{ system_user.username }}/array_1d_text/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + One "probe_id" is associated with a given "image". + Several "image" are associated with a given "client_id". + Several "client_id" are associated with a given "template_ids". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | template_ids | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', + 'eye_centers', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.cbsr_nir_vis_2.Database( + annotation_directory=os.path.join(root_folder, 'annotations') + ) + + template_ids = sorted(db.model_ids(protocol=parameters['protocol'], + groups=parameters['group'])) + + + 
template_probes = {} + for template_id in template_ids: + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + purposes='probe', + model_ids=[template_id]), + key=lambda x: (x.client_id, x.id)) + + template_probes[template_id] = [ p.id for p in objs ] + + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + purposes='probe'), + key=lambda x: (x.client_id, x.id)) + + entries = [] + for obj in objs: + templates = [ template_id for template_id in template_ids + if obj.id in template_probes[template_id] ] + + if os.path.exists(obj.make_path(root_folder, '.jpg')): + filename = obj.make_path(root_folder, '.jpg') + else: + filename = obj.make_path(root_folder, '.bmp') + + entries.append( Entry(templates, obj.client_id, obj.id, obj.id, + db.annotations(obj), filename) ) + + return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids, + x.client_id, x.probe_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'template_ids': + return { + 'text': obj.template_ids + } + + elif output == 'client_id': + return { + 'text': obj.client_id + } + + elif output == 'probe_id': + return { + 'text': obj.probe_id + } + + elif output == 'file_id': + return { + 'text': obj.file_id + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +def setup_tests(): + # Install a mock load function for the images + def mock_load(root_folder): + return np.ndarray((3, 10, 20), dtype=np.uint8) + + # Install a mock os.path.exists function + def mock_exists(path): + return True + + bob.io.base.load = mock_load + os.path.exists = 
mock_exists + + +#---------------------------------------------------------- + + +# Test the behavior of the views (on fake data) +if __name__ == '__main__': + + setup_tests() + + # Note: This database can't be tested without the actual data, since + # some files are needed by this implementation + + view = Train() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='view2_1', + ) + ) + view.get('client_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) + + + view = Templates() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='view2_1', + group='dev', + ) + ) + view.get('client_id', 0) + view.get('template_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) + + + view = Probes() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='view2_1', + group='dev', + ) + ) + view.get('template_ids', 0) + view.get('client_id', 0) + view.get('probe_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) diff --git a/advanced/databases/cbsr_nir_vis_2/4.rst b/advanced/databases/cbsr_nir_vis_2/4.rst new file mode 100644 index 0000000..339ff94 --- /dev/null +++ b/advanced/databases/cbsr_nir_vis_2/4.rst @@ -0,0 +1,64 @@ +.. Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. 
The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. + + +CASIA NIR-VIS 2.0 Face Database +------------------------------- + +Changelog +========= + +* **Version 4**, 30/Jan/2017: + + - Port to beat.backend.python v1.5.0 + +* **Version 3**, 30/Oct/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 2**, 20/Jan/2016: + + - Port to Bob v2 (should not affect results) + +* **Version 1**, 06/Nov/2014: + + - Initial release + + +Description +=========== + +`CASIA NIR-VIS 2.0 +<http://www.cbsr.ia.ac.cn/english/NIR-VIS-2.0-Database.html>`_ database offers +pairs of mugshot images (VIS) and their correspondent Near Infrared (NIR) +photos. Capured by CASIA (Chinese Academy of Sciences), the images of this +database were collected in four recording sessions: 2007 spring, 2009 summer, +2009 fall and 2010 summer, in which the first session is identical to the `HFB +database <http://www.cbsr.ia.ac.cn/english/HFB%20Databases.asp>`_. The CASIA +NIR-VIS 2.0 database consists of 725 subjects in total. There are 1-22 VIS and +5-50 NIR face images per subject. + +A preview image of the Database of Faces is shown below: + +.. image:: http://www.cbsr.ia.ac.cn/english/NIR-VIS-2.0/NIR-VIS-2.0-Sample.jpg + :width: 400 px + + +The VIS images are in JPG format and the NIR images are in BMP format. The size of each image is 640x480 pixels (RGB). 
diff --git a/advanced/databases/cpqd/4.json b/advanced/databases/cpqd/4.json new file mode 100644 index 0000000..b1ae9dc --- /dev/null +++ b/advanced/databases/cpqd/4.json @@ -0,0 +1,694 @@ +{ + "description": "The CPqD database", + "root_folder": "/this/database/is/not/installed", + "protocols": [ + { + "name": "laptop_male", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "laptop_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "group": "dev", + "protocol": "laptop_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "group": "dev", + "protocol": "laptop_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "group": "eval", + "protocol": "laptop_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + 
"template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "group": "eval", + "protocol": "laptop_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "laptop_female", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "laptop_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "group": "dev", + "protocol": "laptop_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "group": "dev", + "protocol": "laptop_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + 
"image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "group": "eval", + "protocol": "laptop_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "group": "eval", + "protocol": "laptop_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "smartphone_male", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "smartphone_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "group": "dev", + "protocol": "smartphone_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username 
}}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "group": "dev", + "protocol": "smartphone_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "group": "eval", + "protocol": "smartphone_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "group": "eval", + "protocol": "smartphone_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "smartphone_female", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "smartphone_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + 
"name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "group": "dev", + "protocol": "smartphone_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "group": "dev", + "protocol": "smartphone_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "group": "eval", + "protocol": "smartphone_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "group": "eval", + "protocol": "smartphone_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "l2s_male", + 
"template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "l2s_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "group": "dev", + "protocol": "l2s_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "group": "dev", + "protocol": "l2s_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "group": "eval", + "protocol": "l2s_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "group": "eval", + "protocol": "l2s_male" + }, + "outputs": { + "file_id": 
"{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "l2s_female", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "l2s_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "group": "dev", + "protocol": "l2s_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "group": "dev", + "protocol": "l2s_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "group": "eval", + "protocol": "l2s_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ 
system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "group": "eval", + "protocol": "l2s_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "s2l_male", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "s2l_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "group": "dev", + "protocol": "s2l_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "group": "dev", + "protocol": "s2l_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username 
}}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "group": "eval", + "protocol": "s2l_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "group": "eval", + "protocol": "s2l_male" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "s2l_female", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "s2l_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "group": "dev", + "protocol": "s2l_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username 
}}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "group": "dev", + "protocol": "s2l_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "group": "eval", + "protocol": "s2l_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "group": "eval", + "protocol": "s2l_female" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + } + ] +} diff --git a/advanced/databases/cpqd/4.py b/advanced/databases/cpqd/4.py new file mode 100644 index 0000000..0a09e7c --- /dev/null +++ b/advanced/databases/cpqd/4.py @@ -0,0 +1,424 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. 
# +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. # +# # +############################################################################### + +import os +import re +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View + +import bob.io.base +import bob.io.image +import bob.db.cpqd + + +#---------------------------------------------------------- + + +class Train(View): + + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + Several "image" are associated with a given "client_id". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'file_id', 'eye_centers', 'image']) + + image_folder = os.path.join(root_folder, "images") + annotation_folder = os.path.join(root_folder, "eye_positions") + + # Open the database and load the objects to provide via the outputs + db = bob.db.cpqd.Database() + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups='world'), + key=lambda x: (x.client_id, x.id)) + + entries = [] + + for obj in objs: + obj_id = obj.id.split('/')[-1] + digits = re.findall(r'\d+', obj_id) + + entries.append(Entry(np.uint64(obj.client_id[1:]), np.uint64(''.join(digits)), + db.annotations(obj.make_path(annotation_folder, '.pos')), + obj.make_path(image_folder, '.jpg'))) + + return entries + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 
'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Templates(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/uint64/1" + - template_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + Several "image" are associated with a given "template_id". + Several "template_id" are associated with a given "client_id". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | template_id | | template_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | client_id | + ----------------------------------------------------------------------------------------------- + + Note: for this particular database, there is only one "template_id" + per "client_id". 
+ """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'eye_centers', 'image']) + + image_folder = os.path.join(root_folder, "images") + annotation_folder = os.path.join(root_folder, "eye_positions") + + # Open the database and load the objects to provide via the outputs + db = bob.db.cpqd.Database() + + template_ids = db.model_ids(protocol=parameters['protocol'], + groups=parameters['group']) + + entries = [] + + for template_id in template_ids: + objs = db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + purposes='enroll', + model_ids=[template_id]) + + for obj in objs: + obj_id = obj.id.split('/')[-1] + digits = re.findall(r'\d+', obj_id) + + entries.append(Entry(np.uint64(obj.client_id[1:]), np.uint64(template_id[1:]), + np.uint64(''.join(digits)), + db.annotations(obj.make_path(annotation_folder, '.pos')), + obj.make_path(image_folder, '.jpg'))) + + return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'template_id': + return { + 'value': np.uint64(obj.template_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Probes(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/uint64/1" + - probe_id: "{{ 
system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + - template_ids: "{{ system_user.username }}/array_1d_uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + One "probe_id" is associated with a given "image". + Several "image" are associated with a given "client_id". + Several "client_id" are associated with a given "template_ids". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | template_ids | + ----------------------------------------------------------------------------------------------- + """ + + def 
index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', + 'eye_centers', 'image']) + + image_folder = os.path.join(root_folder, "images") + annotation_folder = os.path.join(root_folder, "eye_positions") + + # Open the database and load the objects to provide via the outputs + db = bob.db.cpqd.Database() + + template_ids = sorted(db.model_ids(protocol=parameters['protocol'], + groups=parameters['group'])) + + + template_probes = {} + for template_id in template_ids: + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + purposes='probe', + model_ids=[template_id]), + key=lambda x: (x.client_id, x.id)) + + template_probes[template_id] = [ p.id for p in objs ] + + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + purposes='probe'), + key=lambda x: (x.client_id, x.id)) + + entries = [] + for obj in objs: + templates = [ template_id for template_id in template_ids + if obj.id in template_probes[template_id] ] + + templates = [ np.uint64(x[1:]) for x in templates ] + + obj_id = obj.id.split('/')[-1] + digits = re.findall(r'\d+', obj_id) + + entries.append(Entry(templates, + np.uint64(obj.client_id[1:]), + np.uint64(''.join(digits)), + np.uint64(''.join(digits)), + db.annotations(obj.make_path(annotation_folder, '.pos')), + obj.make_path(image_folder, '.jpg'))) + + return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids, + x.client_id, x.probe_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'template_ids': + return { + 'value': obj.template_ids + } + + elif output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'probe_id': + return { + 'value': np.uint64(obj.probe_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': 
np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +def setup_tests(): + # Install a mock load function for the images + def mock_load(root_folder): + return np.ndarray((3, 10, 20), dtype=np.uint8) + + def mock_annotations(obj, path): + return dict( + leye=(5, 4), + reye=(7, 4), + ) + + bob.io.base.load = mock_load + bob.db.cpqd.Database.annotations = mock_annotations + + +#---------------------------------------------------------- + + +# Test the behavior of the views (on fake data) +if __name__ == '__main__': + + setup_tests() + + view = Train() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='laptop_male' + ) + ) + view.get('client_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) + + + view = Templates() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='laptop_male', + group='dev', + ) + ) + view.get('client_id', 0) + view.get('template_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) + + + view = Probes() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='s2l_female', + group='dev', + ) + ) + view.get('template_ids', 0) + view.get('client_id', 0) + view.get('probe_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) diff --git a/advanced/databases/cpqd/4.rst b/advanced/databases/cpqd/4.rst new file mode 100644 index 0000000..7b50761 --- /dev/null +++ b/advanced/databases/cpqd/4.rst @@ -0,0 +1,144 @@ +.. Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. 
.. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. + + +CPqD Biometric Database (BioCPqD Phase 1) +----------------------------------------- + +Changelog +========= + +* **Version 4**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 3**, 30/Oct/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 2**, 20/Jan/2016: + + - Port to Bob v2 + +* **Version 1**, 01/Apr/2014: + + - Initial release + + +Description +=========== + +This database was designed to provide data that was recorded in a natural way, +using various devices in different environments. Hence, algorithms that +perform well on this database are expected to be suitable for other real-world +applications that do not require a predefined audio/video recording setup. + +Database participants were selected among employees of CPqD Foundation who +volunteered to make recordings. A unique ID was assigned for each participant, +composed by a prefix (M for male and F for female) followed by a 4-digit number +(odd for males and even for females). Each participant recorded up to five +sessions, with a time lapse of at least 10 days between sessions. 
+
+Sessions consisted of 27 recorded sentences, whose content was specified in a
+script. Each sentence was recorded on three different device types:
+
+- Laptops (audio and video content);
+- Smartphones (audio and video content);
+- Phone calls (only audio).
+
+For each device type, a set of devices was used, as specified below:
+
+- Laptops:
+  - Compaq 510 with embedded mic and camera;
+  - Toshiba with USB Logitech QuickCam Pro 9000 webcam;
+  - DELL Latitude with embedded mic and camera.
+
+- Smartphones:
+  - Samsung Galaxy S II;
+  - Apple iPhone 4;
+  - Apple iPhone 4.
+
+- Phone calls:
+  - landline phone call;
+  - personal mobile phone call.
+
+Recordings were made in three environments with different characteristics:
+garden, restaurant (public indoor) and office. The idea behind this strategy
+was to exploit the influence of environmental noise in audio recordings and the
+effect of illumination and background conditions in the video recordings.
+Since the database includes recordings captured on different devices of
+different types and in different environments, it allows a large number of
+experimental setups.
+
+
+Content
+=======
+
+The data collection followed a simple recording protocol that was replicated
+for all sessions. For each session there was a corresponding script describing
+the whole content to be recorded, as follows:
+
+Text reading:
+
+- a pre-defined text (extracted from the database's consent form);
+- four phonetically rich sentences (randomly selected among 562 options);
+- passphrase: three repetitions of a single sentence (the same sentence for all
+  participants in all sessions).
+
+Spontaneous speech:
+
+- answers for generic questions (all participants answered all 15 questions
+  selected from a fixed set, distributed along the 5 sessions in random order);
+- a fake name;
+- a fake address;
+- a fake birthday date;
+- a fake ID number;
+- a fake phone number;
+- two command words (all participants spoke 10 words along the 5 sessions in
+  random order).
+
+Numbers, digits, time values and alphanumeric strings:
+
+- a monetary amount between 10 and 10 000, randomly generated;
+- a number between 10 and 1000, randomly generated;
+- a number between 1000 and 10 million, randomly generated;
+- three repetitions of a random digit sequence (first one read in a slow pace
+  and others naturally read);
+- a fake credit card number;
+- an alphanumeric string composed of 6 characters, randomly generated;
+- a time value, selected among a predefined set with 181 samples, equally
+  distributed among participants.
+
+It is important to note that all content was recorded in Brazilian Portuguese
+language.
+
+BioCPqD Phase I database provides unbiased biometric verification protocols,
+one for male and one for female participants, based on the MOBIO database
+protocols. These protocols partition the database in three different groups:
+
+- a Training set: used to train the parameters of the algorithm to be tested,
+  e.g., to create the projection matrix, Universal Background Models, etc.;
+- a Development set: used to evaluate hyper-parameters of the tested algorithms;
+- a Test set: used to evaluate the generalization performance of the tested
+  algorithms with previously unseen data.
+
+Both development and test sets are further split into an enrollment subset
+(used to enroll participants' models), and a probe set (whose files will be
+tested against all participants' models).
diff --git a/advanced/databases/frgc/4.json b/advanced/databases/frgc/4.json new file mode 100644 index 0000000..0c061b4 --- /dev/null +++ b/advanced/databases/frgc/4.json @@ -0,0 +1,330 @@ +{ + "description": "The Face Recognition Grand Challenge", + "root_folder": "/idiap/resource/database/frgc/FRGC-2.0-dist", + "protocols": [ + { + "name": "2.0.1_maskI", + "template": "simple_face_recognition_frgc", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "mask": "maskI", + "protocol": "2.0.1" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "templates", + "template": "templates", + "view": "Templates", + "parameters": { + "mask": "maskI", + "protocol": "2.0.1" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "probes", + "template": "probes", + "view": "Probes", + "parameters": { + "mask": "maskI", + "protocol": "2.0.1" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "2.0.4_maskI", + "template": "simple_face_recognition_frgc", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "mask": "maskI", + "protocol": "2.0.4" + }, + "outputs": { + "file_id": "{{ 
system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "templates", + "template": "templates", + "view": "Templates", + "parameters": { + "mask": "maskI", + "protocol": "2.0.4" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "probes", + "template": "probes", + "view": "Probes", + "parameters": { + "mask": "maskI", + "protocol": "2.0.4" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "2.0.1_maskII", + "template": "simple_face_recognition_frgc", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "mask": "maskII", + "protocol": "2.0.1" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "templates", + "template": "templates", + "view": "Templates", + "parameters": { + "mask": "maskII", + "protocol": "2.0.1" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + 
"eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "probes", + "template": "probes", + "view": "Probes", + "parameters": { + "mask": "maskII", + "protocol": "2.0.1" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "2.0.4_maskII", + "template": "simple_face_recognition_frgc", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "mask": "maskII", + "protocol": "2.0.4" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "templates", + "template": "templates", + "view": "Templates", + "parameters": { + "mask": "maskII", + "protocol": "2.0.4" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "probes", + "template": "probes", + "view": "Probes", + "parameters": { + "mask": "maskII", + "protocol": "2.0.4" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": 
"2.0.1_maskIII", + "template": "simple_face_recognition_frgc", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "mask": "maskIII", + "protocol": "2.0.1" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "templates", + "template": "templates", + "view": "Templates", + "parameters": { + "mask": "maskIII", + "protocol": "2.0.1" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "probes", + "template": "probes", + "view": "Probes", + "parameters": { + "mask": "maskIII", + "protocol": "2.0.1" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "2.0.4_maskIII", + "template": "simple_face_recognition_frgc", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "mask": "maskIII", + "protocol": "2.0.4" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "templates", + "template": "templates", + "view": "Templates", + "parameters": { + "mask": "maskIII", + "protocol": 
"2.0.4" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "probes", + "template": "probes", + "view": "Probes", + "parameters": { + "mask": "maskIII", + "protocol": "2.0.4" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + } + ] +} diff --git a/advanced/databases/frgc/4.py b/advanced/databases/frgc/4.py new file mode 100644 index 0000000..70e5301 --- /dev/null +++ b/advanced/databases/frgc/4.py @@ -0,0 +1,412 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. 
# +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. # +# # +############################################################################### + +import os +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View + +import bob.io.base +import bob.io.image +import bob.db.frgc + + +#---------------------------------------------------------- + + +class Train(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/text/1" + - client_id: "{{ system_user.username }}/text/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + Several "image" are associated with a given "client_id". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + """ + + def index(self, 
root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'file_id', 'eye_centers', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.frgc.Database(original_directory=root_folder) + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups='world', + mask_type=parameters['mask']), + key=lambda x: (x.client_id, x.id)) + + entries = [] + + for obj in objs: + filename = obj.make_path(root_folder, '.jpg') + if not os.path.exists(filename): + filename = obj.make_path(root_folder, '.JPG') + + entries.append(Entry(obj.client_id, obj.id, db.annotations(obj), filename)) + + return entries + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'text': obj.client_id + } + + elif output == 'file_id': + return { + 'text': obj.file_id + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Templates(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/text/1" + - template_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/text/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + Several "image" are associated with a given "template_id". + Several "template_id" are associated with a given "client_id". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | template_id | | template_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | client_id | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'eye_centers', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.frgc.Database(original_directory=root_folder) + + model_files = bob.db.frgc.models.get_list(root_folder, + 'dev', + parameters['protocol'], + 'enroll') + + mask = bob.db.frgc.models.get_mask(root_folder, + parameters['protocol'], + parameters['mask']) + + entries = [] + for model_index, model in enumerate(model_files): + if (mask[:, model_index] > 0).any(): + for presentation in model.m_files: + obj = bob.db.frgc.models.File(model.m_signature, + presentation, + model.m_files[presentation]) + + 
filename = obj.make_path(root_folder, '.jpg') + if not os.path.exists(filename): + filename = obj.make_path(root_folder, '.JPG') + + entries.append(Entry(obj.client_id, model.m_model, obj.id, + db.annotations(obj), filename)) + + return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'text': obj.client_id + } + + elif output == 'template_id': + return { + 'value': np.uint64(obj.template_id) + } + + elif output == 'file_id': + return { + 'text': obj.file_id + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Probes(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/text/1" + - probe_id: "{{ system_user.username }}/text/1" + - client_id: "{{ system_user.username }}/text/1" + - template_ids: "{{ system_user.username }}/array_1d_uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + One "probe_id" is associated with a given "image". + Several "image" are associated with a given "client_id". + Several "client_id" are associated with a given "template_ids". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | template_ids | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', + 'eye_centers', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.frgc.Database(original_directory=root_folder) + + probe_files = bob.db.frgc.models.get_list(root_folder, + 'dev', + parameters['protocol'], + 'probe') + + model_files = bob.db.frgc.models.get_list(root_folder, 
+ 'dev', + parameters['protocol'], + 'enroll') + + mask = bob.db.frgc.models.get_mask(root_folder, + parameters['protocol'], + parameters['mask']) + + template_ids = np.array([ x.m_model for x in model_files ]) + + entries = [] + for probe_index, probe in enumerate(probe_files): + template_indices = mask[probe_index, :].nonzero()[0] + templates = sorted(template_ids.take(template_indices)) + + for presentation in probe.m_files: + obj = bob.db.frgc.models.File(probe.m_signature, + presentation, + probe.m_files[presentation]) + + filename = obj.make_path(root_folder, '.jpg') + if not os.path.exists(filename): + filename = obj.make_path(root_folder, '.JPG') + + entries.append(Entry(templates, obj.client_id, obj.id, obj.id, + db.annotations(obj), filename)) + + return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids, + x.client_id, x.probe_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'template_ids': + return { + 'value': np.uint64(obj.template_ids) + } + + elif output == 'client_id': + return { + 'text': obj.client_id + } + + elif output == 'probe_id': + return { + 'text': obj.file_id + } + + elif output == 'file_id': + return { + 'text': obj.file_id + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +def setup_tests(): + # Install a mock load function for the images + def mock_load(root_folder): + return np.ndarray((3, 10, 20), dtype=np.uint8) + + # Install a mock os.path.exists function + def mock_exists(path): + return True + + bob.io.base.load = mock_load + os.path.exists = mock_exists + + +#---------------------------------------------------------- 
+ + +# Test the behavior of the views (on fake data) +if __name__ == '__main__': + + setup_tests() + + # Note: This database can't be tested without the actual data, since + # some files are needed by this implementation + + view = Train() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='2.0.1', + mask='maskI', + ) + ) + view.get('client_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) + + + view = Templates() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='2.0.1', + mask='maskI', + ) + ) + view.get('client_id', 0) + view.get('template_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) + + + view = Probes() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='2.0.1', + mask='maskI', + ) + ) + view.get('template_ids', 0) + view.get('client_id', 0) + view.get('probe_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) diff --git a/advanced/databases/frgc/4.rst b/advanced/databases/frgc/4.rst new file mode 100644 index 0000000..ae5a754 --- /dev/null +++ b/advanced/databases/frgc/4.rst @@ -0,0 +1,67 @@ +.. Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. The BEAT platform is distributed in the hope that it will be useful, but .. +.. 
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. + + +The Face Recognition Grand Challenge +------------------------------------ + +Changelog +========= + +* **Version 4**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 3**, 31/Oct/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 2**, 20/Jan/2016: + + - Port to Bob v2 + +* **Version 1**, 08/Apr/2015: + + - Initial release + + +Description +=========== + +The `FRGC <http://www.nist.gov/itl/iad/ig/frgc.cfm>`_ data distribution +consists of three parts. The first is the FRGC data set. The second part is +the FRGC BEE. The BEE distribution includes all the data sets for performing +and scoring the six experiments. The third part is a set of baseline +algorithms for experiments 1 through 4. With all three components, it is +possible to run experiments 1 through 4, from processing the raw images +to producing Receiver Operating Characteristics (ROCs). + +The data for FRGC consists of 50,000 recordings divided into training and +validation partitions. The training partition is designed for training +algorithms and the validation partition is for assessing performance of an +approach in a laboratory setting. The validation partition consists of data +from 4,003 subject sessions. A subject session is the set of all images of a +person taken each time a person's biometric data is collected and consists of +four controlled still images, two uncontrolled still images, and one +three-dimensional image. The controlled images were taken in a studio setting, +are full frontal facial images taken under two lighting conditions and with +two facial expressions (smiling and neutral). 
The uncontrolled images were +taken in varying illumination conditions; e.g., hallways, atriums, or outside. +Each set of uncontrolled images contains two expressions, smiling and neutral. diff --git a/advanced/databases/gbu/4.json b/advanced/databases/gbu/4.json new file mode 100644 index 0000000..2ee7041 --- /dev/null +++ b/advanced/databases/gbu/4.json @@ -0,0 +1,159 @@ +{ + "description": "The Good, the Bad and the Ugly Face Challenge", + "root_folder": "/idiap/resource/database/MBGC-V1", + "protocols": [ + { + "name": "good", + "template": "simple_face_recognition_gbu", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "Good" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "Good" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "Good" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "bad", + "template": "simple_face_recognition_gbu", + "sets": [ + { + "name": "train", + "template": "train", + 
"view": "Train", + "parameters": { + "protocol": "Bad" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "Bad" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "Bad" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "ugly", + "template": "simple_face_recognition_gbu", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "Ugly" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "Ugly" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username 
}}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "Ugly" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + } + ] +} diff --git a/advanced/databases/gbu/4.py b/advanced/databases/gbu/4.py new file mode 100644 index 0000000..ca6dbd5 --- /dev/null +++ b/advanced/databases/gbu/4.py @@ -0,0 +1,381 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. 
# +# # +############################################################################### + +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View + +import bob.io.base +import bob.io.image +import bob.db.gbu + + +#---------------------------------------------------------- + + +class Train(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + Several "image" are associated with a given "client_id". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'file_id', 'eye_centers', 'image']) + + # Open the database and load the objects to provide via the 
outputs + db = bob.db.gbu.Database() + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups='world', + subworld='x8'), + key=lambda x: (x.client_id, x.id)) + + return [ Entry(x.client_id, x.id, db.annotations(x), x.make_path(root_folder, '.jpg')) + for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Templates(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/uint64/1" + - template_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + Several "image" are associated with a given "template_id". + Several "template_id" are associated with a given "client_id". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | template_id | | template_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | client_id | + ----------------------------------------------------------------------------------------------- + + Note: for this particular database, there is only one "template_id" + per "client_id". 
+ """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'eye_centers', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.gbu.Database() + + template_ids = db.model_ids(protocol=parameters['protocol'], + groups='dev') + + entries = [] + + for template_id in template_ids: + objs = db.objects(protocol=parameters['protocol'], + groups='dev', + purposes='enroll', + model_ids=[template_id]) + + entries.extend([ Entry(x.client_id, template_id, x.id, db.annotations(x), + x.make_path(root_folder, '.jpg')) + for x in objs ]) + + return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'template_id': + return { + 'value': np.uint64(obj.template_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Probes: + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/uint64/1" + - probe_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + - template_ids: "{{ system_user.username }}/array_1d_uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + One "probe_id" is associated with a given "image". 
+ Several "image" are associated with a given "client_id". + Several "client_id" are associated with a given "template_ids". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | template_ids | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', + 'eye_centers', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.gbu.Database() + + template_ids = 
sorted(db.model_ids(protocol=parameters['protocol'], + groups='dev')) + + + template_probes = {} + for template_id in template_ids: + objs = sorted(db.objects(protocol=parameters['protocol'], + groups='dev', + purposes='probe', + model_ids=[template_id]), + key=lambda x: (x.client_id, x.id)) + + template_probes[template_id] = [ p.id for p in objs ] + + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups='dev', + purposes='probe'), + key=lambda x: (x.client_id, x.id)) + + entries = [] + for obj in objs: + templates = [ template_id for template_id in template_ids + if obj.id in template_probes[template_id] ] + + entries.append( Entry(templates, obj.client_id, obj.id, obj.id, + db.annotations(obj), obj.make_path(root_folder, '.jpg')) ) + + return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids, + x.client_id, x.probe_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'template_ids': + return { + 'value': np.uint64(obj.template_ids) + } + + elif output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'probe_id': + return { + 'value': np.uint64(obj.probe_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +def setup_tests(): + # Install a mock load function for the images + def mock_load(root_folder): + return np.ndarray((3, 10, 20), dtype=np.uint8) + + bob.io.base.load = mock_load + + +#---------------------------------------------------------- + + +# Test the behavior of the views (on fake data) +if __name__ == '__main__': + 
+ setup_tests() + + view = Train() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='Good' + ) + ) + view.get('client_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) + + + view = Templates() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='Good' + ) + ) + view.get('client_id', 0) + view.get('template_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) + + + view = Probes() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='Good' + ) + ) + view.get('template_ids', 0) + view.get('client_id', 0) + view.get('probe_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) diff --git a/advanced/databases/gbu/4.rst b/advanced/databases/gbu/4.rst new file mode 100644 index 0000000..edeb8d0 --- /dev/null +++ b/advanced/databases/gbu/4.rst @@ -0,0 +1,61 @@ +.. Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. 
+ + +The Good, Bad and Ugly Database +------------------------------- + +Changelog +========= + +* **Version 4**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 3**, 31/Oct/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 2**, 20/Jan/2016: + + - Port to Bob v2 + +* **Version 1**, 08/Apr/2015: + + - Initial release + + +Description +=========== + +`The Good, the Bad, and the Ugly challenge +<http://www.nist.gov/itl/iad/ig/focs.cfm>`_ consists of three frontal still +face partitions. The partitions were designed to encourage the development of +face recognition algorithms that excel at matching `hard` face pairs, but not +at the expense of performance on `easy` face pairs. + +The images in this challenge problem are frontal face stills taken under +uncontrolled illumination, both indoors and outdoors. The three partitions +were constructed by analyzing results from the FRVT 2006. The Good set +consisted of face pairs that had above average performance, the Bad set +consisted of face pairs that had average performance, and the Ugly set +consisted of face pairs that had below average performance. There are 437 +subjects in the data set. All three partitions have the same 437 subjects. +All three partitions have 1085 images in both the target and query sets. 
diff --git a/advanced/databases/kboc16/4.json b/advanced/databases/kboc16/4.json new file mode 100644 index 0000000..3c4fb3c --- /dev/null +++ b/advanced/databases/kboc16/4.json @@ -0,0 +1,76 @@ +{ + "description": "The KBOC16 database", + "root_folder": "/idiap/group/biometric/databases/kboc16", + "protocols": [ + { + "name": "A", + "template": "simple_keystroke_recognition_kboc16", + "sets": [ + { + "name": "templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "A" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "keystroke": "{{ system_user.username }}/kboc16_keystroke/1" + } + }, + { + "name": "probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "A" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "keystroke": "{{ system_user.username }}/kboc16_keystroke/1" + } + } + ] + }, + { + "name": "D", + "template": "simple_keystroke_recognition_kboc16", + "sets": [ + { + "name": "templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "D" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "keystroke": "{{ system_user.username }}/kboc16_keystroke/1" + } + }, + { + "name": "probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "D" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "keystroke": "{{ system_
user.username }}/kboc16_keystroke/1" + } + } + ] + } + ] +} diff --git a/advanced/databases/kboc16/4.py b/advanced/databases/kboc16/4.py new file mode 100644 index 0000000..f2c544b --- /dev/null +++ b/advanced/databases/kboc16/4.py @@ -0,0 +1,259 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. 
# +# # +############################################################################### + +import string +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View + +import bob.db.kboc16 + + +#---------------------------------------------------------- + + +def keystroke_reader(filename): + times = [] + keys = [] + + for line in open(filename, 'r').readlines(): + parts = line.split() + times.append(np.int32(parts[1])) + keys.append(parts[0]) + + return dict( + timestamps = times, + key_events = keys, + ) + + +#---------------------------------------------------------- + + +class Templates(View): + """Outputs: + - keystroke: "{{ system_user.username }}/kboc16_keystroke/1" + - file_id: "{{ system_user.username }}/uint64/1" + - template_id: "{{ system_user.username }}/text/1" + - client_id: "{{ system_user.username }}/text/1" + + One "file_id" is associated with a given "keystroke". + Several "keystroke" are associated with a given "template_id". + Several "template_id" are associated with a given "client_id". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | keystroke | | keystroke | | keystroke | | keystroke | | keystroke | | keystroke | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | template_id | | template_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | client_id | + ----------------------------------------------------------------------------------------------- + + Note: for this particular database, there is only one "template_id" + per "client_id". 
+ """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'keystroke']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.kboc16.Database() + + template_ids = db.model_ids(groups='eval', + protocol=parameters['protocol']) + + entries = [] + + for template_id in template_ids: + objs = db.objects(groups='eval', + protocol=parameters['protocol'], + purposes='enrol', + model_ids=[template_id]) + + entries.extend([ Entry(x.client_id, template_id, x.id, x.make_path(root_folder, '.txt')) + for x in objs ]) + + return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'text': str(obj.client_id) + } + + elif output == 'template_id': + return { + 'text': str(obj.template_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'keystroke': + return keystroke_reader(obj.keystroke) + + +#---------------------------------------------------------- + + +class Probes(View): + """Outputs: + - keystroke: "{{ system_user.username }}/kboc16_keystroke/1 + - file_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/text/1" + - probe_id: "{{ system_user.username }}/uint64/1", + - template_ids: "{{ system_user.username }}/array_1d_text/1", + + One "file_id" is associated with a given "keystroke". + One "probe_id" is associated with a given "keystroke". + Several "keystroke" are associated with a given "client_id". + Several "client_id" are associated with a given "template_ids". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | keystroke | | keystroke | | keystroke | | keystroke | | keystroke | | keystroke | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | template_ids | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', + 'keystroke']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.kboc16.Database() + + template_ids = sorted(db.model_ids(protocol=parameters['protocol'], + groups='eval'), + key=lambda x: int(x)) + + template_probes = {} + for template_id in template_ids: + objs = sorted(db.objects(protocol=parameters['protocol'], + groups='eval', + purposes='probe', + model_ids=[template_id]), + key=lambda x: (x.client_id, x.id)) + + template_probes[template_id] = [ p.id for p in objs ] + + + objs = sorted(db.objects(protocol=parameters['protocol'], + 
groups='eval', + purposes='probe'), + key=lambda x: (x.client_id, x.id)) + + entries = [] + for obj in objs: + templates = [ template_id for template_id in template_ids + if obj.id in template_probes[template_id] ] + entries.append( Entry(templates, obj.client_id, obj.id, obj.id, + obj.make_path(root_folder, '.txt')) ) + + return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids, + x.client_id, x.probe_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'template_ids': + return { + 'text': [ str(x) for x in obj.template_ids ] + } + + elif output == 'client_id': + return { + 'text': str(obj.client_id) + } + + elif output == 'probe_id': + return { + 'value': np.uint64(obj.probe_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'keystroke': + return keystroke_reader(obj.keystroke) + + +#---------------------------------------------------------- + + +def setup_tests(): + # Install a mock load function for the keystrokes + def mock_keystroke_reader(filename): + return {} + + global keystroke_reader + keystroke_reader = mock_keystroke_reader + + +#---------------------------------------------------------- + + +# Test the behavior of the views (on fake data) +if __name__ == '__main__': + + setup_tests() + + view = Templates() + view.objs = view.index(root_folder='', parameters=dict(protocol = 'A')) + view.get('client_id', 0) + view.get('template_id', 0) + view.get('file_id', 0) + view.get('keystroke', 0) + + view = Probes() + view.objs = view.index(root_folder='', parameters=dict(protocol = 'A')) + view.get('template_ids', 0) + view.get('client_id', 0) + view.get('probe_id', 0) + view.get('file_id', 0) + view.get('keystroke', 0) diff --git a/advanced/databases/kboc16/4.rst b/advanced/databases/kboc16/4.rst new file mode 100644 index 0000000..11e44ea --- /dev/null +++ b/advanced/databases/kboc16/4.rst @@ -0,0 +1,84 @@ +.. 
Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. + + +The KBOC16 Database +------------------- + +Changelog +========= + +* **Version 4**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 3**, 30/Oct/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 2**, 4/Apr/2016: + + - Added protocol ``A`` + +* **Version 1**: + + - Initial release + + +Description +=========== + +The KBOC16 database is part of the `KBOC competition +<https://sites.google.com/site/btas16kboc/database>`_, an official competition +of The IEEE Eighth International Conference on Biometrics: Theory, +Applications, and Systems (BTAS 2016) organized by `ATVS Biometric Research +Group <http://atvs.ii.uam.es>`_. + + +The database consists of keystroke sequences from 300 subjects acquired in four +different sessions (5 samples per session including genuine and impostors) +distributed in a four month time span. 
Thus, three different levels of +temporal variability are taken into account: (1) within the same session (the +samples are not acquired consecutively), (2) within weeks (between two +consecutive sessions), and (3) within months (between non-consecutive +sessions). + +Each session comprises 5 case-insensitive repetitions of the subject’s name and +surname typed in a natural and continuous manner (case insensitive). Mistakes +(and backspace) are not permitted. The time (in milliseconds) elapsed between +consecutive key events (press and release) is provided as keystroke dynamics +sequence. + +There are two types of sequences: 1) Genuine sequences: typed by the owner of +the password and 2) Impostor sequences: typed by other users (different to the +owner) who try to spoof the system. The number of genuine and impostor samples +per user varies (this information remains sequestered). + +For further information on the reproducible keystroke experiments and +competition results we refer the reader to (the following articles are publicly +available in the publications section of the `ATVS group webpage +<http://atvs.ii.uam.es/listpublications.do>`_) + +.. [BTAS15] A. Morales, M. Falanga, J. Fierrez, C. Sansone and J. Ortega-Garcia, ''Keystroke Dynamics Recognition based on Personal Data: A Comparative Experimental Evaluation Implementing Reproducible Research'', in Proc. of the IEEE Seventh International Conference on Biometrics: Theory, Applications and Systems, Arlington, Virginia, USA, September 2015. + +.. [BTAS16] To appear. + +Please remember to reference [BTAS15]_ and [BTAS16]_ on any work made public, +whatever the form, based directly or indirectly on any part of the KBOC16 DB. 
diff --git a/advanced/databases/lfw/4.json b/advanced/databases/lfw/4.json new file mode 100644 index 0000000..f9bf549 --- /dev/null +++ b/advanced/databases/lfw/4.json @@ -0,0 +1,54 @@ +{ + "description": "The Labeled Faces in the Wild Database", + "root_folder": "/idiap/resource/database/lfw/all_images", + "protocols": [ + { + "name": "view1", + "template": "simple_face_recognition_textid", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "view1" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_3d_uint8/1" + } + }, + { + "name": "templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "view1" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1" + } + }, + { + "name": "probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "view1" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1" + } + } + ] + } + ] +} diff --git a/advanced/databases/lfw/4.py b/advanced/databases/lfw/4.py new file mode 100644 index 0000000..d7c01dd --- /dev/null +++ b/advanced/databases/lfw/4.py @@ -0,0 +1,285 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. 
# +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. # +# # +############################################################################### + +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View + +import bob.io.base +import bob.io.image +import bob.db.lfw + + +#---------------------------------------------------------- + + +class Train(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - file_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/text/1" + + One "file_id" is associated with a given "image". + Several "image" are associated with a given "client_id". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'file_id', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.lfw.Database() + objs = sorted(db.objects(protocol=parameters['protocol'], + groups='world', + world_type='unrestricted'), + key=lambda x: (x.client_id, x.id)) + + return [ Entry(x.client_id, x.id, x.make_path(root_folder, '.jpg')) for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'text': str(obj.client_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Templates(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - file_id: "{{ system_user.username }}/uint64/1" + - template_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/text/1" + + One "file_id" is associated with a given "image". + Several "image" are associated with a given "template_id". 
+ Several "template_id" are associated with a given "client_id". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | template_id | | template_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | client_id | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.lfw.Database() + + template_ids = db.model_ids(protocol=parameters['protocol'], + groups='dev') + + entries = [] + + for template_id in template_ids: + objs = db.objects(protocol=parameters['protocol'], + groups='dev', + purposes='enroll', + model_ids=[template_id]) + + entries.extend([ Entry(x.client_id, template_id, x.id, x.make_path(root_folder, '.jpg')) + for x in objs ]) + + return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'text': str(obj.client_id) + } + + elif output == 'template_id': + return { + 'value': np.uint64(obj.template_id) + } + + elif output == 'file_id': + return { + 'value': 
np.uint64(obj.file_id) + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Probes(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - file_id: "{{ system_user.username }}/uint64/1" + - probe_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/text/1" + - template_ids: "{{ system_user.username }}/array_1d_uint64/1" + + One "file_id" is associated with a given "image". + One "probe_id" is associated with a given "image". + Several "image" are associated with a given "client_id". + Several "client_id" are associated with a given "template_ids". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | template_ids | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, 
root_folder, parameters): + Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.lfw.Database() + + template_ids = np.array(sorted(db.model_ids(protocol=parameters['protocol'], + groups='dev'), + key=lambda x: int(x)), + dtype='uint64') + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups='dev', + purposes='probe'), + key=lambda x: (x.client_id, x.id)) + + return [ Entry(template_ids, x.client_id, x.id, x.id, x.make_path(root_folder, '.jpg')) + for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'template_ids': + return { + 'value': obj.template_ids + } + + elif output == 'client_id': + return { + 'text': str(obj.client_id) + } + + elif output == 'probe_id': + return { + 'value': np.uint64(obj.probe_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +def setup_tests(): + # Install a mock load function for the images + def mock_load(root_folder): + return np.ndarray((3, 10, 20), dtype=np.uint8) + + bob.io.base.load = mock_load + + +#---------------------------------------------------------- + + +# Test the behavior of the views (on fake data) +if __name__ == '__main__': + + setup_tests() + + view = Train() + view.objs = view.index(root_folder='', parameters=dict(protocol='view1')) + view.get('client_id', 0) + view.get('file_id', 0) + view.get('image', 0) + + view = Templates() + view.objs = view.index(root_folder='', parameters=dict(protocol='view1')) + view.get('client_id', 0) + view.get('template_id', 0) + view.get('file_id', 0) + view.get('image', 0) + + view = Probes() + view.objs = view.index(root_folder='', parameters=dict(protocol='view1')) + view.get('template_ids', 0) + view.get('client_id', 
0) + view.get('probe_id', 0) + view.get('file_id', 0) + view.get('image', 0) diff --git a/advanced/databases/lfw/4.rst b/advanced/databases/lfw/4.rst new file mode 100644 index 0000000..c6bc3e0 --- /dev/null +++ b/advanced/databases/lfw/4.rst @@ -0,0 +1,72 @@ +.. Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. + + +The Labeled Faces in the Wild Database +-------------------------------------- + +Changelog +========= + +* **Version 4**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 3**, 31/Oct/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 2**, 20/Jan/2016: + + - Port to Bob v2 + +* **Version 1**, 08/Apr/2015: + + - Initial release + + +Description +=========== + +`Labeled Faces in the Wild <http://vis-www.cs.umass.edu/lfw/>`_ is a database +of face photographs designed for studying the problem of unconstrained face +recognition. The data set contains more than 13,000 images of faces collected +from the web. 
Each face has been labeled with the name of the person pictured. +1680 of the people pictured have two or more distinct photos in the data set. +The only constraint on these faces is that they were detected by the +Viola-Jones face detector. More details can be found in this +`technical report <http://vis-www.cs.umass.edu/lfw/lfw_update.pdf>`_. + +There are four different sets of LFW images including the original and three +different types of "aligned" images. The platform currently relies on the +original images and provides the `view1` protocol. + +Previews of original images of this database are shown below: + +.. image:: http://vis-www.cs.umass.edu/lfw/images/AJ_Cook/AJ_Cook_0001.jpg + :width: 150px +.. image:: http://vis-www.cs.umass.edu/lfw/images/AJ_Lamas/AJ_Lamas_0001.jpg + :width: 150px +.. image:: http://vis-www.cs.umass.edu/lfw/images/Aaron_Eckhart/Aaron_Eckhart_0001.jpg + :width: 150px +.. image:: http://vis-www.cs.umass.edu/lfw/images/Aaron_Guiel/Aaron_Guiel_0001.jpg + :width: 150px +..
image:: http://vis-www.cs.umass.edu/lfw/images/Aaron_Patterson/Aaron_Patterson_0001.jpg + :width: 150px diff --git a/advanced/databases/livdet2013/4.json b/advanced/databases/livdet2013/4.json new file mode 100644 index 0000000..a3a1fa8 --- /dev/null +++ b/advanced/databases/livdet2013/4.json @@ -0,0 +1,164 @@ +{ + "description": "The LivDet 2013 Fingerprint Liveness Database", + "root_folder": "/idiap/resource/database/LivDet/LivDet2013", + "protocols": [ + { + "name": "Biometrika", + "template": "simple_fingerprint_antispoofing", + "sets": [ + { + "name": "train", + "template": "train", + "view": "All", + "parameters": { + "protocol": "Biometrika", + "group": "train" + }, + "outputs": { + "image": "{{ system_user.username }}/array_3d_uint8/1", + "spoof": "{{ system_user.username }}/boolean/1" + } + }, + { + "name": "test", + "template": "test", + "view": "All", + "parameters": { + "protocol": "Biometrika", + "group": "test" + }, + "outputs": { + "image": "{{ system_user.username }}/array_3d_uint8/1", + "spoof": "{{ system_user.username }}/boolean/1" + } + } + ] + }, + { + "name": "Italdata", + "template": "simple_fingerprint_antispoofing", + "sets": [ + { + "name": "train", + "template": "train", + "view": "All", + "parameters": { + "protocol": "Italdata", + "group": "train" + }, + "outputs": { + "image": "{{ system_user.username }}/array_3d_uint8/1", + "spoof": "{{ system_user.username }}/boolean/1" + } + }, + { + "name": "test", + "template": "test", + "view": "All", + "parameters": { + "protocol": "Italdata", + "group": "test" + }, + "outputs": { + "image": "{{ system_user.username }}/array_3d_uint8/1", + "spoof": "{{ system_user.username }}/boolean/1" + } + } + ] + }, + { + "name": "CrossMatch", + "template": "simple_fingerprint_antispoofing", + "sets": [ + { + "name": "train", + "template": "train", + "view": "All", + "parameters": { + "protocol": "CrossMatch", + "group": "train" + }, + "outputs": { + "image": "{{ system_user.username }}/array_3d_uint8/1", + 
"spoof": "{{ system_user.username }}/boolean/1" + } + }, + { + "name": "test", + "template": "test", + "view": "All", + "parameters": { + "protocol": "CrossMatch", + "group": "test" + }, + "outputs": { + "image": "{{ system_user.username }}/array_3d_uint8/1", + "spoof": "{{ system_user.username }}/boolean/1" + } + } + ] + }, + { + "name": "Swipe", + "template": "simple_fingerprint_antispoofing", + "sets": [ + { + "name": "train", + "template": "train", + "view": "All", + "parameters": { + "protocol": "Swipe", + "group": "train" + }, + "outputs": { + "image": "{{ system_user.username }}/array_3d_uint8/1", + "spoof": "{{ system_user.username }}/boolean/1" + } + }, + { + "name": "test", + "template": "test", + "view": "All", + "parameters": { + "protocol": "Swipe", + "group": "test" + }, + "outputs": { + "image": "{{ system_user.username }}/array_3d_uint8/1", + "spoof": "{{ system_user.username }}/boolean/1" + } + } + ] + }, + { + "name": "Full", + "template": "simple_fingerprint_antispoofing", + "sets": [ + { + "name": "train", + "template": "train", + "view": "All", + "parameters": { + "group": "train" + }, + "outputs": { + "image": "{{ system_user.username }}/array_3d_uint8/1", + "spoof": "{{ system_user.username }}/boolean/1" + } + }, + { + "name": "test", + "template": "test", + "view": "All", + "parameters": { + "group": "test" + }, + "outputs": { + "image": "{{ system_user.username }}/array_3d_uint8/1", + "spoof": "{{ system_user.username }}/boolean/1" + } + } + ] + } + ] +} diff --git a/advanced/databases/livdet2013/4.py b/advanced/databases/livdet2013/4.py new file mode 100644 index 0000000..a0bc58c --- /dev/null +++ b/advanced/databases/livdet2013/4.py @@ -0,0 +1,109 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. 
# +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. # +# # +############################################################################### + +import os +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View + +import bob.io.base +import bob.io.image +from bob.db.livdet2013 import Database + + +#---------------------------------------------------------- + + +class All(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - spoof: "{{ system_user.username }}/boolean/1" + + Several "image" are associated with a given "spoof". 
+ + --------------- --------------- --------------- --------------- + | image | | image | | image | | image | + --------------- --------------- --------------- --------------- + ------------------------------- ------------------------------ + | spoof | | spoof | + ------------------------------- ------------------------------ + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['spoof', 'image']) + + # Open the database and load the objects to provide via the outputs + db = Database() + objs = sorted(db.objects(protocols=parameters.get('protocol'), + groups=parameters['group'], + classes=parameters.get('class')), + key=lambda x: x.is_live()) + + return [ Entry(x.is_live(), x.make_path(root_folder)) for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'spoof': + return { + 'value': obj.spoof + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +def setup_tests(): + # Install a mock load function for the images + def mock_load(root_folder): + return np.ndarray((3, 10, 20), dtype=np.uint8) + + bob.io.base.load = mock_load + + +#---------------------------------------------------------- + + +# Test the behavior of the views (on fake data) +if __name__ == '__main__': + + setup_tests() + + view = All() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='Biometrika', + group='train' + ) + ) + view.get('spoof', 0) + view.get('image', 0) diff --git a/advanced/databases/livdet2013/4.rst b/advanced/databases/livdet2013/4.rst new file mode 100644 index 0000000..9f28aec --- /dev/null +++ b/advanced/databases/livdet2013/4.rst @@ -0,0 +1,75 @@ +.. Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. 
Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. + + +=========================================== + LivDet 2013 Fingerprint Liveness Database +=========================================== + + +Changelog +========= + +* **Version 4**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 3**, 31/Oct/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 2**, 26/Jan/2016: + + - Port to Bob v2 + +* **Version 1**, 17/Sep/2015: + + - Initial release + + +Description +=========== + + +The LivDet 2013 `Fingerprint Liveness Database <http://livdet.org>`_ +[Ghiani2013]_ is a fingerprint liveness database which consists of four +sub-sets, which contain live and fake fingerprint images from four capture +devices. Images have been collected by a consensual approach and using +different materials for the artificial reproduction of the fingerprint +(gelatine, silicone, play-doh, ecoflex, body double, wood glue). 
+ + +Data Set +======== + +========== =============== ========= ========== ============ ============ + Scanner Model Res (dpi) Image size Live samples Fake samples +========== =============== ========= ========== ============ ============ +Biometrika FX2000 569 312x372 2000 2000 +Italdata ET10 500 640x480 2000 2000 +Crossmatch L SCAN GUARDIAN 500 640x480 2500 2000 +Swipe 96 2374 1979 +========== =============== ========= ========== ============ ============ + + +References +========== + +.. [Ghiani2013] L.Ghiani, D.Yambay, V.Mura, S.Tocco, G.L.Marcialis, F.Roli, and S.Schuckers, LivDet 2013 - Fingerprint Liveness Detection Competition 2013, 6th IAPR/IEEE Int. Conf. on Biometrics, June, 4-7, 2013, Madrid (Spain). diff --git a/advanced/databases/mnist/4.json b/advanced/databases/mnist/4.json new file mode 100644 index 0000000..46d01c2 --- /dev/null +++ b/advanced/databases/mnist/4.json @@ -0,0 +1,38 @@ +{ + "description": "The MNIST Database of Handwritten Digits", + "root_folder": "/idiap/group/biometric/databases/mnist", + "protocols": [ + { + "name": "idiap", + "template": "simple_digit_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "View", + "parameters": { + "group": "train" + }, + "outputs": { + "id": "{{ system_user.username }}/uint64/1", + "class_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_2d_uint8/1" + } + }, + { + "name": "test", + "template": "test", + "view": "View", + "parameters": { + "group": "test" + }, + "outputs": { + "id": "{{ system_user.username }}/uint64/1", + "class_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_2d_uint8/1" + } + } + ] + } + ] +} diff --git a/advanced/databases/mnist/4.py b/advanced/databases/mnist/4.py new file mode 100644 index 0000000..ba73de2 --- /dev/null +++ b/advanced/databases/mnist/4.py @@ -0,0 +1,111 @@ +############################################################################### +# # +# 
Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. # +# # +############################################################################### + +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View as BaseView + +import bob.db.mnist + + +#---------------------------------------------------------- + + +class View(BaseView): + """Outputs: + - image: "{{ system_user.username }}/array_2d_uint8/1" + - id: "{{ system_user.username }}/uint64/1" + - class_id: "{{ system_user.username }}/uint64/1" + + One "id" is associated with a given "image". + Several "image" are associated with a given "class_id". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | id | | id | | id | | id | | id | | id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | class_id | | class_id | + ----------------------------------------------- ----------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['class_id', 'id', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.mnist.Database(data_dir=root_folder) + + features, labels = db.data(groups=parameters['group']) + + objs = sorted([ (labels[i], i, features[i]) for i in range(len(features)) ], + key=lambda x: (x[0], x[1])) + + return [ Entry(x[0], x[1], x[2]) for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'class_id': + return { + 'value': np.uint64(obj.class_id) + } + + elif output == 'id': + return { + 'value': np.uint64(obj.id) + } + + elif output == 'image': + return { + 'value': obj.features.reshape((28, 28)) + } + + +#---------------------------------------------------------- + + +def setup_tests(): + pass + + +#---------------------------------------------------------- + + +# Test the behavior of the views (on fake data) +if __name__ == '__main__': + + setup_tests() + + # Note: This database can't be tested without the actual data, since + # the actual files are needed by this implementation + + view = View() + view.objs = view.index(root_folder='', parameters=dict(group='train')) + view.get('class_id', 0) + view.get('id', 0) 
+ view.get('image', 0) diff --git a/advanced/databases/mnist/4.rst b/advanced/databases/mnist/4.rst new file mode 100644 index 0000000..12d31d1 --- /dev/null +++ b/advanced/databases/mnist/4.rst @@ -0,0 +1,93 @@ +.. Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. + + +The MNIST Database of Handwritten Digits +---------------------------------------- + +Changelog +========= + +* **Version 4**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 3**, 31/Oct/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 2**, 20/Jan/2016: + + - Port to Bob v2 + +* **Version 1**, 21/Oct/2014: + + - Initial release + + +Description +=========== + +The `MNIST database of handwritten digits <http://yann.lecun.com/exdb/mnist/>`_, +available from this page, has a training set of 60,000 examples, and a test +set of 10,000 examples. It is a subset of a larger set available from NIST. +The digits have been size-normalized and centered in a fixed-size image. 
+ +It is a good database for people who want to try learning techniques and +pattern recognition methods on real-world data while spending minimal efforts +on preprocessing and formatting. + +The original black and white (bilevel) images from NIST were size normalized +to fit in a 20x20 pixel box while preserving their aspect ratio. The resulting +images contain grey levels as a result of the anti-aliasing technique used by +the normalization algorithm. The images were centered in a 28x28 image by +computing the center of mass of the pixels, and translating the image so as to +position this point at the center of the 28x28 field. + +The MNIST database was constructed from NIST's Special Database 3 and Special +Database 1 which contain binary images of handwritten digits. NIST originally +designated SD-3 as their training set and SD-1 as their test set. However, +SD-3 is much cleaner and easier to recognize than SD-1. The reason for this +can be found in the fact that SD-3 was collected among Census Bureau +employees, while SD-1 was collected among high-school students. Drawing +sensible conclusions from learning experiments requires that the result be +independent of the choice of training set and test among the complete set of +samples. Therefore it was necessary to build a new database by mixing NIST's +datasets. + +The MNIST training set is composed of 30,000 patterns from SD-3 and 30,000 +patterns from SD-1. Our test set was composed of 5,000 patterns from SD-3 and +5,000 patterns from SD-1. The 60,000 pattern training set contained examples +from approximately 250 writers. We made sure that the sets of writers of the +training set and test set were disjoint. + +SD-1 contains 58,527 digit images written by 500 different writers. In +contrast to SD-3, where blocks of data from each writer appeared in sequence, +the data in SD-1 is scrambled. Writer identities for SD-1 are available and we +used this information to unscramble the writers.
We then split SD-1 in two: +characters written by the first 250 writers went into our new training set. +The remaining 250 writers were placed in our test set. Thus we had two sets +with nearly 30,000 examples each. The new training set was completed with +enough examples from SD-3, starting at pattern # 0, to make a full set of +60,000 training patterns. Similarly, the new test set was completed with +SD-3 examples starting at pattern # 35,000 to make a full set with 60,000 +test patterns. Only a subset of 10,000 test images (5,000 from SD-1 and +5,000 from SD-3) is available on this site. The full 60,000 sample training +set is available. diff --git a/advanced/databases/mobio/4.json b/advanced/databases/mobio/4.json new file mode 100644 index 0000000..ce946bc --- /dev/null +++ b/advanced/databases/mobio/4.json @@ -0,0 +1,188 @@ +{ + "description": "The MOBIO Database of Faces", + "root_folder": "/idiap/resource/database/mobio/IMAGES_PNG", + "protocols": [ + { + "name": "male", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "male", + "annotations": "../IMAGE_ANNOTATIONS" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "male", + "group": "dev", + "annotations": "../IMAGE_ANNOTATIONS" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + 
"view": "Probes", + "parameters": { + "protocol": "male", + "group": "dev", + "annotations": "../IMAGE_ANNOTATIONS" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "male", + "group": "eval", + "annotations": "../IMAGE_ANNOTATIONS" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "male", + "group": "eval", + "annotations": "../IMAGE_ANNOTATIONS" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "female", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "female", + "annotations": "../IMAGE_ANNOTATIONS" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username 
}}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "female", + "group": "dev", + "annotations": "../IMAGE_ANNOTATIONS" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "female", + "group": "dev", + "annotations": "../IMAGE_ANNOTATIONS" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "female", + "group": "eval", + "annotations": "../IMAGE_ANNOTATIONS" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "female", + "group": "eval", + "annotations": "../IMAGE_ANNOTATIONS" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ 
system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + } + ] +} diff --git a/advanced/databases/mobio/4.py b/advanced/databases/mobio/4.py new file mode 100644 index 0000000..1dbcd61 --- /dev/null +++ b/advanced/databases/mobio/4.py @@ -0,0 +1,402 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. 
# +# # +############################################################################### + +import os +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View + +import bob.io.base +import bob.io.image +import bob.db.mobio + + +#---------------------------------------------------------- + + +class Train(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + Several "image" are associated with a given "client_id". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'file_id', 'eye_centers', 'image']) + + # Open the database and load the objects to 
provide via the outputs + annotations = parameters['annotations'] + if not os.path.isabs(annotations): + annotations = os.path.join(root_folder, annotations) + + db = bob.db.mobio.Database(annotation_directory=annotations) + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups='world', + purposes='train'), + key=lambda x: (x.client_id, x.id)) + + return [ Entry(x.client_id, x.id, db.annotations(x), x.make_path(root_folder, '.png')) + for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Templates(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/uint64/1" + - template_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + Several "image" are associated with a given "template_id". + Several "template_id" are associated with a given "client_id". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | template_id | | template_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | client_id | + ----------------------------------------------------------------------------------------------- + + Note: for this particular database, there is only one "template_id" + per "client_id". 
+ """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'eye_centers', 'image']) + + # Open the database and load the objects to provide via the outputs + annotations = parameters['annotations'] + if not os.path.isabs(annotations): + annotations = os.path.join(root_folder, annotations) + + db = bob.db.mobio.Database(annotation_directory=annotations) + + template_ids = db.model_ids(protocol=parameters['protocol'], + groups=parameters['group']) + + entries = [] + + for template_id in template_ids: + objs = db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + purposes='enroll', + model_ids=[template_id]) + + entries.extend([ Entry(x.client_id, template_id, x.id, db.annotations(x), + x.make_path(root_folder, '.png')) + for x in objs ]) + + return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'template_id': + return { + 'value': np.uint64(obj.template_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Probes(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/uint64/1" + - probe_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + - template_ids: "{{ system_user.username 
}}/array_1d_uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + One "probe_id" is associated with a given "image". + Several "image" are associated with a given "client_id". + Several "client_id" are associated with a given "template_ids". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | template_ids | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', + 
'eye_centers', 'image']) + + # Open the database and load the objects to provide via the outputs + annotations = parameters['annotations'] + if not os.path.isabs(annotations): + annotations = os.path.join(root_folder, annotations) + + db = bob.db.mobio.Database(annotation_directory=annotations) + + template_ids = sorted(db.model_ids(protocol=parameters['protocol'], + groups=parameters['group'])) + + + template_probes = {} + for template_id in template_ids: + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + purposes='probe', + model_ids=[template_id]), + key=lambda x: (x.client_id, x.id)) + + template_probes[template_id] = [ p.id for p in objs ] + + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + purposes='probe'), + key=lambda x: (x.client_id, x.id)) + + entries = [] + for obj in objs: + templates = [ template_id for template_id in template_ids + if obj.id in template_probes[template_id] ] + + entries.append( Entry(templates, obj.client_id, obj.id, obj.id, + db.annotations(obj), obj.make_path(root_folder, '.png')) ) + + return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids, + x.client_id, x.probe_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'template_ids': + return { + 'value': np.uint64(obj.template_ids) + } + + elif output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'probe_id': + return { + 'value': np.uint64(obj.probe_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + 
+#---------------------------------------------------------- + + +def setup_tests(): + # Install a mock load function for the images + def mock_load(root_folder): + return np.ndarray((3, 10, 20), dtype=np.uint8) + + bob.io.base.load = mock_load + + +#---------------------------------------------------------- + + +# Test the behavior of the views (on fake data) +if __name__ == '__main__': + + setup_tests() + + # Note: This database can't be tested without the actual data, since + # the actual files are needed by this implementation + + view = Train() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='male', + annotations='../IMAGE_ANNOTATIONS', + ) + ) + view.get('client_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) + + + view = Templates() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='male', + group='dev', + annotations='../IMAGE_ANNOTATIONS', + ) + ) + view.get('client_id', 0) + view.get('template_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) + + + view = Probes() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='male', + group='dev', + annotations='../IMAGE_ANNOTATIONS', + ) + ) + view.get('template_ids', 0) + view.get('client_id', 0) + view.get('probe_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) diff --git a/advanced/databases/mobio/4.rst b/advanced/databases/mobio/4.rst new file mode 100644 index 0000000..7e50160 --- /dev/null +++ b/advanced/databases/mobio/4.rst @@ -0,0 +1,74 @@ +.. Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. 
and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. + + +The MOBIO Database of Faces +--------------------------- + +Changelog +========= + +* **Version 4**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 3**, 31/Oct/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 2**, 20/Jan/2016: + + - Port to Bob v2 + +* **Version 1**, 21/Oct/2014: + + - Initial release + + +Description +=========== + +The `MOBIO database <https://www.idiap.ch/dataset/mobio>`_ consists of +bi-modal (audio and video) data taken from 152 people. The database has a +female-male ratio or nearly 1:2 (100 males and 52 females) and was collected +from August 2008 until July 2010 in six different sites from five different +countries. This led to a diverse bi-modal database with both native and +non-native English speakers. + +.. image:: https://www.idiap.ch/dataset/mobio/video1.jpg + :width: 300px +.. image:: https://www.idiap.ch/dataset/mobio/video2.jpg + :width: 300px + +In total 12 sessions were captured for each client: 6 sessions for Phase I and 6 +sessions for Phase II. The Phase I data consists of 21 questions with the +question types ranging from: Short Response Questions, Short Response Free +Speech, Set Speech, and Free Speech. 
The Phase II data consists of 11 questions +with the question types ranging from: Short Response Questions, Set Speech, and +Free Speech. A more detailed description of the questions asked of the clients +is provided below. + +The database was recorded using two mobile devices: a mobile phone and a laptop +computer. The mobile phone used to capture the database was a NOKIA N93i mobile +while the laptop computer was a standard 2008 MacBook. The laptop was only used +to capture part of the first session, this first session consists of data +captured on both the laptop and the mobile phone. + +This deployment of the MOBIO database only consists of the images. diff --git a/advanced/databases/putvein/4.json b/advanced/databases/putvein/4.json new file mode 100644 index 0000000..c81cc3a --- /dev/null +++ b/advanced/databases/putvein/4.json @@ -0,0 +1,1646 @@ +{ + "description": "The PUT Vein Database", + "root_folder": "/idiap/resource/database/PUT_Vein_Dataset", + "protocols": [ + { + "name": "palm-L_1", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "L_1", + "kind": "palm", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "L_1", + "kind": "palm", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "L_1", + "kind": "palm", + "group": "dev", + "purpose": "probe" + }, + 
"template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "L_1", + "kind": "palm", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "L_1", + "kind": "palm", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "palm-L_4", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "L_4", + "kind": "palm", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "L_4", + "kind": "palm", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "L_4", + "kind": "palm", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + 
"parameters": { + "protocol": "L_4", + "kind": "palm", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "L_4", + "kind": "palm", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "palm-R_1", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "R_1", + "kind": "palm", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "R_1", + "kind": "palm", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "R_1", + "kind": "palm", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "R_1", + "kind": "palm", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": 
"View", + "name": "eval_probes", + "parameters": { + "protocol": "R_1", + "kind": "palm", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "palm-R_4", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "R_4", + "kind": "palm", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "R_4", + "kind": "palm", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "R_4", + "kind": "palm", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "R_4", + "kind": "palm", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "R_4", + "kind": "palm", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "palm-RL_1", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + 
"image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "RL_1", + "kind": "palm", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "RL_1", + "kind": "palm", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "RL_1", + "kind": "palm", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "RL_1", + "kind": "palm", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "RL_1", + "kind": "palm", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "palm-RL_4", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "RL_4", + "kind": "palm", + "group": "train" + }, + "name": "train" + }, + { + 
"outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "RL_4", + "kind": "palm", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "RL_4", + "kind": "palm", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "RL_4", + "kind": "palm", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "RL_4", + "kind": "palm", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "palm-LR_1", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "LR_1", + "kind": "palm", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "LR_1", 
+ "kind": "palm", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "LR_1", + "kind": "palm", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "LR_1", + "kind": "palm", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "LR_1", + "kind": "palm", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "palm-LR_4", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "LR_4", + "kind": "palm", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "LR_4", + "kind": "palm", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + 
"parameters": { + "protocol": "LR_4", + "kind": "palm", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "LR_4", + "kind": "palm", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "LR_4", + "kind": "palm", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "palm-R_BEAT_1", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "R_BEAT_1", + "kind": "palm", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "R_BEAT_1", + "kind": "palm", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "R_BEAT_1", + "kind": "palm", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + 
"client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "R_BEAT_1", + "kind": "palm", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "R_BEAT_1", + "kind": "palm", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "palm-R_BEAT_4", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "R_BEAT_4", + "kind": "palm", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "R_BEAT_4", + "kind": "palm", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "R_BEAT_4", + "kind": "palm", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "R_BEAT_4", + "kind": "palm", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + 
{ + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "R_BEAT_4", + "kind": "palm", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "wrist-L_1", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "L_1", + "kind": "wrist", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "L_1", + "kind": "wrist", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "L_1", + "kind": "wrist", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "L_1", + "kind": "wrist", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "L_1", + "kind": "wrist", + "group": "eval", + "purpose": 
"probe" + }, + "template": "probe" + } + ] + }, + { + "name": "wrist-L_4", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "L_4", + "kind": "wrist", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "L_4", + "kind": "wrist", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "L_4", + "kind": "wrist", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "L_4", + "kind": "wrist", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "L_4", + "kind": "wrist", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "wrist-R_1", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + 
"view": "View", + "template": "train", + "parameters": { + "protocol": "R_1", + "kind": "wrist", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "R_1", + "kind": "wrist", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "R_1", + "kind": "wrist", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "R_1", + "kind": "wrist", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "R_1", + "kind": "wrist", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "wrist-R_4", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "R_4", + "kind": "wrist", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", 
+ "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "R_4", + "kind": "wrist", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "R_4", + "kind": "wrist", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "R_4", + "kind": "wrist", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "R_4", + "kind": "wrist", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "wrist-RL_1", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "RL_1", + "kind": "wrist", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "RL_1", + "kind": "wrist", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + 
"image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "RL_1", + "kind": "wrist", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "RL_1", + "kind": "wrist", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "RL_1", + "kind": "wrist", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "wrist-RL_4", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "RL_4", + "kind": "wrist", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "RL_4", + "kind": "wrist", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "RL_4", + "kind": "wrist", + "group": "dev", + "purpose": "probe" + }, + 
"template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "RL_4", + "kind": "wrist", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "RL_4", + "kind": "wrist", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "wrist-LR_1", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "LR_1", + "kind": "wrist", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "LR_1", + "kind": "wrist", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "LR_1", + "kind": "wrist", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + 
"parameters": { + "protocol": "LR_1", + "kind": "wrist", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "LR_1", + "kind": "wrist", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "wrist-LR_4", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "LR_4", + "kind": "wrist", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "LR_4", + "kind": "wrist", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "LR_4", + "kind": "wrist", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "LR_4", + "kind": "wrist", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, 
+ "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "LR_4", + "kind": "wrist", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "wrist-R_BEAT_1", + "template": "advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "R_BEAT_1", + "kind": "wrist", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "R_BEAT_1", + "kind": "wrist", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "R_BEAT_1", + "kind": "wrist", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "R_BEAT_1", + "kind": "wrist", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "R_BEAT_1", + "kind": "wrist", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + }, + { + "name": "wrist-R_BEAT_4", + "template": 
"advanced_vein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "template": "train", + "parameters": { + "protocol": "R_BEAT_4", + "kind": "wrist", + "group": "train" + }, + "name": "train" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "dev_templates", + "parameters": { + "protocol": "R_BEAT_4", + "kind": "wrist", + "group": "dev", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "dev_probes", + "parameters": { + "protocol": "R_BEAT_4", + "kind": "wrist", + "group": "dev", + "purpose": "probe" + }, + "template": "probe" + }, + { + "outputs": { + "model_id": "{{ system_user.username }}/text/1", + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "TemplateView", + "name": "eval_templates", + "parameters": { + "protocol": "R_BEAT_4", + "kind": "wrist", + "group": "eval", + "purpose": "enroll" + }, + "template": "enroll" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "client_id": "{{ system_user.username }}/uint64/1" + }, + "view": "View", + "name": "eval_probes", + "parameters": { + "protocol": "R_BEAT_4", + "kind": "wrist", + "group": "eval", + "purpose": "probe" + }, + "template": "probe" + } + ] + } + ] +} diff --git a/advanced/databases/putvein/4.py b/advanced/databases/putvein/4.py new file mode 100644 index 0000000..e7e8152 --- /dev/null +++ b/advanced/databases/putvein/4.py @@ -0,0 +1,222 @@ +############################################################################### +# # +# 
Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. 
# +# # +############################################################################### + +import os +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View as BaseView + +import bob.db.putvein +import bob.ip.color + + +#---------------------------------------------------------- + + +class View(BaseView): + """Outputs: + - image: "{{ system_user.username }}/array_2d_uint8/1" + - client_id: "{{ system_user.username }}/uint64/1" + + Several "image" are associated with a given "client_id" + + --------- --------- --------- --------- --------- --------- + | image | | image | | image | | image | | image | | image | + --------- --------- --------- --------- --------- --------- + ----------------------------- ----------------------------- + | client_id | | client_id | + ----------------------------- ----------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.putvein.Database() + + objs = sorted(db.objects(protocol=parameters['protocol'], + purposes=parameters.get('purpose', None), + groups=[parameters['group']], + kinds=[parameters['kind']]), + key=lambda x: x.client_id) + + return [ Entry(x.client_id, x.make_path(root_folder)) + for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'image': + """ + The image returned by the ``bob.db.putvein`` is RGB (with shape + (3, 768, 1024)). This method converts image to a grayscale + (shape (768, 1024)) and then rotates image by 270 deg so that + images can be used with ``bob.bio.vein`` algorithms designed for + the ``bob.db.biowave_v1`` database. + Output images dimensions: (1024, 768). 
+ """ + color_image = bob.io.base.load(obj.image) + grayscale_image = bob.ip.color.rgb_to_gray(color_image) + grayscale_image = np.rot90(grayscale_image, k=3) + + return { + 'value': grayscale_image + } + + +#---------------------------------------------------------- + + +class TemplateView(BaseView): + """Outputs: + - image: "{{ system_user.username }}/array_2d_uint8/1" + - model_id: "{{ system_user.username }}/text/1" + - client_id: "{{ system_user.username }}/uint64/1" + + Several "image" are associated with a given "model_id". + Several "model_id" are associated with a given "client_id". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | model_id | | model_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | client_id | + ----------------------------------------------------------------------------------------------- + + Note: for this particular database, there is only one "image" + per "model_id". 
+ """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'model_id', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.putvein.Database() + + model_ids = db.model_ids(protocol=parameters['protocol'], + groups=[parameters['group']], + kinds=[parameters['kind']]) + + entries = [] + + for model_id in model_ids: + objs = db.objects(protocol=parameters['protocol'], + purposes=parameters.get('purpose', None), + groups=[parameters['group']], + kinds=[parameters['kind']], + model_ids=[model_id]) + + entries.extend([ Entry(x.client_id, model_id, x.make_path(root_folder)) + for x in objs ]) + + return sorted(entries, key=lambda x: (x.client_id, x.model_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'model_id': + return { + 'text': str(obj.model_id) + } + + elif output == 'image': + """ + The image returned by the ``bob.db.putvein`` is RGB (with shape + (3, 768, 1024)). This method converts image to a grayscale + (shape (768, 1024)) and then rotates image by 270 deg so that + images can be used with ``bob.bio.vein`` algorithms designed for + the ``bob.db.biowave_v1`` database. + Output images dimensions: (1024, 768). 
+ """ + color_image = bob.io.base.load(obj.image) + grayscale_image = bob.ip.color.rgb_to_gray(color_image) + grayscale_image = np.rot90(grayscale_image, k=3) + + return { + 'value': grayscale_image + } + + +#---------------------------------------------------------- + + +def setup_tests(): + # Install a mock methods + def mock_load(filename): + return np.ndarray((3, 10, 20), dtype=np.uint8) + + bob.io.base.load = mock_load + + +#---------------------------------------------------------- + + +# Test the behavior of the views (on fake data) +if __name__ == '__main__': + + setup_tests() + + view = View() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol = 'LR_4', + kind = 'wrist', + group = 'dev', + purpose = 'probe', + ) + ) + view.get('client_id', 0) + view.get('image', 0) + + + view = TemplateView() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol = 'LR_4', + kind = 'wrist', + group = 'dev', + purpose = 'enroll', + ) + ) + view.get('client_id', 0) + view.get('model_id', 0) + view.get('image', 0) diff --git a/advanced/databases/putvein/4.rst b/advanced/databases/putvein/4.rst new file mode 100644 index 0000000..54a642e --- /dev/null +++ b/advanced/databases/putvein/4.rst @@ -0,0 +1,138 @@ +.. Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. 
The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. + + +The PUT Vein Database +--------------------- + +Changelog +========= + +* **Version 4**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 3**, 02/Nov/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 2**, 10/Feb/2017: + + - Major update to make database compatible with BIOWAVE database toolchains + +* **Version 1**, 10/Feb/2016: + + - Initial release + + +Description +=========== + +The PUT Vein pattern database consists of 2400 images presenting human vein +patterns. Half of the images contain a palmar vein pattern (1200 images) and +the other half contains a wrist vein pattern (another 1200 images). Data was +acquired from both hands of 50 students, which means it has 100 different +patterns for the palm and wrist regions. Pictures were taken in 3 series, 4 pictures +each, with at least one week interval between each series. In the case of the palm +region, volunteers were asked to put their hand on the device to cover the +acquisition window, in such a way that the line below their fingers coincided with its +edge. No additional positioning systems were used. In the case of the wrist region, only +a construction allowing the palm and wrist to be placed in a comfortable way was used to +help position the hand. + +Original images have **768 x 1024** resolution and are saved as 24-bit bitmap. +The database consists of 2 main sections: hand and wrist, which are further divided +by person. Each person's folder has sub-sections for the left and right hand, which +are separated into series. + +More information about the original database - `PUT Vein database +<http://biometrics.put.poznan.pl/vein-dataset/>`_.
+ +In order to enable the use of the database with BIOWAVE toolchains, this database +implementation converts images to grey-scale (shape (768, 1024)) and then +rotates them by 270 deg so that the vein pattern in wrist images appears +vertical (the part of the wrist closer to the palm appears at the top of the image). + +In this implementation we use both - the original 50 client x 2 hand data - in the +database clients with IDs between ``1`` and ``50`` - and also - mirrored +file representations (left hand / palm data mirrored to look like right hand +data and vice versa) - clients with IDs between ``51`` and ``100``. + + +Protocols +========= + +Currently (as of 10.02.2017) there are 10x2 protocols, which also match the +protocols in ``bob.bio.vein``: + +- ``palm-L_4``, +- ``palm-R_4``, +- ``palm-RL_4``, +- ``palm-LR_4``, +- ``palm-R_BEAT_4``, +- ``palm-L_1``, +- ``palm-R_1``, +- ``palm-RL_1``, +- ``palm-LR_1``, +- ``palm-R_BEAT_1``, +- ``wrist-L_4``, +- ``wrist-R_4``, +- ``wrist-RL_4``, +- ``wrist-LR_4``, +- ``wrist-R_BEAT_4``, +- ``wrist-L_1``, +- ``wrist-R_1``, +- ``wrist-RL_1``, +- ``wrist-LR_1``, +- ``wrist-R_BEAT_1``. + +The protocols (except the ``BEAT`` protocols) still contain the original protocol +(``L``, ``R``, ``RL``, ``LR``) data; the difference is whether each enroll +model is constructed using all 4 of a hand's images (protocol name ends with ``4``), +or each enroll image is used as a model (the corresponding protocol names end with +``1``).
+ +The original protocols consists of following data, ``world`` purpose dataset +consists of the **same** data, as ``dev`` purpose dataset, so won't be +separately described: + ++-------------+-----------------------------------------+-------------------------------------------+ +|**protocol** | ``dev`` | ``eval`` | ++-------------+-----------------------------------------+-------------------------------------------+ +| L | IDs 1-25, un-mirrored left hand images | IDs 26-50, un-mirrored left hand images | ++-------------+-----------------------------------------+-------------------------------------------+ +| R | IDs 1-25, un-mirrored right hand images | IDs 26-50, un-mirrored right hand images | ++-------------+-----------------------------------------+-------------------------------------------+ +| RL | IDs 1-50, un-mirrored right hand images |IDs 51-100, mirrored left hand images | +| | |(to represent right hand) | ++-------------+-----------------------------------------+-------------------------------------------+ +| LR | IDs 1-50, un-mirrored left hand images |IDs 51-100, mirrored right hand images | +| | |(to represent left hand) | ++-------------+-----------------------------------------+-------------------------------------------+ + +The new test protocols (ends with ``R_BEAT_1`` and ``R_BEAT_4``) are intended +for use with ``bob.bio.vein`` and ``BEAT`` platform for quick tests, if +necessary. 
Both protocols consist of such data: + ++-------------+---------------------------------------------------------+------------------------------------------------------------+ +|**protocol** | ``dev`` | ``eval`` | ++-------------+---------------------------------------------------------+------------------------------------------------------------+ +| R_BEAT | IDs ``1``, ``2``, un-mirrored right hand / wrist images | IDs ``26``, ``27``, un-mirrored right hand / wrist images | ++-------------+---------------------------------------------------------+------------------------------------------------------------+ diff --git a/advanced/databases/replay/4.json b/advanced/databases/replay/4.json new file mode 100644 index 0000000..45edb1d --- /dev/null +++ b/advanced/databases/replay/4.json @@ -0,0 +1,1122 @@ +{ + "description": "The Replay Database", + "root_folder": "/idiap/group/replay/database/protocols/replayattack-database", + "protocols": [ + { + "name": "grandtest", + "template": "simple_face_antispoofing", + "sets": [ + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "train", + "enroll": false, + "protocol": "grandtest" + }, + "name": "train", + "template": "train", + "view": "All" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "devel", + "enroll": false, + "protocol": "grandtest" + }, + 
"name": "dev_probes", + "template": "probes", + "view": "All" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "test", + "enroll": false, + "protocol": "grandtest" + }, + "name": "test_probes", + "template": "probes", + "view": "All" + } + ] + }, + { + "name": "print", + "template": "simple_face_antispoofing", + "sets": [ + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "train", + "enroll": false, + "protocol": "print" + }, + "name": "train", + "template": "train", + "view": "All" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "devel", + "enroll": false, + "protocol": "print" + }, + "name": "dev_probes", + "template": "probes", + "view": "All" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + 
"class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "test", + "enroll": false, + "protocol": "print" + }, + "name": "test_probes", + "template": "probes", + "view": "All" + } + ] + }, + { + "name": "photo", + "template": "simple_face_antispoofing", + "sets": [ + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "train", + "enroll": false, + "protocol": "photo" + }, + "name": "train", + "template": "train", + "view": "All" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "devel", + "enroll": false, + "protocol": "photo" + }, + "name": "dev_probes", + "template": "probes", + "view": "All" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "test", + "enroll": false, + "protocol": "photo" + }, + "name": "test_probes", + "template": "probes", + "view": "All" + } + ] + }, + { + "name": "video", + "template": "simple_face_antispoofing", + "sets": [ + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": 
"{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "train", + "enroll": false, + "protocol": "video" + }, + "name": "train", + "template": "train", + "view": "All" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "devel", + "enroll": false, + "protocol": "video" + }, + "name": "dev_probes", + "template": "probes", + "view": "All" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "test", + "enroll": false, + "protocol": "video" + }, + "name": "test_probes", + "template": "probes", + "view": "All" + } + ] + }, + { + "name": "mobile", + "template": "simple_face_antispoofing", + "sets": [ + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "train", + "enroll": false, + "protocol": "mobile" + }, + "name": "train", + 
"template": "train", + "view": "All" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "devel", + "enroll": false, + "protocol": "mobile" + }, + "name": "dev_probes", + "template": "probes", + "view": "All" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "test", + "enroll": false, + "protocol": "mobile" + }, + "name": "test_probes", + "template": "probes", + "view": "All" + } + ] + }, + { + "name": "highdef", + "template": "simple_face_antispoofing", + "sets": [ + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "train", + "enroll": false, + "protocol": "highdef" + }, + "name": "train", + "template": "train", + "view": "All" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ 
system_user.username }}/text/1" + }, + "parameters": { + "group": "devel", + "enroll": false, + "protocol": "highdef" + }, + "name": "dev_probes", + "template": "probes", + "view": "All" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "test", + "enroll": false, + "protocol": "highdef" + }, + "name": "test_probes", + "template": "probes", + "view": "All" + } + ] + }, + { + "name": "verification_grandtest", + "template": "advanced_face_antispoofing", + "sets": [ + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "train", + "enroll": true, + "protocol": "grandtest" + }, + "name": "train", + "template": "train", + "view": "All" + }, + { + "outputs": { + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1" + }, + "parameters": { + "group": "devel", + "protocol": "grandtest" + }, + "name": "dev_templates", + "template": "templates", + "view": "Templates" + }, + { + "outputs": { + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ 
system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "devel", + "protocol": "grandtest" + }, + "name": "dev_probes_real", + "template": "probes_real", + "view": "ProbesReal" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "devel", + "protocol": "grandtest" + }, + "name": "dev_probes_attack", + "template": "probes_attack", + "view": "ProbesAttack" + }, + { + "outputs": { + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1" + }, + "parameters": { + "group": "test", + "protocol": "grandtest" + }, + "name": "test_templates", + "template": "templates", + "view": "Templates" + }, + { + "outputs": { + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "test", + "protocol": "grandtest" + }, + "name": "test_probes_real", + "template": "probes_real", + "view": "ProbesReal" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "probe_id": "{{ 
system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "test", + "protocol": "grandtest" + }, + "name": "test_probes_attack", + "template": "probes_attack", + "view": "ProbesAttack" + } + ] + }, + { + "name": "verification_print", + "template": "advanced_face_antispoofing", + "sets": [ + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "train", + "enroll": true, + "protocol": "print" + }, + "name": "train", + "template": "train", + "view": "All" + }, + { + "outputs": { + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1" + }, + "parameters": { + "group": "devel", + "protocol": "print" + }, + "name": "dev_templates", + "template": "templates", + "view": "Templates" + }, + { + "outputs": { + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "devel", + "protocol": "print" + }, 
+ "name": "dev_probes_real", + "template": "probes_real", + "view": "ProbesReal" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "devel", + "protocol": "print" + }, + "name": "dev_probes_attack", + "template": "probes_attack", + "view": "ProbesAttack" + }, + { + "outputs": { + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1" + }, + "parameters": { + "group": "test", + "protocol": "print" + }, + "name": "test_templates", + "template": "templates", + "view": "Templates" + }, + { + "outputs": { + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "test", + "protocol": "print" + }, + "name": "test_probes_real", + "template": "probes_real", + "view": "ProbesReal" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ 
system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "test", + "protocol": "print" + }, + "name": "test_probes_attack", + "template": "probes_attack", + "view": "ProbesAttack" + } + ] + }, + { + "name": "verification_photo", + "template": "advanced_face_antispoofing", + "sets": [ + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "train", + "enroll": true, + "protocol": "photo" + }, + "name": "train", + "template": "train", + "view": "All" + }, + { + "outputs": { + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1" + }, + "parameters": { + "group": "devel", + "protocol": "photo" + }, + "name": "dev_templates", + "template": "templates", + "view": "Templates" + }, + { + "outputs": { + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "devel", + "protocol": "photo" + }, + "name": "dev_probes_real", + "template": "probes_real", + "view": "ProbesReal" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": 
"{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "devel", + "protocol": "photo" + }, + "name": "dev_probes_attack", + "template": "probes_attack", + "view": "ProbesAttack" + }, + { + "outputs": { + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1" + }, + "parameters": { + "group": "test", + "protocol": "photo" + }, + "name": "test_templates", + "template": "templates", + "view": "Templates" + }, + { + "outputs": { + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "test", + "protocol": "photo" + }, + "name": "test_probes_real", + "template": "probes_real", + "view": "ProbesReal" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "test", + "protocol": "photo" + }, + "name": "test_probes_attack", + "template": "probes_attack", + "view": 
"ProbesAttack" + } + ] + }, + { + "name": "verification_video", + "template": "advanced_face_antispoofing", + "sets": [ + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "train", + "enroll": true, + "protocol": "video" + }, + "name": "train", + "template": "train", + "view": "All" + }, + { + "outputs": { + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1" + }, + "parameters": { + "group": "devel", + "protocol": "video" + }, + "name": "dev_templates", + "template": "templates", + "view": "Templates" + }, + { + "outputs": { + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "devel", + "protocol": "video" + }, + "name": "dev_probes_real", + "template": "probes_real", + "view": "ProbesReal" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ 
system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "devel", + "protocol": "video" + }, + "name": "dev_probes_attack", + "template": "probes_attack", + "view": "ProbesAttack" + }, + { + "outputs": { + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1" + }, + "parameters": { + "group": "test", + "protocol": "video" + }, + "name": "test_templates", + "template": "templates", + "view": "Templates" + }, + { + "outputs": { + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "test", + "protocol": "video" + }, + "name": "test_probes_real", + "template": "probes_real", + "view": "ProbesReal" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "test", + "protocol": "video" + }, + "name": "test_probes_attack", + "template": "probes_attack", + "view": "ProbesAttack" + } + ] + }, + { + "name": "verification_mobile", + "template": "advanced_face_antispoofing", + "sets": [ + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username 
}}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, + "parameters": { + "group": "train", + "enroll": true, + "protocol": "mobile" + }, + "name": "train", + "template": "train", + "view": "All" + }, + { + "outputs": { + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1" + }, + "parameters": { + "group": "devel", + "protocol": "mobile" + }, + "name": "dev_templates", + "template": "templates", + "view": "Templates" + }, + { + "outputs": { + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "devel", + "protocol": "mobile" + }, + "name": "dev_probes_real", + "template": "probes_real", + "view": "ProbesReal" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "devel", + "protocol": "mobile" + }, + "name": "dev_probes_attack", + "template": "probes_attack", + "view": "ProbesAttack" + }, + { + "outputs": { + "annotations": 
"{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1" + }, + "parameters": { + "group": "test", + "protocol": "mobile" + }, + "name": "test_templates", + "template": "templates", + "view": "Templates" + }, + { + "outputs": { + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "test", + "protocol": "mobile" + }, + "name": "test_probes_real", + "template": "probes_real", + "view": "ProbesReal" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "test", + "protocol": "mobile" + }, + "name": "test_probes_attack", + "template": "probes_attack", + "view": "ProbesAttack" + } + ] + }, + { + "name": "verification_highdef", + "template": "advanced_face_antispoofing", + "sets": [ + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "class": "{{ system_user.username }}/text/1" + }, 
+ "parameters": { + "group": "train", + "enroll": true, + "protocol": "highdef" + }, + "name": "train", + "template": "train", + "view": "All" + }, + { + "outputs": { + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1" + }, + "parameters": { + "group": "devel", + "protocol": "highdef" + }, + "name": "dev_templates", + "template": "templates", + "view": "Templates" + }, + { + "outputs": { + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "devel", + "protocol": "highdef" + }, + "name": "dev_probes_real", + "template": "probes_real", + "view": "ProbesReal" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "devel", + "protocol": "highdef" + }, + "name": "dev_probes_attack", + "template": "probes_attack", + "view": "ProbesAttack" + }, + { + "outputs": { + "annotations": "{{ system_user.username }}/bounding_box_video/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ 
system_user.username }}/uint64/1" + }, + "parameters": { + "group": "test", + "protocol": "highdef" + }, + "name": "test_templates", + "template": "templates", + "view": "Templates" + }, + { + "outputs": { + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "test", + "protocol": "highdef" + }, + "name": "test_probes_real", + "template": "probes_real", + "view": "ProbesReal" + }, + { + "outputs": { + "attack_support": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "video": "{{ system_user.username }}/array_4d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "annotations": "{{ system_user.username }}/bounding_box_video/1" + }, + "parameters": { + "group": "test", + "protocol": "highdef" + }, + "name": "test_probes_attack", + "template": "probes_attack", + "view": "ProbesAttack" + } + ] + } + ] +} diff --git a/advanced/databases/replay/4.py b/advanced/databases/replay/4.py new file mode 100644 index 0000000..ae1733d --- /dev/null +++ b/advanced/databases/replay/4.py @@ -0,0 +1,571 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. 
For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. # +# # +############################################################################### + +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View + +import bob.io.base +import bob.io.video +import bob.db.replay + +from bob.db.replay.driver import Interface + +INFO = Interface() +SQLITE_FILE = INFO.files()[0] + + +#---------------------------------------------------------- + + +class All(View): + """Outputs: + - video: "{{ system_user.username }}/array_4d_uint8/1" + - annotations: "{{ system_user.username }}/bounding_box_video/1" + - file_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + - attack_support: "{{ system_user.username }}/text/1" + - class: "{{ system_user.username }}/text/1" + + One "file_id" is associated with a given "video". + One "annotations" is associated with a given "video". + Several "video" are associated with a given "client_id". + Several "client_id" are associated with a given "class". + Several "attack_support" are associated with a given "class". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | video | | video | | video | | video | | video | | video | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | annotations | | annotations | | annotations | | annotations | | annotations | | annotations | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ------------------------------- ------------------------------- ------------------------------- + | client_id | | client_id | | client_id | + ------------------------------- ------------------------------- ------------------------------- + --------------------------------------------------------------- ------------------------------- + | attack_support | | attack_support | + --------------------------------------------------------------- ------------------------------- + ----------------------------------------------------------------------------------------------- + | class | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['cls', 'attack_support', 'client_id', 'file_id', + 'annotations', 'video']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.replay.Database() + + objs = [] + + if parameters['enroll']: + objs.extend([ ('enroll', '', x) + for x in sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + cls='enroll'), + key=lambda x: (x.client_id, x.id)) + ]) 
+ + objs.extend([ ('real', '', x) + for x in sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + cls='real'), + key=lambda x: (x.client_id, x.id)) + ]) + + objs.extend([ ('attack', 'fixed', x) + for x in sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + cls='attack', + support='fixed'), + key=lambda x: (x.client_id, x.id)) + ]) + + objs.extend([ ('attack', 'hand', x) + for x in sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + cls='attack', + support='hand'), + key=lambda x: (x.client_id, x.id)) + ]) + + + return [ Entry(x[0], x[1], x[2].client_id, x[2].id, x[2].bbx(root_folder), + x[2].videofile(root_folder)) + for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'class': + return { + 'text': str(obj.cls) + } + + elif output == 'attack_support': + return { + 'text': str(obj.attack_support) + } + + elif output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'annotations': + annotations_list = [] + for i in range(obj.annotations.shape[0]): + annotations_list.append({ + 'frame_id': np.uint64(obj.annotations[i][0]), + 'top-left-x': np.int32(obj.annotations[i][1]), + 'top-left-y': np.int32(obj.annotations[i][2]), + 'width': np.int32(obj.annotations[i][3]), + 'height': np.int32(obj.annotations[i][4]) + }) + + return { + 'value': annotations_list + } + + elif output == 'video': + return { + 'value': bob.io.base.load(obj.video) + } + + +#---------------------------------------------------------- + + +class Templates(View): + """Outputs: + - video: "{{ system_user.username }}/array_4d_uint8/1" + - annotations: "{{ system_user.username }}/bounding_box_video/1" + - file_id: "{{ system_user.username }}/uint64/1" + - template_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + + One 
"file_id" is associated with a given "video". + One "annotations" is associated with a given "video". + Several "video" are associated with a given "template_id". + Several "template_id" are associated with a given "client_id". + + --------------- --------------- --------------- --------------- + | video | | video | | video | | video | + --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- + | annotations | | annotations | | annotations | | annotations | + --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- + ------------------------------- ------------------------------- + | template_id | | template_id | + ------------------------------- ------------------------------- + --------------------------------------------------------------- + | client_id | + --------------------------------------------------------------- + + Note: for this particular database, there is only one "template_id" + per "client_id". 
+ """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', + 'annotations', 'video']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.replay.Database() + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=[parameters['group']], + cls='enroll'), + key=lambda x: (x.client_id, x.id)) + + + return [ Entry(x.client_id, x.client_id, x.id, x.bbx(root_folder), + x.videofile(root_folder)) + for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'template_id': + return { + 'value': np.uint64(obj.template_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'annotations': + annotations_list = [] + for i in range(obj.annotations.shape[0]): + annotations_list.append({ + 'frame_id': np.uint64(obj.annotations[i][0]), + 'top-left-x': np.int32(obj.annotations[i][1]), + 'top-left-y': np.int32(obj.annotations[i][2]), + 'width': np.int32(obj.annotations[i][3]), + 'height': np.int32(obj.annotations[i][4]) + }) + + return { + 'value': annotations_list + } + + elif output == 'video': + return { + 'value': bob.io.base.load(obj.video) + } + + +#---------------------------------------------------------- + + +class ProbesReal(View): + """Outputs: + - video: "{{ system_user.username }}/array_4d_uint8/1" + - annotations: "{{ system_user.username }}/bounding_box_video/1" + - file_id: "{{ system_user.username }}/uint64/1" + - probe_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + - template_ids: "{{ system_user.username }}/array_1d_uint64/1" + + One "file_id" is associated with a given "video". + One "annotations" is associated with a given "video". + One "probe_id" is associated with a given "video". + Several "video" are associated with a given "client_id". 
+ Several "client_id" are associated with a given "template_ids". + + --------------- --------------- --------------- --------------- + | video | | video | | video | | video | + --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- + | annotations | | annotations | | annotations | | annotations | + --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- + | probe_id | | probe_id | | probe_id | | probe_id | + --------------- --------------- --------------- --------------- + ------------------------------- ------------------------------- + | client_id | | client_id | + ------------------------------- ------------------------------- + --------------------------------------------------------------- + | template_ids | + --------------------------------------------------------------- + + Note: for this particular database, there is only one "template_ids" + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', + 'annotations', 'video']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.replay.Database() + + template_ids = sorted([c.id for c in db.clients() if c.set == parameters['group']]) + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=[parameters['group']], + cls='real'), + key=lambda x: (x.client_id, x.id)) + + + return [ Entry(template_ids, x.client_id, x.id, x.id, x.bbx(root_folder), + x.videofile(root_folder)) + for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'template_ids': + return { + 'value': np.uint64(obj.template_ids) + } + + elif output == 'client_id': 
+ return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'probe_id': + return { + 'value': np.uint64(obj.probe_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'annotations': + annotations_list = [] + for i in range(obj.annotations.shape[0]): + annotations_list.append({ + 'frame_id': np.uint64(obj.annotations[i][0]), + 'top-left-x': np.int32(obj.annotations[i][1]), + 'top-left-y': np.int32(obj.annotations[i][2]), + 'width': np.int32(obj.annotations[i][3]), + 'height': np.int32(obj.annotations[i][4]) + }) + + return { + 'value': annotations_list + } + + elif output == 'video': + return { + 'value': bob.io.base.load(obj.video) + } + + +#---------------------------------------------------------- + + +class ProbesAttack(View): + """Outputs: + - video: "{{ system_user.username }}/array_4d_uint8/1" + - annotations: "{{ system_user.username }}/bounding_box_video/1" + - file_id: "{{ system_user.username }}/uint64/1" + - probe_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + - attack_support: "{{ system_user.username }}/text/1" + - template_ids: "{{ system_user.username }}/array_1d_uint64/1" + + One "file_id" is associated with a given "video". + One "annotations" is associated with a given "video". + One "probe_id" is associated with a given "video". + Several "video" are associated with a given "client_id". + Several "client_id" are associated with a given "template_ids". + Several "template_ids" are associated with a given "attack_support". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | video | | video | | video | | video | | video | | video | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | annotations | | annotations | | annotations | | annotations | | annotations | | annotations | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ------------------------------- ------------------------------- ------------------------------- + | client_id | | client_id | | client_id | + ------------------------------- ------------------------------- ------------------------------- + --------------------------------------------------------------- ------------------------------- + | template_ids | | template_ids | + --------------------------------------------------------------- ------------------------------- + ----------------------------------------------------------------------------------------------- + | attack_support | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['attack_support', 'template_ids', 'client_id', + 'probe_id', 'file_id', 'annotations', 'video']) + + # Open the database and 
load the objects to provide via the outputs + db = bob.db.replay.Database() + + objs = [] + + objs.extend([ ('fixed', x) + for x in sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + cls='attack', + support='fixed'), + key=lambda x: (x.client_id, x.id)) + ]) + + objs.extend([ ('hand', x) + for x in sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + cls='attack', + support='hand'), + key=lambda x: (x.client_id, x.id)) + ]) + + return [ Entry(x[0], [ x[1].client_id ], x[1].client_id, x[1].id, x[1].id, + x[1].bbx(root_folder), x[1].videofile(root_folder)) + for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'attack_support': + return { + 'text': str(obj.attack_support) + } + + elif output == 'template_ids': + return { + 'value': np.uint64(obj.template_ids) + } + + elif output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'probe_id': + return { + 'value': np.uint64(obj.probe_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'annotations': + annotations_list = [] + for i in range(obj.annotations.shape[0]): + annotations_list.append({ + 'frame_id': np.uint64(obj.annotations[i][0]), + 'top-left-x': np.int32(obj.annotations[i][1]), + 'top-left-y': np.int32(obj.annotations[i][2]), + 'width': np.int32(obj.annotations[i][3]), + 'height': np.int32(obj.annotations[i][4]) + }) + + return { + 'value': annotations_list + } + + elif output == 'video': + return { + 'value': bob.io.base.load(obj.video) + } + + +#---------------------------------------------------------- + + +def setup_tests(): + def mock_load(filename): + return np.ndarray((5, 3, 10, 20), dtype=np.uint8) + + def mock_bbx(obj, directory): + return np.array([(0, 1, 2, 3, 4, 5), (1, 10, 20, 30, 40, 50)]) + + bob.io.base.load = mock_load + bob.db.replay.File.bbx = mock_bbx + + 
+#---------------------------------------------------------- + + +# Test the behavior of the views (on fake data) +if __name__ == '__main__': + + setup_tests() + + view = All() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol="grandtest", + group="train", + enroll=False, + ) + ) + view.get('class', 0) + view.get('attack_support', 0) + view.get('client_id', 0) + view.get('file_id', 0) + view.get('annotations', 0) + view.get('video', 0) + + + view = Templates() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol="grandtest", + group="devel", + ) + ) + view.get('client_id', 0) + view.get('template_id', 0) + view.get('file_id', 0) + view.get('annotations', 0) + view.get('video', 0) + + + view = ProbesReal() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol="grandtest", + group="devel", + ) + ) + view.get('template_ids', 0) + view.get('client_id', 0) + view.get('probe_id', 0) + view.get('file_id', 0) + view.get('annotations', 0) + view.get('video', 0) + + + view = ProbesAttack() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol="grandtest", + group="devel", + ) + ) + view.get('attack_support', 0) + view.get('template_ids', 0) + view.get('client_id', 0) + view.get('probe_id', 0) + view.get('file_id', 0) + view.get('annotations', 0) + view.get('video', 0) diff --git a/advanced/databases/replay/4.rst b/advanced/databases/replay/4.rst new file mode 100644 index 0000000..18ed891 --- /dev/null +++ b/advanced/databases/replay/4.rst @@ -0,0 +1,225 @@ +.. Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. 
For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. + + +The Replay Database +------------------- + +Changelog +========= + +* **Version 4**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 3**, 02/Nov/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 2**, 20/Jan/2016: + + - Port to Bob v2 + - Support all protocols of Replay Attack + +* **Version 1**, 21/Oct/2014: + + - Initial release + + +Description +=========== + +The `Replay-Attack Database <https://www.idiap.ch/dataset/replayattack>`_ for +face spoofing consists of 1300 video clips of photo and video attack attempts +to 50 clients, under different lighting conditions. This Database was produced +at the Idiap Research Institute, in Switzerland. 
+ + +Acknowledgements +================ + +If you use this database, please cite the following publication on your paper:: + + @INPROCEEDINGS{Chingovska_BIOSIG-2012, + author = {Chingovska, Ivana and Anjos, Andr{\'{e}} and Marcel, S{\'{e}}bastien}, + keywords = {biometric, Counter-Measures, Local Binary Patterns, Spoofing Attacks}, + month = september, + title = {On the Effectiveness of Local Binary Patterns in Face Anti-spoofing}, + journal = {IEEE BIOSIG 2012}, + year = {2012}, + } + + +Spoofing Attacks Description +============================ + +The 2D face spoofing attack database consists of 1,300 video clips of photo and +video attack attempts of 50 clients, under different lighting conditions. + +The data is split into 4 sub-groups comprising: + + * Training data ("train"), to be used for training your anti-spoof classifier; + * Development data ("devel"), to be used for threshold estimation; + * Test data ("test"), with which to report error figures; + * Enrollment data ("enroll"), that can be used to verify spoofing sensitivity on face detection algorithms. + +Clients that appear in one of the data sets (train, devel or test) do not appear +in any other set. + +Database Description +==================== + +All videos are generated by either having a (real) client trying to access a +laptop through a built-in webcam or by displaying a photo or a video recording +of the same client for at least 9 seconds. The webcam produces colour videos +with a resolution of 320 pixels (width) by 240 pixels (height). The movies were +recorded on a Macbook laptop using the QuickTime framework (codec: Motion JPEG) +and saved into ".mov" files. The frame rate is about 25 Hz. Besides the native +support on Apple computers, these files are *easily* readable using mplayer, +ffmpeg or any other video utilities available under Linux or MS Windows systems. 
+ +Real client accesses as well as data collected for the attacks are taken under +two different lighting conditions: + +* **controlled**: The office light was turned on, blinds are down, background is homogeneous; +* **adverse**: Blinds up, more complex background, office lights are out. + +To produce the attacks, high-resolution photos and videos from each client were +taken under the same conditions as in their authentication sessions, using a +Canon PowerShot SX150 IS camera, which records both 12.1 Mpixel photographs and +720p high-definition video clips. The way to perform the attacks can be divided +into two subsets: the first subset is composed of videos generated using a stand +to hold the client biometry ("fixed"). For the second set, the attacker holds +the device used for the attack with their own hands. In total, 20 attack videos +were registered for each client, 10 for each of the attacking modes just +described: + +* 4 x mobile attacks using an iPhone 3GS screen (with resolution 480x320 pixels) displaying: + + * 1 x mobile photo/controlled + * 1 x mobile photo/adverse + * 1 x mobile video/controlled + * 1 x mobile video/adverse + +* 4 x high-resolution screen attacks using an iPad (first generation, with a screen resolution of 1024x768 pixels) displaying: + + * 1 x high-resolution photo/controlled + * 1 x high-resolution photo/adverse + * 1 x high-resolution video/controlled + * 1 x high-resolution video/adverse + +* 2 x hard-copy print attacks (produced on a Triumph-Adler DCC 2520 color laser printer) occupying the whole available printing surface on A4 paper for the following samples: + + * 1 x high-resolution print of photo/controlled + * 1 x high-resolution print of photo/adverse + +The 1300 real-accesses and attacks videos were then divided in the following +way: + +* **Training set**: contains 60 real-accesses and 300 attacks under different lighting conditions; +* **Development set**: contains 60 real-accesses and 300 attacks under different 
lighting conditions; +* **Test set**: contains 80 real-accesses and 400 attacks under different lighting conditions; +* **Enrollment set**: contains 100 real-accesses under different lighting conditions, to be used **exclusively** for studying the baseline performance of face recognition systems. + + +Face Locations +============== + +We also provide face locations automatically annotated by a cascade of +classifiers based on a variant of Local Binary Patterns (LBP) referred as +Modified Census Transform (MCT) [Face Detection with the Modified Census +Transform, Froba, B. and Ernst, A., 2004, IEEE International Conference on +Automatic Face and Gesture Recognition, pp. 91-96]. The automatic face +localisation procedure works in more than 99% of the total number of frames +acquired. This means that less than 1% of the total set of frames for all videos +do not possess annotated faces. User algorithms must account for this fact. + + +Protocol for Licit Biometric Transactions +========================================= + +It is possible to measure the performance of baseline face recognition systems +on the 2D Face spoofing database and evaluate how well the attacks pass such +systems or how, otherwise robust they are to attacks. Here we describe how to +use the available data at the enrollment set to create a background model, +client models and how to perform scoring using the available data. + +1. Universal Background Model (UBM): To generate the UBM, subselect the +training-set client videos from the enrollment videos. There should be 2 per +client, which means you get 30 videos, each with 375 frames to create the model; + +2. Client models: To generate client models, use the enrollment data for clients +at the development and test groups. There should be 2 videos per client (one for +each light condition) once more. At the end of the enrollment procedure, the +development set must have 1 model for each of the 15 clients available in that +set. 
Similarly, for the test set, 1 model for each of the 20 clients available; + +3. For a simple baseline verification, generate scores **exhaustively** for all +videos from the development and test **real-accesses** respectively, but +**without** intermixing accross development and test sets. The scores generated +against matched client videos and models (within the subset, i.e. development or +test) should be considered true client accesses, while all others impostors; + +4. If you are looking for a single number to report on the performance do the +following: exclusively using the scores from the development set, tune your +baseline face recognition system on the EER of the development set and use this +threshold to find the HTER on the test set scores. + + +Protocols for Spoofing Attacks +============================== + +Attack protocols are used to evaluate the (binary classification) performance of +counter-measures to spoof attacks. The database can be split into 6 different +protocols according to the type of device used to generate the attack: print, +mobile (phone), high-definition (tablet), photo, video or grand test (all +types). Furthermore, subsetting can be achieved on the top of the previous 6 +groups by classifying attacks as performed by the attacker bare hands or using a +fixed support. This classification scheme makes-up a total of 18 protocols that +can be used for studying the performance of counter-measures to 2D face spoofing +attacks. 
+ ++------------+-------+-----+------+-------+-----+------+-------+-----+------+ +| | Hand-Attack | Fixed-Support | All Supports | ++------------+-------+-----+------+-------+-----+------+-------+-----+------+ +| Protocol | train | dev | test | train | dev | test | train | dev | test | ++============+=======+=====+======+=======+=====+======+=======+=====+======+ +| print | 30 | 30 | 40 | 30 | 30 | 40 | 60 | 60 | 80 | ++------------+-------+-----+------+-------+-----+------+-------+-----+------+ +| mobile | 60 | 60 | 80 | 60 | 60 | 80 | 120 | 120 | 160 | ++------------+-------+-----+------+-------+-----+------+-------+-----+------+ +| highdef | 60 | 60 | 80 | 60 | 60 | 80 | 120 | 120 | 160 | ++------------+-------+-----+------+-------+-----+------+-------+-----+------+ +| video | 60 | 60 | 80 | 60 | 60 | 80 | 120 | 120 | 160 | ++------------+-------+-----+------+-------+-----+------+-------+-----+------+ +| photo | 90 | 90 | 120 | 90 | 90 | 120 | 180 | 180 | 240 | ++------------+-------+-----+------+-------+-----+------+-------+-----+------+ +| grandtest | 150 | 150 | 200 | 150 | 150 | 200 | 300 | 300 | 400 | ++------------+-------+-----+------+-------+-----+------+-------+-----+------+ + + +Specificities to the BEAT View +============================== + +Spoofing and genuine samples are each labelled with a text field that defines +the class of the sample: ``"attack"`` or ``"real"``, for simple anti-spoofing +binary classification systems and ``"attack"``, ``"real"`` and ``"enroll"`` for +systems merging anti-spoofing with face verification systems. Code using this +database views may use the ``class`` field to differentiate samples. 
diff --git a/advanced/databases/utfvp/4.json b/advanced/databases/utfvp/4.json new file mode 100644 index 0000000..90b6218 --- /dev/null +++ b/advanced/databases/utfvp/4.json @@ -0,0 +1,623 @@ +{ + "description": "Finger-Vein database from the University of Twente", + "root_folder": "/idiap/resource/database/UTFVP/data", + "protocols": [ + { + "name": "1vsall", + "template": "simple_fingervein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1" + }, + "name": "train", + "template": "train", + "parameters": { + "protocol": "1vsall" + }, + "view": "Train" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "name": "dev_templates", + "template": "templates", + "parameters": { + "group": "dev", + "protocol": "1vsall" + }, + "view": "Templates" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "name": "dev_probes", + "template": "probes", + "parameters": { + "group": "dev", + "protocol": "1vsall" + }, + "view": "Probes" + } + ] + }, + { + "name": "nom", + "template": "advanced_fingervein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1" + }, + "name": "train", + "template": "train", + "parameters": { + "protocol": "nom" + }, + "view": "Train" + }, + { + "outputs": { + "image": "{{ system_user.username 
}}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "name": "dev_templates", + "template": "templates", + "parameters": { + "group": "dev", + "protocol": "nom" + }, + "view": "Templates" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "name": "dev_probes", + "template": "probes", + "parameters": { + "group": "dev", + "protocol": "nom" + }, + "view": "Probes" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "name": "test_templates", + "template": "templates", + "parameters": { + "group": "eval", + "protocol": "nom" + }, + "view": "Templates" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "name": "test_probes", + "template": "probes", + "parameters": { + "group": "eval", + "protocol": "nom" + }, + "view": "Probes" + } + ] + }, + { + "name": "nomLeftRing", + "template": "advanced_fingervein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1" + }, + "name": "train", + "template": "train", + "parameters": { + "protocol": "nomLeftRing" + }, + "view": "Train" + }, + { + "outputs": { 
+ "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "name": "dev_templates", + "template": "templates", + "parameters": { + "group": "dev", + "protocol": "nomLeftRing" + }, + "view": "Templates" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "name": "dev_probes", + "template": "probes", + "parameters": { + "group": "dev", + "protocol": "nomLeftRing" + }, + "view": "Probes" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "name": "test_templates", + "template": "templates", + "parameters": { + "group": "eval", + "protocol": "nomLeftRing" + }, + "view": "Templates" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "name": "test_probes", + "template": "probes", + "parameters": { + "group": "eval", + "protocol": "nomLeftRing" + }, + "view": "Probes" + } + ] + }, + { + "name": "nomLeftMiddle", + "template": "advanced_fingervein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1" + }, + "name": "train", + "template": "train", + "parameters": { + 
"protocol": "nomLeftMiddle" + }, + "view": "Train" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "name": "dev_templates", + "template": "templates", + "parameters": { + "group": "dev", + "protocol": "nomLeftMiddle" + }, + "view": "Templates" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "name": "dev_probes", + "template": "probes", + "parameters": { + "group": "dev", + "protocol": "nomLeftMiddle" + }, + "view": "Probes" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "name": "test_templates", + "template": "templates", + "parameters": { + "group": "eval", + "protocol": "nomLeftMiddle" + }, + "view": "Templates" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "name": "test_probes", + "template": "probes", + "parameters": { + "group": "eval", + "protocol": "nomLeftMiddle" + }, + "view": "Probes" + } + ] + }, + { + "name": "nomLeftIndex", + "template": "advanced_fingervein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username 
}}/text/1" + }, + "name": "train", + "template": "train", + "parameters": { + "protocol": "nomLeftIndex" + }, + "view": "Train" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "name": "dev_templates", + "template": "templates", + "parameters": { + "group": "dev", + "protocol": "nomLeftIndex" + }, + "view": "Templates" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "name": "dev_probes", + "template": "probes", + "parameters": { + "group": "dev", + "protocol": "nomLeftIndex" + }, + "view": "Probes" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "name": "test_templates", + "template": "templates", + "parameters": { + "group": "eval", + "protocol": "nomLeftIndex" + }, + "view": "Templates" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "name": "test_probes", + "template": "probes", + "parameters": { + "group": "eval", + "protocol": "nomLeftIndex" + }, + "view": "Probes" + } + ] + }, + { + "name": "nomRightIndex", + "template": "advanced_fingervein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ 
system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1" + }, + "name": "train", + "template": "train", + "parameters": { + "protocol": "nomRightIndex" + }, + "view": "Train" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "name": "dev_templates", + "template": "templates", + "parameters": { + "group": "dev", + "protocol": "nomRightIndex" + }, + "view": "Templates" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "name": "dev_probes", + "template": "probes", + "parameters": { + "group": "dev", + "protocol": "nomRightIndex" + }, + "view": "Probes" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "name": "test_templates", + "template": "templates", + "parameters": { + "group": "eval", + "protocol": "nomRightIndex" + }, + "view": "Templates" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "name": "test_probes", + "template": "probes", + "parameters": { + "group": "eval", + "protocol": "nomRightIndex" + }, + "view": "Probes" + } + ] + }, + { + "name": "nomRightMiddle", + "template": "advanced_fingervein_recognition", + "sets": [ + { + 
"outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1" + }, + "name": "train", + "template": "train", + "parameters": { + "protocol": "nomRightMiddle" + }, + "view": "Train" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "name": "dev_templates", + "template": "templates", + "parameters": { + "group": "dev", + "protocol": "nomRightMiddle" + }, + "view": "Templates" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "name": "dev_probes", + "template": "probes", + "parameters": { + "group": "dev", + "protocol": "nomRightMiddle" + }, + "view": "Probes" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "name": "test_templates", + "template": "templates", + "parameters": { + "group": "eval", + "protocol": "nomRightMiddle" + }, + "view": "Templates" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "name": "test_probes", + "template": "probes", + "parameters": { + "group": "eval", + "protocol": "nomRightMiddle" + }, + "view": "Probes" + } + ] + }, + { + "name": 
"nomRightRing", + "template": "advanced_fingervein_recognition", + "sets": [ + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1" + }, + "name": "train", + "template": "train", + "parameters": { + "protocol": "nomRightRing" + }, + "view": "Train" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "name": "dev_templates", + "template": "templates", + "parameters": { + "group": "dev", + "protocol": "nomRightRing" + }, + "view": "Templates" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "name": "dev_probes", + "template": "probes", + "parameters": { + "group": "dev", + "protocol": "nomRightRing" + }, + "view": "Probes" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1" + }, + "name": "test_templates", + "template": "templates", + "parameters": { + "group": "eval", + "protocol": "nomRightRing" + }, + "view": "Templates" + }, + { + "outputs": { + "image": "{{ system_user.username }}/array_2d_uint8/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1" + }, + "name": "test_probes", + "template": "probes", + "parameters": { + "group": "eval", + 
"protocol": "nomRightRing" + }, + "view": "Probes" + } + ] + } + ] +} diff --git a/advanced/databases/utfvp/4.py b/advanced/databases/utfvp/4.py new file mode 100644 index 0000000..f9c5c88 --- /dev/null +++ b/advanced/databases/utfvp/4.py @@ -0,0 +1,308 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. # +# # +############################################################################### + +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View + +import bob.io.base +import bob.io.image +import bob.db.utfvp + + +#---------------------------------------------------------- + + +class Train(View): + """Outputs: + - image: "{{ system_user.username }}/array_2d_uint8/1" + - file_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/text/1" + + One "file_id" is associated with a given "image". + Several "image" are associated with a given "client_id". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'file_id', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.utfvp.Database() + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups='world', + purposes='train'), + key=lambda x: (x.client_id, x.id)) + + return [ Entry(x.client_id, x.id, x.make_path(root_folder, '.png')) for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'text': str(obj.client_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Templates(View): + """Outputs: + - image: "{{ system_user.username }}/array_2d_uint8/1" + - file_id: "{{ system_user.username }}/uint64/1" + - template_id: "{{ system_user.username }}/text/1" + - client_id: "{{ system_user.username }}/text/1" + + One "file_id" is associated with a given "image". + Several "image" are associated with a given "template_id". + Several "template_id" are associated with a given "client_id". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | template_id | | template_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | client_id | + ----------------------------------------------------------------------------------------------- + + Note: for this particular database, there is only one "template_id" + per "client_id". 
+ """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.utfvp.Database() + + template_ids = db.model_ids(protocol=parameters['protocol'], + groups=parameters['group']) + + entries = [] + + for template_id in template_ids: + objs = db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + purposes='enroll', + model_ids=[template_id]) + + entries.extend([ Entry(x.client_id, template_id, x.id, x.make_path(root_folder, '.png')) + for x in objs ]) + + return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'text': str(obj.client_id) + } + + elif output == 'template_id': + return { + 'text': str(obj.template_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Probes(View): + """Outputs: + - image: "{{ system_user.username }}/array_2d_uint8/1" + - file_id: "{{ system_user.username }}/uint64/1" + - probe_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/text/1" + - template_ids: "{{ system_user.username }}/array_1d_text/1" + + One "file_id" is associated with a given "image". + One "probe_id" is associated with a given "image". + Several "image" are associated with a given "client_id". + Several "client_id" are associated with a given "template_ids". 
+ + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | template_ids | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.utfvp.Database() + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + purposes='probe'), + key=lambda x: (x.client_id, x.id)) + + objs = [ (sorted([ model.name for model in obj.models_probe + if model.sgroup == parameters['group'] ]), obj) + for obj in objs ] + + entries = [ Entry(x[0], x[1].client_id, x[1].id, x[1].id, x[1].make_path(root_folder, '.png')) + for x in objs ] + + return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids, + x.client_id, 
x.file_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'template_ids': + return { + 'text': obj.template_ids + } + + elif output == 'client_id': + return { + 'text': str(obj.client_id) + } + + elif output == 'probe_id': + return { + 'value': np.uint64(obj.probe_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +def setup_tests(): + # Install a mock load function for the images + def mock_load(root_folder): + return np.ndarray((10, 20), dtype=np.uint8) + + bob.io.base.load = mock_load + + +#---------------------------------------------------------- + + +# Test the behavior of the views (on fake data) +if __name__ == '__main__': + + setup_tests() + + view = Train() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='1vsall', + ) + ) + view.get('client_id', 0) + view.get('file_id', 0) + view.get('image', 0) + + view = Templates() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='nomLeftRing', + group='dev', + ) + ) + view.get('client_id', 0) + view.get('template_id', 0) + view.get('file_id', 0) + view.get('image', 0) + + view = Probes() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='nomLeftRing', + group='dev', + ) + ) + view.get('template_ids', 0) + view.get('client_id', 0) + view.get('probe_id', 0) + view.get('file_id', 0) + view.get('image', 0) diff --git a/advanced/databases/utfvp/4.rst b/advanced/databases/utfvp/4.rst new file mode 100644 index 0000000..93f19c3 --- /dev/null +++ b/advanced/databases/utfvp/4.rst @@ -0,0 +1,108 @@ +.. Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. 
Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. + + +The UTFVP Database of Finger vein +--------------------------------- + +Changelog +========= + +* **Version 4**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 3**, 02/Nov/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 2**, 20/Jan/2016: + + - Port to Bob v2 + +* **Version 1**, 06/Nov/2014: + + - Initial release + + +Description +=========== + +The University of Twente Finger Vascular Pattern (UTFVP) database is a +realistic and challenging finger vein database to support and stimulate +research efforts in the area of developing, testing and evaluating algorithms +for vascular pattern recognition. + +The collected dataset contains 1440 finger vascular pattern images in total +which have been collected from 60 volunteers at Twente University during the +2011-2012 academic year. Images were captured in two identical sessions with an +average time lapse of 15 days. For each volunteer the vascular pattern of the +index, ring and middle finger of both hands has been collected twice at each +session. This means that each individual finger has been captured four times in +total. 
The captured images have a resolution of 672 x 380 pixels and have a +pixel density of 126 pixels per centimetre (ppcm). The images are stored using +the lossless 8 bit grey scale Portable Network Graphics (PNG) format. + +The percentage of male volunteers was 73% and the percentage of right handed +volunteers was 87%. The dataset represents a young population with 82% of the +volunteers falling in the age range of 19-30; the remaining volunteers were +older than this. The quality of the collected images varies from person to +person, but the variation in quality of the images from the same person is +small. The width of the visible blood vessels ranges from 4-20 pixels which +corresponds to vessel widths of approximately 0.3-1.6 mm. These vessel widths +are approximate numbers because the pixel density was determined assuming a +flat surface. + +Associated with the database is the UTFVP protocol called **1vsall**, which is +based on the database reference paper [Ton+13]. + +The `Idiap Research Institute <http://www.idiap.ch/>`_ and `The Swiss Center for +Biometrics Research and Testing <http://www.biometrics-center.ch/>`_ define the +Normal Operation Mode (NOM) protocols, which specify which set of data to use for training, +evaluation and testing. Performing experiments according to the protocol allows +institutions to easily compare their results to others. + +Description: The nom protocols divide the database into three subsets: world +(subjects 1-10), development (subjects 11-28) and test (subjects 29-60). Only +the images in the world set should be used to train system components such as +world/background models, PCA/LDA subspaces, etc., or to otherwise use as +background data, for example for score normalisation, etc. 
The development set +only should be used to train system hyper-parameters such as the decision +threshold, number of dimensions in a subspace, feature extraction and +preprocessing hyper-parameters, coefficients for linear fusion, etc., to +minimise the chosen error rate metric. Finally, the test set should be used to +test finger vein verification accuracy. The decision threshold must be +determined by tuning on the development set, and then blindly applied to finger +vein verification scores produced on the test set. + +In the **nom** protocol the different fingers of one subject are considered +different subjects. Therefore, a total number of 60 x 6 = 360 subjects will be +considered for the experiments. The remaining protocols: **nomLeftRing**, +**nomLeftMiddle**, **nomLeftIndex**, **nomRightIndex**, **nomRightMiddle**, +**nomRightRing** consider just one finger per subject. Therefore, a total +number of 60 subjects will be considered for the experiments in these cases. +In all these protocols, the two finger vein images from the first session are +used for enrolment and the two from the second session as probe samples. + +Citation: All documents and papers that report on research that uses the UTFVP +database must acknowledge the use of the database by including a citation of +the paper [Ton+13]. + +.. [Ton+13] *B. Ton and R.N.J. Veldhuis*. **A High Quality Finger Vascular Pattern Dataset Collected Using a Custom Designed Capturing Device**. In: 6th IAPR International Conference on Biometrics (ICB), pp. 1-5, June 4-7, Madrid, Spain, 2013. 
diff --git a/advanced/databases/voxforge/4.json b/advanced/databases/voxforge/4.json new file mode 100644 index 0000000..31b4360 --- /dev/null +++ b/advanced/databases/voxforge/4.json @@ -0,0 +1,80 @@ +{ + "description": "The VoxForge Database", + "root_folder": "/idiap/resource/database/VoxForge/dbase/SpeechCorpus/Trunk/Audio/Main/16kHz_16bit", + "protocols": [ + { + "name": "default", + "template": "advanced_speaker_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_id": "{{ system_user.username }}/text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username 
}}/text/1", + "probe_id": "{{ system_user.username }}/text/1", + "client_id": "{{ system_user.username }}/text/1", + "template_ids": "{{ system_user.username }}/array_1d_text/1", + "speech": "{{ system_user.username }}/array_1d_floats/1" + } + } + ] + } + ] +} diff --git a/advanced/databases/voxforge/4.py b/advanced/databases/voxforge/4.py new file mode 100644 index 0000000..ac3e698 --- /dev/null +++ b/advanced/databases/voxforge/4.py @@ -0,0 +1,323 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. 
#                                                                             #
###############################################################################

import numpy as np
from collections import namedtuple

from beat.backend.python.database import View

import bob.db.voxforge
import bob.io.base
import bob.io.audio


#----------------------------------------------------------


class Train(View):
    """Training set ('world' group) of the VoxForge database.

    Outputs:
      - speech:    "{{ system_user.username }}/array_1d_floats/1"
      - file_id:   "{{ system_user.username }}/text/1"
      - client_id: "{{ system_user.username }}/text/1"

    One "file_id" is associated with a given "speech".
    Several "speech" are associated with a given "client_id".
    """

    def index(self, root_folder, parameters):
        """Return the entries of the 'world' group, sorted by
        (client_id, file id) so the output order is deterministic."""
        Entry = namedtuple('Entry', ['client_id', 'file_id', 'speech'])

        # Open the database and load the objects to provide via the outputs
        db = bob.db.voxforge.Database()

        objs = sorted(db.objects(groups='world'),
                      key=lambda x: (x.client_id, x.id))

        return [ Entry(x.client_id, x.id, x.make_path(root_folder, '.wav'))
                 for x in objs ]


    def get(self, output, index):
        """Return the value of `output` for the entry at `index`."""
        obj = self.objs[index]

        if output == 'client_id':
            return {
                'text': str(obj.client_id)
            }

        elif output == 'file_id':
            return {
                'text': str(obj.file_id)
            }

        elif output == 'speech':
            audio = bob.io.base.load(obj.speech)

            # Scale the first channel to the 16-bit sample range.
            # NOTE: np.cast['float'](...) was deprecated and removed in
            # NumPy 2.0; .astype() is the supported equivalent.
            return {
                'value': (audio[0] * pow(2, 15)).astype(np.float64)
            }


#----------------------------------------------------------


class Templates(View):
    """Enrollment templates of the VoxForge database.

    Outputs:
      - speech:      "{{ system_user.username }}/array_1d_floats/1"
      - file_id:     "{{ system_user.username }}/text/1"
      - template_id: "{{ system_user.username }}/text/1"
      - client_id:   "{{ system_user.username }}/text/1"

    One "file_id" is associated with a given "speech".
    Several "speech" are associated with a given "template_id".
    Several "template_id" are associated with a given "client_id".

    Note: for this particular database, there is only one "template_id"
    per "client_id".
    """

    def index(self, root_folder, parameters):
        """Return the enrollment entries of the group selected by
        parameters['group'], sorted by (client_id, template_id, file_id)."""
        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'speech'])

        # Open the database and load the objects to provide via the outputs
        db = bob.db.voxforge.Database()

        template_ids = db.model_ids(groups=parameters['group'])

        entries = []

        for template_id in template_ids:
            objs = db.objects(groups=parameters['group'],
                              purposes='enroll',
                              model_ids=[template_id])

            entries.extend([ Entry(x.client_id, template_id, x.id,
                                   x.make_path(root_folder, '.wav'))
                             for x in objs ])

        return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id))


    def get(self, output, index):
        """Return the value of `output` for the entry at `index`."""
        obj = self.objs[index]

        if output == 'client_id':
            return {
                'text': str(obj.client_id)
            }

        elif output == 'template_id':
            return {
                'text': str(obj.template_id)
            }

        elif output == 'file_id':
            return {
                'text': str(obj.file_id)
            }

        elif output == 'speech':
            audio = bob.io.base.load(obj.speech)

            # See Train.get: np.cast was removed in NumPy 2.0
            return {
                'value': (audio[0] * pow(2, 15)).astype(np.float64)
            }


#----------------------------------------------------------


class Probes(View):
    """Probes of the VoxForge database.

    Outputs:
      - speech:       "{{ system_user.username }}/array_1d_floats/1"
      - file_id:      "{{ system_user.username }}/text/1"
      - probe_id:     "{{ system_user.username }}/text/1"
      - client_id:    "{{ system_user.username }}/text/1"
      - template_ids: "{{ system_user.username }}/array_1d_text/1"

    One "file_id" is associated with a given "speech".
    One "probe_id" is associated with a given "speech".
    Several "speech" are associated with a given "client_id".
    Several "client_id" are associated with a given "template_ids".

    Each probe must be matched against a number of templates defined by a
    list of client identifiers.
    """

    def index(self, root_folder, parameters):
        """Return the probe entries of the group selected by
        parameters['group'], each carrying the list of template ids it must
        be compared against, sorted so probes sharing the same template list
        are contiguous."""
        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id',
                                     'file_id', 'speech'])

        # Open the database and load the objects to provide via the outputs
        db = bob.db.voxforge.Database()

        template_ids = db.model_ids(groups=parameters['group'])

        # For each template, the ids of the probe files to compare against it
        template_probes = {}
        for template_id in template_ids:
            objs = sorted(db.objects(groups=parameters['group'],
                                     purposes='probe',
                                     model_ids=[template_id]),
                          key=lambda x: (x.client_id, x.id))

            template_probes[template_id] = [ p.id for p in objs ]

        objs = sorted(db.objects(groups=parameters['group'],
                                 purposes='probe'),
                      key=lambda x: (x.client_id, x.id))

        entries = []
        for obj in objs:
            templates = [ template_id for template_id in template_ids
                          if obj.id in template_probes[template_id] ]

            # probe_id and file_id are both the database id of the file
            entries.append( Entry(templates, obj.client_id, obj.id, obj.id,
                                  obj.make_path(root_folder, '.wav')) )

        return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids,
                                              x.client_id, x.file_id))


    def get(self, output, index):
        """Return the value of `output` for the entry at `index`."""
        obj = self.objs[index]

        if output == 'template_ids':
            return {
                'text': [ str(x) for x in obj.template_ids ]
            }

        elif output == 'client_id':
            return {
                'text': str(obj.client_id)
            }

        elif output == 'probe_id':
            return {
                'text': str(obj.probe_id)
            }

        elif output == 'file_id':
            return {
                'text': str(obj.file_id)
            }

        elif output == 'speech':
            audio = bob.io.base.load(obj.speech)

            # See Train.get: np.cast was removed in NumPy 2.0
            return {
                'value': (audio[0] * pow(2, 15)).astype(np.float64)
            }


#----------------------------------------------------------


def setup_tests():
    """Install a mock load function, so the views can be exercised without
    the actual audio files."""
    def mock_load(root_folder):
        # np.zeros instead of the bare np.ndarray constructor: the latter
        # returns uninitialized memory, making the mock non-deterministic
        return np.zeros((1, 512), dtype=np.uint8)

    bob.io.base.load = mock_load


#----------------------------------------------------------


# Test the behavior of the views (on fake data)
if __name__ == '__main__':

    setup_tests()

    view = Train()
    view.objs = view.index(
        root_folder='',
        parameters=dict(
        )
    )
    view.get('client_id', 0)
    view.get('file_id', 0)
    view.get('speech', 0)


    view = Templates()
    view.objs = view.index(
        root_folder='',
        parameters=dict(
            group='dev',
        )
    )
    view.get('client_id', 0)
    view.get('template_id', 0)
    view.get('file_id', 0)
    view.get('speech', 0)


    view = Probes()
    view.objs = view.index(
        root_folder='',
        parameters=dict(
            group='dev',
        )
    )
    view.get('template_ids', 0)
    view.get('client_id', 0)
    view.get('probe_id', 0)
    view.get('file_id', 0)
    view.get('speech', 0)
0000000..175d4eb --- /dev/null +++ b/advanced/databases/voxforge/4.rst @@ -0,0 +1,56 @@ +.. Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. The BEAT platform is distributed in the hope that it will be useful, but .. +.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. + + +----------------------- + The VoxForge Database +----------------------- + +Changelog +========= + +* **Version 4**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 3**, 02/Nov/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 2**, 26/Jan/2016: + + - Port to Bob v2 + +* **Version 1**, 21/Oct/2014: + + - Initial release + + +Description +=========== + +`VoxForge <http://www.voxforge.org/>`_ offers a collection of transcribed +speech for use with Free and Open Source Speech Recognition Engines. + +`bob.db.voxforge <https://pypi.python.org/pypi/bob.db.voxforge>`_ defines +a speaker recognition protocol that uses a small subset of the English audio +files (only 6561 files) belonging to 30 speakers randomly selected. 
This subset +is split into three equivalent parts: Training (10 speakers), Development (10 +speakers) and Test (10 speakers) sets. diff --git a/advanced/databases/xm2vts/4.json b/advanced/databases/xm2vts/4.json new file mode 100644 index 0000000..d505d4d --- /dev/null +++ b/advanced/databases/xm2vts/4.json @@ -0,0 +1,350 @@ +{ + "description": "The XM2VTS Database of Faces", + "root_folder": "/idiap/resource/database/xm2vtsdb/images", + "protocols": [ + { + "name": "lp1", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "lp1" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "lp1", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "lp1", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "lp1", + "group": "eval" + }, + "outputs": { 
+ "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "lp1", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "lp2", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "lp2" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "lp2", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "lp2", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + 
"template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "lp2", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "lp2", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "darkened-lp1", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "darkened-lp1" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "darkened-lp1", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + 
"eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "darkened-lp1", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "darkened-lp1", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "darkened-lp1", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + }, + { + "name": "darkened-lp2", + "template": "advanced_face_recognition", + "sets": [ + { + "name": "train", + "template": "train", + "view": "Train", + "parameters": { + "protocol": "darkened-lp2" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username 
}}/eye_positions/1" + } + }, + { + "name": "dev_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "darkened-lp2", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "dev_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "darkened-lp2", + "group": "dev" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_templates", + "template": "templates", + "view": "Templates", + "parameters": { + "protocol": "darkened-lp2", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_id": "{{ system_user.username }}/uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + }, + { + "name": "test_probes", + "template": "probes", + "view": "Probes", + "parameters": { + "protocol": "darkened-lp2", + "group": "eval" + }, + "outputs": { + "file_id": "{{ system_user.username }}/uint64/1", + "probe_id": "{{ system_user.username }}/uint64/1", + "client_id": "{{ system_user.username }}/uint64/1", + "template_ids": "{{ system_user.username }}/array_1d_uint64/1", + "image": "{{ system_user.username }}/array_3d_uint8/1", + "eye_centers": "{{ system_user.username }}/eye_positions/1" + } + } + ] + } + ] +} diff --git 
a/advanced/databases/xm2vts/4.py b/advanced/databases/xm2vts/4.py new file mode 100644 index 0000000..3a084c5 --- /dev/null +++ b/advanced/databases/xm2vts/4.py @@ -0,0 +1,382 @@ +############################################################################### +# # +# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. # +# # +############################################################################### + +import numpy as np +from collections import namedtuple + +from beat.backend.python.database import View + +import bob.io.base +import bob.io.image +import bob.db.xm2vts + + +#---------------------------------------------------------- + + +class Train(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". 
+ Several "image" are associated with a given "client_id". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'file_id', 'eye_centers', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.xm2vts.Database() + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups='world'), + key=lambda x: (x.client_id, x.id)) + + return [ Entry(x.client_id, x.id, db.annotations(x), x.make_path(root_folder, '.ppm')) + for x in objs ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': 
np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Templates(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/uint64/1" + - template_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + Several "image" are associated with a given "template_id". + Several "template_id" are associated with a given "client_id". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | template_id | | template_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | client_id | + 
----------------------------------------------------------------------------------------------- + + Note: for this particular database, there is only one "template_id" + per "client_id". + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'eye_centers', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.xm2vts.Database() + + template_ids = db.model_ids(protocol=parameters['protocol'], + groups=parameters['group']) + + entries = [] + + for template_id in template_ids: + objs = db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + purposes='enroll', + model_ids=[template_id]) + + entries.extend([ Entry(x.client_id, template_id, x.id, db.annotations(x), + x.make_path(root_folder, '.ppm')) + for x in objs ]) + + return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'template_id': + return { + 'value': np.uint64(obj.template_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +class Probes(View): + """Outputs: + - image: "{{ system_user.username }}/array_3d_uint8/1" + - eye_centers: "{{ system_user.username }}/eye_positions/1" + - file_id: "{{ system_user.username }}/uint64/1" + - probe_id: "{{ system_user.username }}/uint64/1" + - client_id: "{{ system_user.username }}/uint64/1" + - template_ids: "{{ 
system_user.username }}/array_1d_uint64/1" + + One "file_id" is associated with a given "image". + One "eye_centers" is associated with a given "image". + One "probe_id" is associated with a given "image". + Several "image" are associated with a given "client_id". + Several "client_id" are associated with a given "template_ids". + + --------------- --------------- --------------- --------------- --------------- --------------- + | image | | image | | image | | image | | image | | image | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + --------------- --------------- --------------- --------------- --------------- --------------- + --------------- --------------- --------------- --------------- --------------- --------------- + | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | | probe_id | + --------------- --------------- --------------- --------------- --------------- --------------- + ----------------------------------------------- ----------------------------------------------- + | client_id | | client_id | + ----------------------------------------------- ----------------------------------------------- + ----------------------------------------------------------------------------------------------- + | template_ids | + ----------------------------------------------------------------------------------------------- + """ + + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 
'file_id', + 'eye_centers', 'image']) + + # Open the database and load the objects to provide via the outputs + db = bob.db.xm2vts.Database() + + template_ids = sorted(db.model_ids(protocol=parameters['protocol'], + groups=parameters['group'])) + + + template_probes = {} + for template_id in template_ids: + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + purposes='probe', + model_ids=[template_id]), + key=lambda x: (x.client_id, x.id)) + + template_probes[template_id] = [ p.id for p in objs ] + + + objs = sorted(db.objects(protocol=parameters['protocol'], + groups=parameters['group'], + purposes='probe'), + key=lambda x: (x.client_id, x.id)) + + entries = [] + for obj in objs: + templates = [ template_id for template_id in template_ids + if obj.id in template_probes[template_id] ] + + entries.append( Entry(templates, obj.client_id, obj.id, obj.id, + db.annotations(obj), obj.make_path(root_folder, '.ppm')) ) + + return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids, + x.client_id, x.probe_id)) + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'template_ids': + return { + 'value': np.uint64(obj.template_ids) + } + + elif output == 'client_id': + return { + 'value': np.uint64(obj.client_id) + } + + elif output == 'probe_id': + return { + 'value': np.uint64(obj.probe_id) + } + + elif output == 'file_id': + return { + 'value': np.uint64(obj.file_id) + } + + elif output == 'eye_centers': + return { + 'left': { + 'y': np.int32(obj.eye_centers['leye'][0]), + 'x': np.int32(obj.eye_centers['leye'][1]), + }, + 'right': { + 'y': np.int32(obj.eye_centers['reye'][0]), + 'x': np.int32(obj.eye_centers['reye'][1]), + } + } + + elif output == 'image': + return { + 'value': bob.io.base.load(obj.image) + } + + +#---------------------------------------------------------- + + +def setup_tests(): + # Install a mock load function for the images + def mock_load(root_folder): + return np.ndarray((3, 
10, 20), dtype=np.uint8) + + bob.io.base.load = mock_load + + +#---------------------------------------------------------- + + +# Test the behavior of the views (on fake data) +if __name__ == '__main__': + + setup_tests() + + view = Train() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='lp1', + ) + ) + view.get('client_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) + + + view = Templates() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='lp1', + group='dev', + ) + ) + view.get('client_id', 0) + view.get('template_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) + + + view = Probes() + view.objs = view.index( + root_folder='', + parameters=dict( + protocol='lp1', + group='dev', + ) + ) + view.get('template_ids', 0) + view.get('client_id', 0) + view.get('probe_id', 0) + view.get('file_id', 0) + view.get('eye_centers', 0) + view.get('image', 0) diff --git a/advanced/databases/xm2vts/4.rst b/advanced/databases/xm2vts/4.rst new file mode 100644 index 0000000..f907727 --- /dev/null +++ b/advanced/databases/xm2vts/4.rst @@ -0,0 +1,103 @@ +.. Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/ .. +.. Contact: beat.support@idiap.ch .. +.. .. +.. This file is part of the beat.examples module of the BEAT platform. .. +.. .. +.. Commercial License Usage .. +.. Licensees holding valid commercial BEAT licenses may use this file in .. +.. accordance with the terms contained in a written agreement between you .. +.. and Idiap. For further information contact tto@idiap.ch .. +.. .. +.. Alternatively, this file may be used under the terms of the GNU Affero .. +.. Public License version 3 as published by the Free Software and appearing .. +.. in the file LICENSE.AGPL included in the packaging of this file. .. +.. The BEAT platform is distributed in the hope that it will be useful, but .. +.. 
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY .. +.. or FITNESS FOR A PARTICULAR PURPOSE. .. +.. .. +.. You should have received a copy of the GNU Affero Public License along .. +.. with the BEAT platform. If not, see http://www.gnu.org/licenses/. .. + + +The XM2VTS Database of Faces +---------------------------- + +Changelog +========= + +* **Version 4**, 30/Jan/2018: + + - Port to beat.backend.python v1.5.0 + +* **Version 3**, 02/Nov/2017: + + - Port to beat.backend.python v1.4.2 + +* **Version 2**, 20/Jan/2016: + + - Port to Bob v2 + +* **Version 1**, 21/Oct/2014: + + - Initial release + + +Description +=========== + +The `XM2VTS database <http://www.ee.surrey.ac.uk/CVSSP/xm2vtsdb/>`_ is a large +multi-modal database that was captured onto high quality digital video. It +contains four recordings of 295 subjects taken over a period of four months. +Each recording contains a speaking head shot and a rotating head shot. Sets +of data taken from this database are available including high quality colour +images, 32 KHz 16-bit sound files, video sequences and a 3D model. + +The database was acquired within the M2VTS project (Multi Modal Verification for +Teleservices and Security applications), a part of the EU ACTS programme, which +deals with access control by the use of multimodal identification of human faces. +The goal of using a multimodal recognition scheme is to improve the recognition +efficiency by combining single modalities, namely face and voice features. + +.. image:: http://www.ee.surrey.ac.uk/CVSSP/xm2vtsdb/images/000_1.jpg + :width: 150 px +.. image:: http://www.ee.surrey.ac.uk/CVSSP/xm2vtsdb/images/001_1.jpg + :width: 150 px +.. image:: http://www.ee.surrey.ac.uk/CVSSP/xm2vtsdb/images/002_1.jpg + :width: 150 px +.. image:: http://www.ee.surrey.ac.uk/CVSSP/xm2vtsdb/images/003_1.jpg + :width: 150 px + +\ + +.. image:: http://www.ee.surrey.ac.uk/CVSSP/xm2vtsdb/images/000_2.jpg + :width: 150 px +.. 
image:: http://www.ee.surrey.ac.uk/CVSSP/xm2vtsdb/images/001_2.jpg + :width: 150 px +.. image:: http://www.ee.surrey.ac.uk/CVSSP/xm2vtsdb/images/002_2.jpg + :width: 150 px +.. image:: http://www.ee.surrey.ac.uk/CVSSP/xm2vtsdb/images/003_2.jpg + :width: 150 px + +\ + +.. image:: http://www.ee.surrey.ac.uk/CVSSP/xm2vtsdb/images/000_3.jpg + :width: 150 px +.. image:: http://www.ee.surrey.ac.uk/CVSSP/xm2vtsdb/images/001_3.jpg + :width: 150 px +.. image:: http://www.ee.surrey.ac.uk/CVSSP/xm2vtsdb/images/002_3.jpg + :width: 150 px +.. image:: http://www.ee.surrey.ac.uk/CVSSP/xm2vtsdb/images/003_3.jpg + :width: 150 px + +\ + +.. image:: http://www.ee.surrey.ac.uk/CVSSP/xm2vtsdb/images/000_4.jpg + :width: 150 px +.. image:: http://www.ee.surrey.ac.uk/CVSSP/xm2vtsdb/images/001_4.jpg + :width: 150 px +.. image:: http://www.ee.surrey.ac.uk/CVSSP/xm2vtsdb/images/002_4.jpg + :width: 150 px +.. image:: http://www.ee.surrey.ac.uk/CVSSP/xm2vtsdb/images/003_4.jpg + :width: 150 px + +This deployment of the XM2VTS database only consists of the images. 
diff --git a/test/algorithms/username/integers_sum/1.json b/test/algorithms/username/integers_sum/1.json new file mode 100644 index 0000000..079fa87 --- /dev/null +++ b/test/algorithms/username/integers_sum/1.json @@ -0,0 +1,26 @@ +{ + "language": "python", + "splittable": true, + "groups": [ + { + "name": "main", + "inputs": { + "a": { + "type": "{{ system_user.username }}/integer/1" + }, + "b": { + "type": "{{ system_user.username }}/integer/1" + } + }, + "outputs": { + "sum": { + "type": "{{ system_user.username }}/integer/1" + } + } + } + ], + "parameters": { + }, + "uses": { + } +} diff --git a/test/algorithms/username/integers_sum/1.py b/test/algorithms/username/integers_sum/1.py new file mode 100644 index 0000000..3d405d4 --- /dev/null +++ b/test/algorithms/username/integers_sum/1.py @@ -0,0 +1,37 @@ +############################################################################### +# # +# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/ # +# Contact: beat.support@idiap.ch # +# # +# This file is part of the beat.examples module of the BEAT platform. # +# # +# Commercial License Usage # +# Licensees holding valid commercial BEAT licenses may use this file in # +# accordance with the terms contained in a written agreement between you # +# and Idiap. For further information contact tto@idiap.ch # +# # +# Alternatively, this file may be used under the terms of the GNU Affero # +# Public License version 3 as published by the Free Software and appearing # +# in the file LICENSE.AGPL included in the packaging of this file. # +# The BEAT platform is distributed in the hope that it will be useful, but # +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # +# or FITNESS FOR A PARTICULAR PURPOSE. # +# # +# You should have received a copy of the GNU Affero Public License along # +# with the BEAT platform. If not, see http://www.gnu.org/licenses/. 
# +# # +############################################################################### + +import numpy + +class Algorithm: + + def __init__(self): + self.offset = 1 + + def process(self, inputs, outputs): + outputs['sum'].write({ + 'value': numpy.int32(inputs['a'].data.value + inputs['b'].data.value), + }) + + return True diff --git a/test/databases/simple/1.json b/test/databases/simple/1.json index fac1117..46a799c 100644 --- a/test/databases/simple/1.json +++ b/test/databases/simple/1.json @@ -45,6 +45,35 @@ } } ] + }, + { + "name": "10_numbers", + "template": "one_block", + "sets": [ + { + "name": "numbers", + "template": "numbers", + "view": "View10", + "outputs": { + "out": "{{ system_user.username }}/integer/1" + } + } + ] + }, + { + "name": "duo", + "template": "one_block_two_inputs", + "sets": [ + { + "name": "numbers", + "template": "numbers", + "view": "ViewDuo", + "outputs": { + "a": "{{ system_user.username }}/integer/1", + "b": "{{ system_user.username }}/integer/1" + } + } + ] } ] } diff --git a/test/databases/simple/1.py b/test/databases/simple/1.py index 4501e7e..d887ae8 100644 --- a/test/databases/simple/1.py +++ b/test/databases/simple/1.py @@ -23,12 +23,14 @@ ############################################################################### import numpy as np +from collections import namedtuple +from beat.backend.python.database import View as BaseView #---------------------------------------------------------- -class View: +class View(BaseView): """Outputs: - out: "{{ system_user.username }}/integer/1" @@ -37,47 +39,27 @@ class View: --------------- --------------- --------------- --------------- """ - def setup(self, root_folder, outputs, parameters, force_start_index=None, - force_end_index=None): + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['out']) - # Initialisations - self.outputs = outputs + return [ + Entry(42), + ] - # Determine the range of indices that must be provided - self.start_index = force_start_index 
if force_start_index is not None else 0 - self.end_index = force_end_index if force_end_index is not None else 0 - self.next_index = self.start_index + def get(self, output, index): + obj = self.objs[index] - return True - - - def done(self, last_data_index): - return last_data_index >= self.end_index - - - def next(self): - # Output: out (provide data at each iteration) - if self.outputs['out'].isConnected(): - self.outputs['out'].write( - { - 'value': np.int32(42) - }, - self.next_index - ) - - # Determine the next data index that must be provided - self.next_index = 1 + min([ x.last_written_data_index for x in self.outputs - if x.isConnected() ] - ) - - return True + if output == 'out': + return { + 'value': np.int32(obj.out) + } #---------------------------------------------------------- -class View2: +class View2(BaseView): """Outputs: - out: "{{ system_user.username }}/integer/1" @@ -86,47 +68,61 @@ class View2: --------------- --------------- --------------- --------------- """ - def setup(self, root_folder, outputs, parameters, force_start_index=None, - force_end_index=None): + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['out']) - # Initialisations - self.outputs = outputs + return [ + Entry(53), + ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'out': + return { + 'value': np.int32(obj.out) + } + + +#---------------------------------------------------------- - # Determine the range of indices that must be provided - self.start_index = force_start_index if force_start_index is not None else 0 - self.end_index = force_end_index if force_end_index is not None else 0 - self.next_index = self.start_index +class LargeView(BaseView): - return True + """Outputs: + - out: "{{ system_user.username }}/integer/1" + --------------- --------------- --------------- --------------- + | out | | out | | out | | out | + --------------- --------------- --------------- --------------- + """ - def done(self, 
last_data_index): - return last_data_index >= self.end_index + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['out']) + return [ + Entry(0), + Entry(1), + Entry(2), + Entry(3), + Entry(4), + ] - def next(self): - # Output: out (provide data at each iteration) - if self.outputs['out'].isConnected(): - self.outputs['out'].write( - { - 'value': np.int32(53) - }, - self.next_index - ) - # Determine the next data index that must be provided - self.next_index = 1 + min([ x.last_written_data_index for x in self.outputs - if x.isConnected() ] - ) + def get(self, output, index): + obj = self.objs[index] - return True + if output == 'out': + return { + 'value': np.int32(obj.out) + } #---------------------------------------------------------- -class LargeView: +class View10(BaseView): """Outputs: - out: "{{ system_user.username }}/integer/1" @@ -136,41 +132,77 @@ class LargeView: --------------- --------------- --------------- --------------- """ - def setup(self, root_folder, outputs, parameters, force_start_index=None, - force_end_index=None): + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['out']) - # Initialisations - self.outputs = outputs + return [ + Entry(0), + Entry(1), + Entry(2), + Entry(3), + Entry(4), + Entry(5), + Entry(6), + Entry(7), + Entry(8), + Entry(9), + ] - # Determine the range of indices that must be provided - self.start_index = force_start_index if force_start_index is not None else 0 - self.end_index = force_end_index if force_end_index is not None else 4 - self.next_index = self.start_index + def get(self, output, index): + obj = self.objs[index] - return True + if output == 'out': + return { + 'value': np.int32(obj.out) + } - def done(self, last_data_index): - return last_data_index >= self.end_index +#---------------------------------------------------------- + +class ViewDuo(BaseView): - def next(self): - # Output: out (provide data at each iteration) - if self.outputs['out'].isConnected(): - 
self.outputs['out'].write( - { - 'value': np.int32(self.next_index) - }, - self.next_index - ) + """Outputs: + - a: "{{ system_user.username }}/integer/1" + - b: "{{ system_user.username }}/integer/1" - # Determine the next data index that must be provided - self.next_index = 1 + min([ x.last_written_data_index for x in self.outputs - if x.isConnected() ] - ) + ------------------------------- ------------------------------- + | a | | a | + ------------------------------- ------------------------------- + --------------- --------------- --------------- --------------- + | b | | b | | b | | b | + --------------- --------------- --------------- --------------- + """ - return True + def index(self, root_folder, parameters): + Entry = namedtuple('Entry', ['a', 'b']) + + return [ + Entry(0, 0), + Entry(0, 1), + Entry(0, 2), + Entry(10, 3), + Entry(10, 4), + Entry(10, 5), + Entry(20, 6), + Entry(20, 7), + Entry(20, 8), + ] + + + def get(self, output, index): + obj = self.objs[index] + + if output == 'a': + return { + 'value': np.int32(obj.a) + } + + elif output == 'b': + return { + 'value': np.int32(obj.b) + } #---------------------------------------------------------- diff --git a/test/experiments/username/username/duo/1/split_2.json b/test/experiments/username/username/duo/1/split_2.json new file mode 100644 index 0000000..1c321e7 --- /dev/null +++ b/test/experiments/username/username/duo/1/split_2.json @@ -0,0 +1,37 @@ +{ + "analyzers": { + "analysis": { + "algorithm": "{{ user.username }}/integers_echo_analyzer/1", + "inputs": { + "in_data": "in" + } + } + }, + "blocks": { + "operation": { + "algorithm": "{{ user.username }}/integers_sum/1", + "nb_slots": 2, + "inputs": { + "a": "a", + "b": "b" + }, + "outputs": { + "sum": "out" + } + } + }, + "datasets": { + "set": { + "database": "simple/1", + "protocol": "duo", + "set": "numbers" + } + }, + "globals": { + "queue": "{{ queue }}", + "environment": { + "name": "{{ environment.name }}", + "version": "{{ 
environment.version }}" + } + } +} diff --git a/test/experiments/username/username/single/1/single.json b/test/experiments/username/username/single/1/single.json index 236549d..5ee0ee1 100644 --- a/test/experiments/username/username/single/1/single.json +++ b/test/experiments/username/username/single/1/single.json @@ -21,8 +21,8 @@ "datasets": { "set": { "database": "simple/1", - "protocol": "protocol", - "set": "set" + "protocol": "10_numbers", + "set": "numbers" } }, "globals": { diff --git a/test/experiments/username/username/single/1/single_add.json b/test/experiments/username/username/single/1/single_add.json index 0d46f10..a446d7e 100644 --- a/test/experiments/username/username/single/1/single_add.json +++ b/test/experiments/username/username/single/1/single_add.json @@ -20,9 +20,9 @@ }, "datasets": { "set": { - "database": "simple/1", - "protocol": "protocol", - "set": "set" + "database": "simple/1", + "protocol": "10_numbers", + "set": "numbers" } }, "globals": { diff --git a/test/experiments/username/username/single/1/single_error.json b/test/experiments/username/username/single/1/single_error.json index 86eddd2..b6d9f09 100644 --- a/test/experiments/username/username/single/1/single_error.json +++ b/test/experiments/username/username/single/1/single_error.json @@ -21,7 +21,7 @@ "datasets": { "set": { "database": "simple/1", - "protocol": "protocol", + "protocol": "protocol2", "set": "set" } }, diff --git a/test/experiments/username/username/single/1/single_error_split_2.json b/test/experiments/username/username/single/1/single_error_split_2.json index 1824212..6463a7d 100644 --- a/test/experiments/username/username/single/1/single_error_split_2.json +++ b/test/experiments/username/username/single/1/single_error_split_2.json @@ -22,7 +22,7 @@ "datasets": { "set": { "database": "simple/1", - "protocol": "protocol", + "protocol": "protocol2", "set": "set" } }, diff --git a/test/experiments/username/username/single/1/single_split_10.json 
b/test/experiments/username/username/single/1/single_split_10.json index 57587c5..856486e 100644 --- a/test/experiments/username/username/single/1/single_split_10.json +++ b/test/experiments/username/username/single/1/single_split_10.json @@ -22,7 +22,7 @@ "datasets": { "set": { "database": "simple/1", - "protocol": "protocol", + "protocol": "protocol2", "set": "set" } }, diff --git a/test/experiments/username/username/single/1/single_split_2.json b/test/experiments/username/username/single/1/single_split_2.json index 4f3f96d..f9d3d42 100644 --- a/test/experiments/username/username/single/1/single_split_2.json +++ b/test/experiments/username/username/single/1/single_split_2.json @@ -22,8 +22,8 @@ "datasets": { "set": { "database": "simple/1", - "protocol": "protocol", - "set": "set" + "protocol": "10_numbers", + "set": "numbers" } }, "globals": { diff --git a/test/toolchains/username/duo/1.json b/test/toolchains/username/duo/1.json new file mode 100644 index 0000000..8959e73 --- /dev/null +++ b/test/toolchains/username/duo/1.json @@ -0,0 +1,60 @@ +{ + "description": "", + "connections": [ + { + "to": "operation.a", + "from": "set.a", + "channel": "set" + }, + { + "to": "operation.b", + "from": "set.b", + "channel": "set" + }, + { + "to": "analysis.in", + "from": "operation.out", + "channel": "set" + } + ], + "analyzers": [ + { + "inputs": [ + "in" + ], + "synchronized_channel": "set", + "name": "analysis" + } + ], + "datasets": [ + { + "outputs": [ + "a", + "b" + ], + "name": "set" + } + ], + "blocks": [ + { + "inputs": [ + "a", + "b" + ], + "synchronized_channel": "set", + "name": "operation", + "outputs": [ + "out" + ] + } + ], + "representation": { + "connections": { + }, + "blocks": { + }, + "channel_colors": { + "set": "#0000FF" + } + } +} -- GitLab