From 04c86c8466470406d0b0f716fb4a368b832a499d Mon Sep 17 00:00:00 2001
From: ageorge <anjith.george@idiap.ch>
Date: Fri, 3 May 2019 15:01:18 +0200
Subject: [PATCH] Fix pretrained-model download paths and clean up configs

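Point the pretrained FASNet and MCCNN model URLs at the Bob data
server instead of a personal home directory, remove a block of stray
absolute model paths that had been pasted into FASNet_config.py, fix
the misspelled _image_extracor identifier, and apply PEP8 formatting
to both configuration files.

Both configs keep the same fallback behaviour: use the model bundled
with the package when it is present, otherwise download it once. A
minimal sketch of that pattern (names taken from the FASNet config):

    import os
    import pkg_resources

    import bob.extension.download

    MODEL_FILE = pkg_resources.resource_filename(
        'bob.paper.mccnn.tifs2018', 'models/fasnet.pth')
    URL = ('https://www.idiap.ch/software/bob/data/bob/'
           'bob.paper.mccnn.tifs2018/master/fasnet.pth')

    if not os.path.exists(MODEL_FILE):
        # fetch the pretrained weights into the package's models/ dir
        bob.extension.download.download_file(URL, MODEL_FILE)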
---
 .../mccnn/tifs2018/config/FASNet_config.py    | 100 +++++++++----------
 .../mccnn/tifs2018/config/MCCNN_config.py     |  95 +++++++++---------
 2 files changed, 93 insertions(+), 102 deletions(-)

diff --git a/bob/paper/mccnn/tifs2018/config/FASNet_config.py b/bob/paper/mccnn/tifs2018/config/FASNet_config.py
index 2acbff1..861bbd3 100644
--- a/bob/paper/mccnn/tifs2018/config/FASNet_config.py
+++ b/bob/paper/mccnn/tifs2018/config/FASNet_config.py
@@ -18,7 +18,7 @@ from torchvision import transforms
 from bob.learn.pytorch.datasets import ChannelSelect
 
 # names of the channels to process:
-_channel_names = ['color','depth','infrared','thermal']
+_channel_names = ['color', 'depth', 'infrared', 'thermal']
 
 # dictionary containing preprocessors for all channels:
 _preprocessors = {}
@@ -34,18 +34,18 @@ FACE_DETECTION_METHOD = 'mtcnn'  # use ANNOTATIONS
 MIN_FACE_SIZE = 50  # skip small faces
 ALIGNMENT_TYPE = 'lightcnn'
 
-_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
-                                    rgb_output_flag = RGB_OUTPUT_FLAG,
-                                    use_face_alignment = USE_FACE_ALIGNMENT,
-                                    alignment_type =ALIGNMENT_TYPE,
-                                    max_image_size = MAX_IMAGE_SIZE,
-                                    face_detection_method = FACE_DETECTION_METHOD,
-                                    min_face_size = MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
+                                    rgb_output_flag=RGB_OUTPUT_FLAG,
+                                    use_face_alignment=USE_FACE_ALIGNMENT,
+                                    alignment_type=ALIGNMENT_TYPE,
+                                    max_image_size=MAX_IMAGE_SIZE,
+                                    face_detection_method=FACE_DETECTION_METHOD,
+                                    min_face_size=MIN_FACE_SIZE)
 
-_frame_selector = FrameSelector(selection_style = "all")
+_frame_selector = FrameSelector(selection_style="all")
 
-_preprocessor_rgb = Wrapper(preprocessor = _image_preprocessor,
-                            frame_selector = _frame_selector)
+_preprocessor_rgb = Wrapper(preprocessor=_image_preprocessor,
+                            frame_selector=_frame_selector)
 
 _preprocessors[_channel_names[0]] = _preprocessor_rgb
 
@@ -60,20 +60,20 @@ FACE_DETECTION_METHOD = None  # use annotations
 MIN_FACE_SIZE = 50  # skip small faces
 NORMALIZATION_FUNCTION = _norm_func
 NORMALIZATION_FUNCTION_KWARGS = {}
-NORMALIZATION_FUNCTION_KWARGS = {'n_sigma':3.0, 'norm_method':'MAD'}
+NORMALIZATION_FUNCTION_KWARGS = {'n_sigma': 3.0, 'norm_method': 'MAD'}
 
-_image_preprocessor_ir = FaceCropAlign(face_size = FACE_SIZE,
-                                    rgb_output_flag = RGB_OUTPUT_FLAG,
-                                    use_face_alignment = USE_FACE_ALIGNMENT,
-                                    alignment_type =ALIGNMENT_TYPE,
-                                    max_image_size = MAX_IMAGE_SIZE,
-                                    face_detection_method = FACE_DETECTION_METHOD,
-                                    min_face_size = MIN_FACE_SIZE,
-                                    normalization_function = NORMALIZATION_FUNCTION,
-                                    normalization_function_kwargs = NORMALIZATION_FUNCTION_KWARGS)
+_image_preprocessor_ir = FaceCropAlign(face_size=FACE_SIZE,
+                                       rgb_output_flag=RGB_OUTPUT_FLAG,
+                                       use_face_alignment=USE_FACE_ALIGNMENT,
+                                       alignment_type=ALIGNMENT_TYPE,
+                                       max_image_size=MAX_IMAGE_SIZE,
+                                       face_detection_method=FACE_DETECTION_METHOD,
+                                       min_face_size=MIN_FACE_SIZE,
+                                       normalization_function=NORMALIZATION_FUNCTION,
+                                       normalization_function_kwargs=NORMALIZATION_FUNCTION_KWARGS)
 
-_preprocessor_ir = Wrapper(preprocessor = _image_preprocessor_ir,
-                               frame_selector = _frame_selector)
+_preprocessor_ir = Wrapper(preprocessor=_image_preprocessor_ir,
+                           frame_selector=_frame_selector)
 
 _preprocessors[_channel_names[1]] = _preprocessor_ir
 
@@ -83,9 +83,9 @@ _preprocessors[_channel_names[2]] = _preprocessor_ir
 _preprocessors[_channel_names[3]] = _preprocessor_ir
 
 
-preprocessor = VideoFaceCropAlignBlockPatch(preprocessors = _preprocessors,
-                                            channel_names = _channel_names,
-                                            return_multi_channel_flag = True)
+preprocessor = VideoFaceCropAlignBlockPatch(preprocessors=_preprocessors,
+                                            channel_names=_channel_names,
+                                            return_multi_channel_flag=True)
 
 
 #====================================================================================
@@ -96,42 +96,31 @@ from bob.learn.pytorch.extractor.image import FASNetExtractor
 
 from bob.bio.video.extractor import Wrapper
 
-# MODEL_FILE= None # Replace with '<PATH_TO_MODEL>'   
+# MODEL_FILE = None  # Replace with '<PATH_TO_MODEL>'
 # ####################################################################
 
 # If you want to use the pretrained model
 
 import pkg_resources
 
-MODEL_FILE = pkg_resources.resource_filename( 'bob.paper.mccnn.tifs2018', 'models/fasnet.pth')
+MODEL_FILE = pkg_resources.resource_filename('bob.paper.mccnn.tifs2018', 'models/fasnet.pth')
 
-URL='http://www.idiap.ch/~ageorge/model_100_0.pth'
+URL = 'https://www.idiap.ch/software/bob/data/bob/bob.paper.mccnn.tifs2018/master/fasnet.pth'
 
 if not os.path.exists(MODEL_FILE):
 
-  logger.info('Downloading the FASNet model')
+    logger.info('Downloading the FASNet model')
 
-  bob.extension.download.download_file(URL,MODEL_FILE)
+    bob.extension.download.download_file(URL, MODEL_FILE)
 
-  logger.info('Downloaded FASNet model to location: {}'.format(MODEL_FILE))
+    logger.info('Downloaded FASNet model to location: {}'.format(MODEL_FILE))
 
 
+_img_transform = transforms.Compose([transforms.ToPILImage(), transforms.Resize(224, interpolation=2), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                                                                                                                                   std=[0.229, 0.224, 0.225])])
 
 
-
-MODEL_FILE=
-
-/idiap/temp/ageorge/MCCNN_paperpackage/bob.paper.mccnn.tifs2018/src/bob.paper.mccnn.tifs2018/bob/paper/mccnn/tifs2018/models/mccnn_best_C1-B1-FFC.pth
-/idiap/temp/ageorge/MCCNN_paperpackage/bob.paper.mccnn.tifs2018/src/bob.paper.mccnn.tifs2018/bob/paper/mccnn/tifs2018/models/fasnet.pth
-
-
-
-
-_img_transform = transforms.Compose([transforms.ToPILImage(),transforms.Resize(224, interpolation=2),transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406],
-                                 std=[0.229, 0.224, 0.225])])
-
-
-_image_extracor=FASNetExtractor(transforms=_img_transform, model_file=MODEL_FILE)
+_image_extractor = FASNetExtractor(transforms=_img_transform, model_file=MODEL_FILE)
 
-extractor = Wrapper(_image_extracor)
+extractor = Wrapper(_image_extractor)
 
@@ -141,26 +130,27 @@ extractor = Wrapper(_image_extracor)
 
 from bob.pad.base.algorithm import Algorithm
 
+
 class DummyAlgorithm(Algorithm):
     """An algorithm that takes the precomputed predictions and uses them for
     scoring."""
 
     def __init__(self, **kwargs):
 
-      super(DummyAlgorithm, self).__init__(
-          **kwargs)
+        super(DummyAlgorithm, self).__init__(
+            **kwargs)
 
     def project(self, feature):
-      # print("feature",feature.as_array())
-      return feature.as_array().reshape(-1,1)
-
+        # print("feature",feature.as_array())
+        return feature.as_array().reshape(-1, 1)
 
     def score_for_multiple_projections(self, predictions):
-      # one node at the output
+        # one node at the output
 
-      return list(predictions)
+        return list(predictions)
 
     def score(self, predictions):
-      return list(predictions)
+        return list(predictions)
+
 
 algorithm = DummyAlgorithm(performs_projection=True,  requires_projector_training=False)
diff --git a/bob/paper/mccnn/tifs2018/config/MCCNN_config.py b/bob/paper/mccnn/tifs2018/config/MCCNN_config.py
index d698bcc..b36b5b7 100644
--- a/bob/paper/mccnn/tifs2018/config/MCCNN_config.py
+++ b/bob/paper/mccnn/tifs2018/config/MCCNN_config.py
@@ -16,7 +16,7 @@ from torchvision import transforms
 from bob.learn.pytorch.datasets import ChannelSelect
 
 # names of the channels to process:
-_channel_names = ['color','depth','infrared','thermal']
+_channel_names = ['color', 'depth', 'infrared', 'thermal']
 
 # dictionary containing preprocessors for all channels:
 _preprocessors = {}
@@ -32,18 +32,18 @@ FACE_DETECTION_METHOD = 'mtcnn'  # use ANNOTATIONS
 MIN_FACE_SIZE = 50  # skip small faces
 ALIGNMENT_TYPE = 'lightcnn'
 
-_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
-                                    rgb_output_flag = RGB_OUTPUT_FLAG,
-                                    use_face_alignment = USE_FACE_ALIGNMENT,
-                                    alignment_type =ALIGNMENT_TYPE,
-                                    max_image_size = MAX_IMAGE_SIZE,
-                                    face_detection_method = FACE_DETECTION_METHOD,
-                                    min_face_size = MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
+                                    rgb_output_flag=RGB_OUTPUT_FLAG,
+                                    use_face_alignment=USE_FACE_ALIGNMENT,
+                                    alignment_type=ALIGNMENT_TYPE,
+                                    max_image_size=MAX_IMAGE_SIZE,
+                                    face_detection_method=FACE_DETECTION_METHOD,
+                                    min_face_size=MIN_FACE_SIZE)
 
-_frame_selector = FrameSelector(selection_style = "all")
+_frame_selector = FrameSelector(selection_style="all")
 
-_preprocessor_rgb = Wrapper(preprocessor = _image_preprocessor,
-                            frame_selector = _frame_selector)
+_preprocessor_rgb = Wrapper(preprocessor=_image_preprocessor,
+                            frame_selector=_frame_selector)
 
 _preprocessors[_channel_names[0]] = _preprocessor_rgb
 
@@ -58,20 +58,20 @@ FACE_DETECTION_METHOD = None  # use annotations
 MIN_FACE_SIZE = 50  # skip small faces
 NORMALIZATION_FUNCTION = _norm_func
 NORMALIZATION_FUNCTION_KWARGS = {}
-NORMALIZATION_FUNCTION_KWARGS = {'n_sigma':3.0, 'norm_method':'MAD'}
+NORMALIZATION_FUNCTION_KWARGS = {'n_sigma': 3.0, 'norm_method': 'MAD'}
 
-_image_preprocessor_ir = FaceCropAlign(face_size = FACE_SIZE,
-                                    rgb_output_flag = RGB_OUTPUT_FLAG,
-                                    use_face_alignment = USE_FACE_ALIGNMENT,
-                                    alignment_type =ALIGNMENT_TYPE,
-                                    max_image_size = MAX_IMAGE_SIZE,
-                                    face_detection_method = FACE_DETECTION_METHOD,
-                                    min_face_size = MIN_FACE_SIZE,
-                                    normalization_function = NORMALIZATION_FUNCTION,
-                                    normalization_function_kwargs = NORMALIZATION_FUNCTION_KWARGS)
+_image_preprocessor_ir = FaceCropAlign(face_size=FACE_SIZE,
+                                       rgb_output_flag=RGB_OUTPUT_FLAG,
+                                       use_face_alignment=USE_FACE_ALIGNMENT,
+                                       alignment_type=ALIGNMENT_TYPE,
+                                       max_image_size=MAX_IMAGE_SIZE,
+                                       face_detection_method=FACE_DETECTION_METHOD,
+                                       min_face_size=MIN_FACE_SIZE,
+                                       normalization_function=NORMALIZATION_FUNCTION,
+                                       normalization_function_kwargs=NORMALIZATION_FUNCTION_KWARGS)
 
-_preprocessor_ir = Wrapper(preprocessor = _image_preprocessor_ir,
-                               frame_selector = _frame_selector)
+_preprocessor_ir = Wrapper(preprocessor=_image_preprocessor_ir,
+                           frame_selector=_frame_selector)
 
 _preprocessors[_channel_names[1]] = _preprocessor_ir
 
@@ -80,9 +80,9 @@ _preprocessors[_channel_names[2]] = _preprocessor_ir
 _preprocessors[_channel_names[3]] = _preprocessor_ir
 
 
-preprocessor = VideoFaceCropAlignBlockPatch(preprocessors = _preprocessors,
-                                            channel_names = _channel_names,
-                                            return_multi_channel_flag = True)
+preprocessor = VideoFaceCropAlignBlockPatch(preprocessors=_preprocessors,
+                                            channel_names=_channel_names,
+                                            return_multi_channel_flag=True)
 
 
 #====================================================================================
@@ -92,38 +92,38 @@ from bob.learn.pytorch.extractor.image import MCCNNExtractor
 
 from bob.bio.video.extractor import Wrapper
 
-# MODEL_FILE= None # Replace with '<PATH_TO_MODEL>'   
+# MODEL_FILE = None  # Replace with '<PATH_TO_MODEL>'
 # ####################################################################
 
 # If you want to use the pretrained model
 
 import pkg_resources
 
-MODEL_FILE = pkg_resources.resource_filename( 'bob.paper.mccnn.tifs2018', 'models/mccnn_best_C1-B1-FFC.pth')
+MODEL_FILE = pkg_resources.resource_filename('bob.paper.mccnn.tifs2018', 'models/mccnn_best_C1-B1-FFC.pth')
 
-URL='http://www.idiap.ch/~ageorge/model_100_0.pth'
+URL = 'https://www.idiap.ch/software/bob/data/bob/bob.paper.mccnn.tifs2018/master/mccnn_best_C1-B1-FFC.pth'
 
 if not os.path.exists(MODEL_FILE):
 
-  logger.info('Downloading the MCCNN model')
+    logger.info('Downloading the MCCNN model')
 
-  bob.extension.download.download_file(URL,MODEL_FILE)
+    bob.extension.download.download_file(URL, MODEL_FILE)
 
-  logger.info('Downloaded MCCNN model to location: {}'.format(MODEL_FILE))
+    logger.info('Downloaded MCCNN model to location: {}'.format(MODEL_FILE))
 
 
-ADAPTED_LAYERS= 'conv1-group1-ffc'
+ADAPTED_LAYERS = 'conv1-group1-ffc'
 ####################################################################
 
-SELECTED_CHANNELS= [0,1,2,3]
+SELECTED_CHANNELS = [0, 1, 2, 3]
 ####################################################################
 
 
-NUM_CHANNELS_USED=len(SELECTED_CHANNELS)
+NUM_CHANNELS_USED = len(SELECTED_CHANNELS)
 
-_img_transform = transforms.Compose([ChannelSelect(selected_channels = SELECTED_CHANNELS),transforms.ToTensor()])
+_img_transform = transforms.Compose([ChannelSelect(selected_channels=SELECTED_CHANNELS), transforms.ToTensor()])
 
-_image_extracor=MCCNNExtractor(num_channels_used=NUM_CHANNELS_USED, transforms=_img_transform, model_file=MODEL_FILE)
+_image_extractor = MCCNNExtractor(num_channels_used=NUM_CHANNELS_USED, transforms=_img_transform, model_file=MODEL_FILE)
 
-extractor = Wrapper(_image_extracor)
+extractor = Wrapper(_image_extractor)
 
@@ -133,26 +133,27 @@ extractor = Wrapper(_image_extracor)
 
 from bob.pad.base.algorithm import Algorithm
 
+
 class DummyAlgorithm(Algorithm):
     """An algorithm that takes the precomputed predictions and uses them for
     scoring."""
 
     def __init__(self, **kwargs):
 
-      super(DummyAlgorithm, self).__init__(
-          **kwargs)
+        super(DummyAlgorithm, self).__init__(
+            **kwargs)
 
     def project(self, feature):
-      # print("feature",feature.as_array())
-      return feature.as_array().reshape(-1,1)
-
+        # print("feature",feature.as_array())
+        return feature.as_array().reshape(-1, 1)
 
     def score_for_multiple_projections(self, predictions):
-      # one node at the output
+        # one node at the output
 
-      return list(predictions)
+        return list(predictions)
 
     def score(self, predictions):
-      return list(predictions)
+        return list(predictions)
+
 
-algorithm = DummyAlgorithm(performs_projection=True,  requires_projector_training=False)
\ No newline at end of file
+algorithm = DummyAlgorithm(performs_projection=True, requires_projector_training=False)
-- 
GitLab