Commit 5615c74b authored by Tiago de Freitas Pereira

Removed legacy components

parent 81e443ea
Pipeline #46558 failed with stage in 14 minutes and 25 seconds
import numpy

-from bob.bio.base_legacy.extractor import Extractor
+from bob.bio.base.extractor import Extractor


class DummyExtractor(Extractor):
    """ The Dummy class for passing the extracted embedding.
    """

    def __init__(self):
        """ Init function
        """
        super(DummyExtractor, self).__init__()

    def __call__(self, data):
        """__call__(data) -> feature

        This function will actually perform the feature extraction.
        It must be overwritten by derived classes.

@@ -27,4 +29,5 @@ class DummyExtractor(Extractor):

        feature : object (usually :py:class:`numpy.ndarray`)
            The extracted feature.
        """
        return data
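A minimal usage sketch (the 128-d random embedding is illustrative only): since the extractor passes its input through unchanged, a precomputed embedding goes straight into the rest of the pipeline.

import numpy

# illustrative only: DummyExtractor returns its input unchanged
extractor = DummyExtractor()
embedding = numpy.random.rand(128)
assert (extractor(embedding) == embedding).all()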
import numpy as np

import torch
from torch.autograd import Variable

import torchvision.transforms as transforms

from bob.learn.pytorch.architectures import DeepPixBiS

-from bob.bio.base_legacy.extractor import Extractor
+from bob.bio.base.extractor import Extractor

import logging

logger = logging.getLogger("bob.learn.pytorch")


class DeepPixBiSExtractor(Extractor):
    """ The class implementing the DeepPixBiS score computation.

    Attributes
    ----------

@@ -21,9 +23,21 @@ class DeepPixBiSExtractor(Extractor):
        The transform from numpy.array to torch.Tensor
    """

    def __init__(
        self,
        transforms=transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                ),
            ]
        ),
        model_file=None,
        scoring_method="pixel_mean",
    ):
        """ Init method

        Parameters
        ----------

@@ -37,38 +51,44 @@ class DeepPixBiSExtractor(Extractor):
        """

        Extractor.__init__(self, skip_extractor_training=True)

        # model
        self.transforms = transforms
        self.network = DeepPixBiS(pretrained=False)
        self.scoring_method = scoring_method
        self.available_scoring_methods = ["pixel_mean", "binary", "combined"]

        logger.debug("Scoring method is : {}".format(self.scoring_method.upper()))

        if model_file is None:
            # do nothing (used mainly for unit testing)
            logger.debug("No pretrained file provided")
        else:
            logger.debug("Starting to load the pretrained PAD model")
            try:
                cp = torch.load(model_file)
            except Exception:
                raise ValueError(
                    "Failed to load the model file : {}".format(model_file)
                )

            if "state_dict" in cp:
                self.network.load_state_dict(cp["state_dict"])
            else:
                raise ValueError(
                    "Failed to load the state_dict for model file: {}".format(
                        model_file
                    )
                )

            logger.debug("Loaded the pretrained PAD model")

        self.network.eval()

    def __call__(self, image):
        """ Extract features from an image

        Parameters
        ----------

@@ -81,23 +101,27 @@ class DeepPixBiSExtractor(Extractor):
            The extracted feature is a scalar value: ~1 for bonafide and ~0 for PAs
        """

        input_image = np.rollaxis(
            np.rollaxis(image, 2), 2
        )  # changes from CxHxW to HxWxC
        input_image = self.transforms(input_image)
        input_image = input_image.unsqueeze(0)

        output = self.network.forward(Variable(input_image))
        output_pixel = output[0].data.numpy().flatten()
        output_binary = output[1].data.numpy().flatten()

        if self.scoring_method == "pixel_mean":
            score = np.mean(output_pixel)
        elif self.scoring_method == "binary":
            score = np.mean(output_binary)
        elif self.scoring_method == "combined":
            score = (np.mean(output_pixel) + np.mean(output_binary)) / 2.0
        else:
            raise ValueError(
                "Scoring method {} is not implemented.".format(self.scoring_method)
            )

        # output is a scalar score
        return np.reshape(score, (1, -1))
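A minimal usage sketch. The 3x224x224 RGB input shape is an assumption for illustration (DeepPixBiS is typically fed 224x224 face crops), and with model_file=None the network stays untrained, as in the unit-test path.

import numpy as np

# assumption: a 3x224x224 RGB face crop in CxHxW order, uint8
extractor = DeepPixBiSExtractor(scoring_method="pixel_mean")
face = np.random.randint(0, 256, (3, 224, 224), dtype=np.uint8)
score = extractor(face)  # shape (1, 1); ~1 bonafide, ~0 PA once a trained model is loaded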
import numpy as np

import torch
from torch.autograd import Variable

import torchvision.transforms as transforms

from bob.learn.pytorch.architectures import FASNet

-from bob.bio.base_legacy.extractor import Extractor
+from bob.bio.base.extractor import Extractor

import logging

logger = logging.getLogger("bob.learn.pytorch")


class FASNetExtractor(Extractor):
    """ The class implementing the FASNet score computation.

    Attributes
    ----------

@@ -21,10 +23,22 @@ class FASNetExtractor(Extractor):
        The transform from numpy.array to torch.Tensor
    """

    def __init__(
        self,
        transforms=transforms.Compose(
            [
                transforms.ToPILImage(),
                transforms.Resize(224, interpolation=2),
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                ),
            ]
        ),
        model_file=None,
    ):
        """ Init method

        Parameters
        ----------

@@ -40,34 +54,32 @@ class FASNetExtractor(Extractor):
        """

        Extractor.__init__(self, skip_extractor_training=True)

        # model
        self.transforms = transforms
        self.network = FASNet(pretrained=False)

        # self.network = self.network.to(device)

        if model_file is None:
            # do nothing (used mainly for unit testing)
            logger.debug("No pretrained file provided")
        else:
            # With the new training
            logger.debug("Starting to load the pretrained PAD model")
            cp = torch.load(model_file)
            if "state_dict" in cp:
                self.network.load_state_dict(cp["state_dict"])

            logger.debug("Loaded the pretrained PAD model")

        self.network.eval()

    def __call__(self, image):
        """ Extract features from an image

        Parameters
        ----------

@@ -83,14 +95,16 @@ class FASNetExtractor(Extractor):
            The extracted feature is a scalar value: ~1 for bonafide and ~0 for PAs
        """

        input_image = np.rollaxis(
            np.rollaxis(image, 2), 2
        )  # changes to 128x128xnum_channels
        input_image = self.transforms(input_image)
        input_image = input_image.unsqueeze(0)
        output = self.network.forward(Variable(input_image))
        output = output.data.numpy().flatten()

        # output is a scalar score
        return output
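A minimal usage sketch. The 3x128x128 input size is an assumption taken from the "changes to 128x128xnum_channels" comment above; the default transform then resizes the crop to 224x224 before it reaches the network.

import numpy as np

# assumption: a 3x128x128 RGB face crop in CxHxW order, uint8
extractor = FASNetExtractor()  # model_file=None keeps the network untrained
face = np.random.randint(0, 256, (3, 128, 128), dtype=np.uint8)
score = extractor(face)  # 1d array holding a single scalar score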
@@ -6,10 +6,11 @@ from torch.autograd import Variable

import torchvision.transforms as transforms

from bob.learn.pytorch.architectures import LightCNN29

-from bob.bio.base_legacy.extractor import Extractor
+from bob.bio.base.extractor import Extractor


class LightCNN29Extractor(Extractor):
    """ The class implementing the feature extraction of LightCNN29 embeddings.

    Attributes
    ----------

@@ -21,9 +22,9 @@ class LightCNN29Extractor(Extractor):
        The transform to normalize the input
    """

    def __init__(self, model_file=None, num_classes=79077):
        """ Init method

        Parameters
        ----------

@@ -33,44 +34,45 @@ class LightCNN29Extractor(Extractor):
            The number of classes.
        """

        Extractor.__init__(self, skip_extractor_training=True)

        # model
        self.network = LightCNN29(num_classes=num_classes)
        if model_file is None:
            # do nothing (used mainly for unit testing)
            pass
        else:
            cp = torch.load(model_file, map_location="cpu")

            # check if the pre-trained model was saved using nn.DataParallel ...
            saved_with_nnDataParallel = False
            for k, v in cp["state_dict"].items():
                if "module" in k:
                    saved_with_nnDataParallel = True
                    break

            # if it was, the keys of the state_dict have to be renamed (i.e. remove 'module.')
            if saved_with_nnDataParallel:
                if "state_dict" in cp:
                    from collections import OrderedDict

                    new_state_dict = OrderedDict()
                    for k, v in cp["state_dict"].items():
                        name = k[7:]
                        new_state_dict[name] = v

                    self.network.load_state_dict(new_state_dict)
            else:
                self.network.load_state_dict(cp["state_dict"])

        self.network.eval()

        # image pre-processing
        self.to_tensor = transforms.ToTensor()
        self.norm = transforms.Normalize((0.5,), (0.5,))

    def __call__(self, image):
        """ Extract features from an image

        Parameters
        ----------

@@ -83,16 +85,16 @@ class LightCNN29Extractor(Extractor):
            The extracted features as a 1d array of size 320
        """

        # torchvision.transforms expect a numpy array of size HxWxC
        input_image = numpy.expand_dims(image, axis=2)
        input_image = self.to_tensor(input_image)
        input_image = self.norm(input_image)
        input_image = input_image.unsqueeze(0)

        # to be compliant with the loaded model, where weights and biases are torch.FloatTensor
        input_image = input_image.float()

        _, features = self.network.forward(Variable(input_image))
        features = features.data.numpy().flatten()

        return features
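Context for the k[7:] slice above: nn.DataParallel saves every parameter under a "module." prefix, and len("module.") is 7. A self-contained sketch of that renaming step (the keys below are illustrative):

from collections import OrderedDict

# illustrative checkpoint keys as saved through nn.DataParallel
state_dict = OrderedDict([("module.conv1.weight", 0), ("module.fc.bias", 1)])

# strip the 'module.' prefix, exactly as the loader above does with k[7:]
stripped = OrderedDict((k[7:], v) for k, v in state_dict.items())
assert list(stripped) == ["conv1.weight", "fc.bias"]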
@@ -6,10 +6,11 @@ from torch.autograd import Variable

import torchvision.transforms as transforms

from bob.learn.pytorch.architectures import LightCNN29v2

-from bob.bio.base_legacy.extractor import Extractor
+from bob.bio.base.extractor import Extractor


class LightCNN29v2Extractor(Extractor):
    """ The class implementing the feature extraction of LightCNN29v2 embeddings.

    Attributes
    ----------

@@ -21,9 +22,9 @@ class LightCNN29v2Extractor(Extractor):
        The transform to normalize the input
    """

    def __init__(self, model_file=None, num_classes=79077):
        """ Init method

        Parameters
        ----------

@@ -33,44 +34,45 @@ class LightCNN29v2Extractor(Extractor):
            The number of classes.
        """

        Extractor.__init__(self, skip_extractor_training=True)

        # model
        self.network = LightCNN29v2(num_classes=num_classes)
        if model_file is None:
            # do nothing (used mainly for unit testing)
            pass
        else:
            cp = torch.load(model_file, map_location="cpu")

            # check if the pre-trained model was saved using nn.DataParallel ...
            saved_with_nnDataParallel = False
            for k, v in cp["state_dict"].items():
                if "module" in k:
                    saved_with_nnDataParallel = True
                    break

            # if it was, the keys of the state_dict have to be renamed (i.e. remove 'module.')
            if saved_with_nnDataParallel:
                if "state_dict" in cp:
                    from collections import OrderedDict

                    new_state_dict = OrderedDict()
                    for k, v in cp["state_dict"].items():
                        name = k[7:]
                        new_state_dict[name] = v

                    self.network.load_state_dict(new_state_dict)
            else:
                self.network.load_state_dict(cp["state_dict"])

        self.network.eval()

        # image pre-processing
        self.to_tensor = transforms.ToTensor()
        self.norm = transforms.Normalize((0.5,), (0.5,))

    def __call__(self, image):
        """ Extract features from an image

        Parameters
        ----------

@@ -83,16 +85,16 @@ class LightCNN29v2Extractor(Extractor):
            The extracted features as a 1d array of size 320
        """

        # torchvision.transforms expect a numpy array of size HxWxC
        input_image = numpy.expand_dims(image, axis=2)
        input_image = self.to_tensor(input_image)
        input_image = self.norm(input_image)
        input_image = input_image.unsqueeze(0)

        # to be compliant with the loaded model, where weights and biases are torch.FloatTensor
        input_image = input_image.float()

        _, features = self.network.forward(Variable(input_image))
        features = features.data.numpy().flatten()

        return features
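A minimal usage sketch for the embedding extractor. The 128x128 grayscale input is an assumption based on the HxWxC comment above (LightCNN models are typically fed 128x128 face crops), and model_file=None keeps the network untrained.

import numpy

# assumption: a 128x128 grayscale face crop (HxW)
extractor = LightCNN29v2Extractor()
face = numpy.random.rand(128, 128)
features = extractor(face)  # 1d numpy array of size 320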