Commit 6a9c75c9 authored by Guillaume HEUSCH's avatar Guillaume HEUSCH

added all of Xiaojiang's architectures (at least the ones in network.py)

parent a5f31f81
+import numpy
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
+from torch.autograd import Variable
 import torchvision.transforms as transforms

@@ -75,10 +78,12 @@ class CNN8Extractor(Extractor):
         input_image = self.norm(input_image)
         input_image = input_image.unsqueeze(0)
         features = self.network.forward(Variable(input_image))
-        feat = feat.data.cpu().numpy().flatten()
         features = features.data.numpy().flatten()
-        print features.shape
+        # normalize
+        std = numpy.sqrt((features ** 2).sum(-1))
+        features /= (std+1e-10)
         return features
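The hunk above L2-normalizes the flattened CNN8 feature vector before returning it. A minimal standalone sketch of that step on a hypothetical vector (not part of the commit):

import numpy

# hypothetical stand-in for the flattened CNN8 output
features = numpy.array([3.0, 4.0], dtype=numpy.float32)

# same normalization as in the hunk: divide by the L2 norm, with an epsilon for stability
std = numpy.sqrt((features ** 2).sum(-1))
features /= (std + 1e-10)

print(features)                     # ~[0.6, 0.8]
print(numpy.linalg.norm(features))  # ~1.0
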
import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.transforms as transforms
CASIA_CONFIG = [32, 64, 'M', 64, 128, 'M', 96, 192, 'M', 128, 256, 'M', 160, 320]
def make_conv_layers(cfg, input_c = 3):
    layers = []
    in_channels = input_c
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            layers += [conv2d, nn.ReLU()]
            in_channels = v
    return nn.Sequential(*layers)
class CASIA_NET(nn.Module):
    def __init__(self, num_cls, drop_rate=0.5):
        super(CASIA_NET, self).__init__()
        self.num_classes = num_cls
        self.drop_rate = float(drop_rate)
        self.conv = make_conv_layers(CASIA_CONFIG)
        self.avgpool = nn.AvgPool2d(8)
        self.classifier = nn.Linear(320, self.num_classes)

    def forward(self, x):
        x = self.conv(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = F.dropout(x, p = self.drop_rate, training=self.training)
        # note: the classifier layer is not applied here; forward returns the
        # 320-dimensional pooled features, which is what the extractor below uses
        return x
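A quick shape sanity check of CASIA_NET (not part of the commit). It assumes 128x128 RGB input, which is what makes the AvgPool2d(8) after the four 'M' stages collapse the spatial dimensions; the class count is only a placeholder.

import torch
from torch.autograd import Variable

net = CASIA_NET(num_cls=10575)  # placeholder class count
net.eval()                      # disable dropout
x = Variable(torch.randn(1, 3, 128, 128))
print(net(x).size())            # expected: (1, 320)
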
from bob.bio.base.extractor import Extractor

class CasiaNetExtractor(Extractor):
    def __init__(self, model_file, num_classes=10575):
        Extractor.__init__(self, skip_extractor_training=True)
        # model
        self.network = CASIA_NET(num_classes)
        cp = torch.load(model_file)
        if 'state_dict' in cp:
            self.network.load_state_dict(cp['state_dict'])
        self.network.eval()
        # image pre-processing
        self.to_tensor = transforms.ToTensor()
        self.norm = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    def __call__(self, image):
        """__call__(image) -> feature

        Extract features

        **Parameters:**

        image : 3D :py:class:`numpy.ndarray` (floats)
          The image to extract the features from.

        **Returns:**

        feature : 1D :py:class:`numpy.ndarray` (floats)
          The extracted (flattened) features
        """
        # bob images are CxHxW; roll the axes to HxWxC for torchvision's ToTensor
        input_image = numpy.rollaxis(numpy.rollaxis(image, 2), 2)
        input_image = self.to_tensor(input_image)
        input_image = self.norm(input_image)
        input_image = input_image.unsqueeze(0)
        features = self.network.forward(Variable(input_image))
        features = features.data.numpy().flatten()
        return features
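A hedged usage sketch for CasiaNetExtractor (not part of the commit): the checkpoint path is a placeholder, and the random array merely stands in for a preprocessed 3x128x128 face crop in bob's CxHxW layout.

import numpy

extractor = CasiaNetExtractor('/path/to/casia_net_checkpoint.pth')  # hypothetical path
image = numpy.random.rand(3, 128, 128).astype('float32')            # placeholder image
feature = extractor(image)
print(feature.shape)  # expected: (320,)
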
import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.transforms as transforms
class BasicBlock(nn.Module):
    def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=1):
        super(BasicBlock, self).__init__()
        self.inplanes = inplanes
        self.outplanes = planes
        self.downsample = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride)
        self.conv_block = self.build_conv_block(inplanes, planes, kernel_size=kernel_size, stride=stride, padding=padding)

    def build_conv_block(self, inplanes, planes, kernel_size=3, stride=1, padding=1):
        conv_block = []
        conv_block += [nn.Conv2d(inplanes, planes, kernel_size=kernel_size, stride=stride, padding=padding), nn.ReLU()]
        conv_block += [nn.Conv2d(planes, planes, kernel_size=kernel_size, stride=stride, padding=padding), nn.ReLU()]
        return nn.Sequential(*conv_block)

    def forward(self, x):
        residual = x
        out = self.conv_block(x)
        if self.inplanes != self.outplanes:
            residual = self.downsample(x)
        out += residual
        return out
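A small sketch of the block's residual behaviour (not part of the commit): when the input and output channel counts differ, the 1x1 downsample convolution projects the input before the addition. The channel counts below are chosen only for illustration.

import torch
from torch.autograd import Variable

block = BasicBlock(64, 128)
y = block(Variable(torch.randn(1, 64, 32, 32)))
print(y.size())  # expected: (1, 128, 32, 32)
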
class ResNet26(nn.Module):
    def __init__(self, num_cls, drop_rate=0.5):
        super(ResNet26, self).__init__()
        self.num_classes = num_cls
        self.drop_rate = float(drop_rate)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv_1_1 = nn.Sequential(nn.Conv2d(3, 32, 3, 1, 1), nn.ReLU())
        self.conv_1_2 = nn.Sequential(nn.Conv2d(32, 64, 3, 1, 1), nn.ReLU())
        self.conv2_res = self._make_layer(BasicBlock, 64, 64, 1, 1)
        self.conv2_2 = nn.Sequential(nn.Conv2d(64, 128, 3, 1, 1), nn.ReLU())
        self.conv3_res = self._make_layer(BasicBlock, 128, 128, 2, 1)
        self.conv3_2 = nn.Sequential(nn.Conv2d(128, 256, 3, 1, 1), nn.ReLU())
        self.conv4_res = self._make_layer(BasicBlock, 256, 256, 3, 1)
        self.conv4_2 = nn.Sequential(nn.Conv2d(256, 512, 3, 1, 1), nn.ReLU())
        self.conv5_res = self._make_layer(BasicBlock, 512, 512, 4, 1)
        self.fc = nn.Sequential(nn.Linear(8*8*512, 512), nn.ReLU())
        self.classifier = nn.Linear(512, self.num_classes)
    def forward(self, x):
        x = self.conv_1_2(self.conv_1_1(x))
        x = self.maxpool(x)
        x = self.conv2_2(self.conv2_res(x))
        x = self.maxpool(x)
        x = self.conv3_2(self.conv3_res(x))
        x = self.maxpool(x)
        x = self.conv4_2(self.conv4_res(x))
        x = self.maxpool(x)
        x = self.conv5_res(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        x = F.dropout(x, p = self.drop_rate, training=self.training)
        out = self.classifier(x)
        return out, x  # x for feature

    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        layers = []
        layers.append(block(inplanes, planes, 3, stride))
        for i in range(1, blocks):
            layers.append(block(planes, planes, 3, 1))
        return nn.Sequential(*layers)
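A quick sanity check of the two-value return of ResNet26.forward (not part of the commit). It assumes 128x128 input, which the four max-pooling steps reduce to the 8x8 maps expected by the fully-connected layer; the class count is a placeholder.

import torch
from torch.autograd import Variable

net = ResNet26(num_cls=10575)   # placeholder number of identities
net.eval()
out, feat = net(Variable(torch.randn(1, 3, 128, 128)))
print(out.size(), feat.size())  # expected: (1, 10575) and (1, 512)
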
from bob.bio.base.extractor import Extractor

class ResNet26Extractor(Extractor):
    def __init__(self, model_file, num_classes=10575):  # default assumed, mirroring CasiaNetExtractor (CASIA-WebFace)
        Extractor.__init__(self, skip_extractor_training=True)
        # model
        self.network = ResNet26(num_classes)
        cp = torch.load(model_file)
        if 'state_dict' in cp:
            self.network.load_state_dict(cp['state_dict'])
        self.network.eval()
        # image pre-processing
        self.to_tensor = transforms.ToTensor()
        self.norm = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    def __call__(self, image):
        """__call__(image) -> feature

        Extract features

        **Parameters:**

        image : 3D :py:class:`numpy.ndarray` (floats)
          The image to extract the features from.

        **Returns:**

        feature : 1D :py:class:`numpy.ndarray` (floats)
          The extracted (flattened) features
        """
        # bob images are CxHxW; roll the axes to HxWxC for torchvision's ToTensor
        input_image = numpy.rollaxis(numpy.rollaxis(image, 2), 2)
        input_image = self.to_tensor(input_image)
        input_image = self.norm(input_image)
        input_image = input_image.unsqueeze(0)
        # forward returns (logits, features); keep the 512-d feature vector
        _, features = self.network.forward(Variable(input_image))
        features = features.data.numpy().flatten()
        # normalize
        std = numpy.sqrt((features ** 2).sum(-1))
        features /= (std + 1e-10)
        return features
@@ -2,6 +2,7 @@
 from .DRGANLight import DRGANLight
 from .DRGANOriginal import DRGANOriginal
 from .CNN8 import CNN8Extractor
+from .CasiaNet import CasiaNetExtractor
 # gets sphinx autodoc done right - don't remove it
@@ -24,6 +25,7 @@ __appropriate__(
   DRGANLight,
   DRGANOriginal,
   CNN8Extractor,
+  CasiaNetExtractor,
 )
 # gets sphinx autodoc done right - don't remove it