Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
bob.learn.pytorch
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Releases
Model registry
Operate
Environments
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
This is an archived project. Repository and other project resources are read-only.
Show more breadcrumbs
bob
bob.learn.pytorch
Merge requests
!17
Mccn extended
Code
Review changes
Check out branch
Download
Patches
Plain diff
Merged
Mccn extended
mccn_extended
into
master
Overview
7
Commits
7
Pipelines
5
Changes
3
Merged
Anjith GEORGE
requested to merge
mccn_extended
into
master
6 years ago
Overview
7
Commits
7
Pipelines
5
Changes
3
Expand
Adds the extendable MCCNN architecture.
0
0
Merge request reports
Compare
master
version 4
7ebd5ae4
6 years ago
version 3
f4c8b539
6 years ago
version 2
ea9eea49
6 years ago
version 1
f9df33b0
6 years ago
master (base)
and
latest version
latest version
8ff2c8f3
7 commits,
6 years ago
version 4
7ebd5ae4
6 commits,
6 years ago
version 3
f4c8b539
5 commits,
6 years ago
version 2
ea9eea49
4 commits,
6 years ago
version 1
f9df33b0
3 commits,
6 years ago
3 files
+
261
−
0
Side-by-side
Compare changes
Side-by-side
Inline
Show whitespace changes
Show one file at a time
Files
3
Search (e.g. *.vue) (Ctrl+P)
bob/learn/pytorch/architectures/MCCNN.py
0 → 100644
+
252
−
0
Options
#!/usr/bin/env python
# encoding: utf-8
import
torch
import
torch.nn
as
nn
import
torch.nn.functional
as
F
import
os
import
numpy
as
np
import
pkg_resources
import
bob.extension.download
import
bob.io.base
from
.utils
import
MaxFeatureMap
from
.utils
import
group
from
.utils
import
resblock
import
logging
logger
=
logging
.
getLogger
(
"
bob.learn.pytorch
"
)
class MCCNN(nn.Module):
    """The class defining the MCCNN.

    This class implements the MCCNN for multi-channel PAD: one LightCNN-style
    feature stack is instantiated per input channel, and the per-channel
    embeddings are fused by two fully-connected layers into a single score.

    Attributes
    ----------
    num_channels: int
        The number of channels present in the input.
    lcnn_layers: list
        The adaptable layers present in the base LightCNN model.
    module_dict: dict
        A dictionary containing module names and `torch.nn.Module` elements
        as key, value pairs.
    layer_dict: :py:class:`torch.nn.ModuleDict`
        Pytorch class containing the modules as a dictionary.
    light_cnn_model_file: str
        Absolute path to the pretrained LightCNN model file.
    url: str
        The path to download the pretrained LightCNN model from.
    """
def __init__(self, block=resblock, layers=(1, 2, 3, 4), num_channels=4,
             verbosity_level=2):
    """Init function.

    Builds one LightCNN feature stack per input channel, downloads the
    pretrained LightCNN-29 checkpoint if it is not cached yet, loads its
    weights into channel 0 and copies them to every other channel.

    Parameters
    ----------
    block: :py:class:`torch.nn.Module`
        The base block replicated inside each residual stage
        (default: ``resblock``).
    layers: tuple of int
        Number of copies of ``block`` in each of the four residual stages.
        (Was a mutable list default; a tuple avoids the shared-mutable-default
        pitfall and is backward compatible since it is only indexed.)
    num_channels: int
        The number of channels present in the input.
    verbosity_level: int
        Verbosity level, passed to the module logger.
    """
    super(MCCNN, self).__init__()

    self.num_channels = num_channels
    # the adaptable layers present in the base LightCNN model, per channel
    self.lcnn_layers = ['conv1', 'block1', 'group1', 'block2', 'group2',
                        'block3', 'group3', 'block4', 'group4', 'fc']

    logger.setLevel(verbosity_level)

    # pooling stages shared by all channels (stateless, so sharing is safe)
    self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
    self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
    self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
    self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)

    # newly added FC layers fusing the concatenated per-channel embeddings
    # (256 features per channel) into a single score
    self.linear1fc = nn.Linear(256 * num_channels, 10)
    self.linear2fc = nn.Linear(10, 1)

    # add modules: one full LightCNN stack per input channel
    module_dict = {}
    for i in range(self.num_channels):
        m_dict = {}
        m_dict['conv1'] = MaxFeatureMap(1, 48, 5, 1, 2)
        m_dict['block1'] = self._make_layer(block, layers[0], 48, 48)
        m_dict['group1'] = group(48, 96, 3, 1, 1)
        m_dict['block2'] = self._make_layer(block, layers[1], 96, 96)
        m_dict['group2'] = group(96, 192, 3, 1, 1)
        m_dict['block3'] = self._make_layer(block, layers[2], 192, 192)
        m_dict['group3'] = group(192, 128, 3, 1, 1)
        m_dict['block4'] = self._make_layer(block, layers[3], 128, 128)
        m_dict['group4'] = group(128, 128, 3, 1, 1)
        m_dict['fc'] = MaxFeatureMap(8 * 8 * 128, 256, type=0)

        # ch_0 should be the anchor
        for layer in self.lcnn_layers:
            layer_name = "ch_{}_".format(i) + layer
            module_dict[layer_name] = m_dict[layer]

    self.layer_dict = nn.ModuleDict(module_dict)

    # check for pretrained model; download it when not cached locally
    light_cnn_model_file = os.path.join(
        MCCNN.get_mccnnpath(), "LightCNN_29Layers_checkpoint.pth.tar")
    url = 'http://www.idiap.ch/software/bob/data/bob/bob.learn.pytorch/master/LightCNN_29Layers_checkpoint.pth.tar'

    logger.info("Light_cnn_model_file path: {}".format(light_cnn_model_file))

    if not os.path.exists(light_cnn_model_file):
        bob.io.base.create_directories_safe(
            os.path.split(light_cnn_model_file)[0])
        logger.info('Downloading the LightCNN model')
        bob.extension.download.download_file(url, light_cnn_model_file)
        logger.info('Downloaded LightCNN model to location: {}'.format(
            light_cnn_model_file))

    # Loading the pretrained model for ch_0; `strict=False` because the
    # checkpoint only covers the channel-0 modules
    self.load_state_dict(
        self.get_model_state_dict(light_cnn_model_file), strict=False)

    # copy over the channel-0 weights to all other layers
    for layer in self.lcnn_layers:
        for i in range(1, self.num_channels):
            # except for 0 th channel
            self.layer_dict["ch_{}_".format(i) + layer].load_state_dict(
                self.layer_dict["ch_0_" + layer].state_dict())
def
_make_layer
(
self
,
block
,
num_blocks
,
in_channels
,
out_channels
):
"""
makes multiple copies of the same base module
Parameters
----------
block: :py:class:`torch.nn.Module`
The base block to replicate
num_blocks: int
Number of copies of the block to be made
in_channels: int
Number of input channels for a block
out_channels: int
Number of output channels for a block
"""
layers
=
[]
for
i
in
range
(
0
,
num_blocks
):
layers
.
append
(
block
(
in_channels
,
out_channels
))
return
nn
.
Sequential
(
*
layers
)
def
forward
(
self
,
img
):
"""
Propagate data through the network
Parameters
----------
img: :py:class:`torch.Tensor`
The data to forward through the network. Image of size num_channelsx128x128
Returns
-------
output: :py:class:`torch.Tensor`
score
"""
embeddings
=
[]
for
i
in
range
(
self
.
num_channels
):
x
=
img
[:,
i
,:,:].
unsqueeze
(
1
)
# the image for the specific channel
x
=
self
.
layer_dict
[
"
ch_{}_
"
.
format
(
i
)
+
"
conv1
"
](
x
)
x
=
self
.
pool1
(
x
)
x
=
self
.
layer_dict
[
"
ch_{}_
"
.
format
(
i
)
+
"
block1
"
](
x
)
x
=
self
.
layer_dict
[
"
ch_{}_
"
.
format
(
i
)
+
"
group1
"
](
x
)
x
=
self
.
pool2
(
x
)
x
=
self
.
layer_dict
[
"
ch_{}_
"
.
format
(
i
)
+
"
block2
"
](
x
)
x
=
self
.
layer_dict
[
"
ch_{}_
"
.
format
(
i
)
+
"
group2
"
](
x
)
x
=
self
.
pool3
(
x
)
x
=
self
.
layer_dict
[
"
ch_{}_
"
.
format
(
i
)
+
"
block3
"
](
x
)
x
=
self
.
layer_dict
[
"
ch_{}_
"
.
format
(
i
)
+
"
group3
"
](
x
)
x
=
self
.
layer_dict
[
"
ch_{}_
"
.
format
(
i
)
+
"
block4
"
](
x
)
x
=
self
.
layer_dict
[
"
ch_{}_
"
.
format
(
i
)
+
"
group4
"
](
x
)
x
=
self
.
pool4
(
x
)
x
=
x
.
view
(
x
.
size
(
0
),
-
1
)
fc
=
self
.
layer_dict
[
"
ch_{}_
"
.
format
(
i
)
+
"
fc
"
](
x
)
fc
=
F
.
dropout
(
fc
,
training
=
self
.
training
)
embeddings
.
append
(
fc
)
merged
=
torch
.
cat
(
embeddings
,
1
)
output
=
self
.
linear1fc
(
merged
)
output
=
nn
.
Sigmoid
()(
output
)
output
=
self
.
linear2fc
(
output
)
if
self
.
training
:
output
=
nn
.
Sigmoid
()(
output
)
return
output
@staticmethod
def get_mccnnpath():
    """Return the directory used to cache pretrained model files.

    Returns
    -------
    str
        Path of the ``models`` resource inside the installed
        ``bob.learn.pytorch`` package.
    """
    import pkg_resources
    models_dir = pkg_resources.resource_filename('bob.learn.pytorch', 'models')
    return models_dir
def get_model_state_dict(self, pretrained_model_path):
    """Load the pretrained LightCNN checkpoint and remap it for channel 0.

    Parameters
    ----------
    pretrained_model_path: str
        Absolute path to the LightCNN model file.

    Returns
    -------
    :py:class:`collections.OrderedDict`
        LightCNN weights with each key renamed from ``module.<name>`` to
        ``layer_dict.ch_0_<name>``, matching this model's module names.
    """
    from collections import OrderedDict

    # map_location keeps the tensors on CPU regardless of the device the
    # checkpoint was saved from
    checkpoint = torch.load(pretrained_model_path,
                            map_location=lambda storage, loc: storage)
    # (the unused read of checkpoint['epoch'] was dropped: it was dead code
    # and made loading fail on checkpoints without an 'epoch' entry)
    state_dict = checkpoint['state_dict']

    # create new OrderedDict that does not contain `module.`
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = 'layer_dict.ch_0_' + k[7:]  # remove `module.`
        new_state_dict[name] = v

    # load params
    return new_state_dict
Loading