Commit f55be918 authored by Guillaume HEUSCH

Merge branch '12-config-files-should-be-removed-from-the-package' into 'master'

Resolve "Config files should be removed from the package"

Closes #12

See merge request !29
parents 15496c1b 07cf574f
Pipeline #28583 passed with stages in 21 minutes and 44 seconds
### DATA ###
from bob.learn.pytorch.datasets import CasiaWebFaceDataset
import torchvision.transforms as transforms
from bob.learn.pytorch.datasets import RollChannels
from bob.learn.pytorch.datasets import ToTensor
from bob.learn.pytorch.datasets import Normalize
dataset = CasiaWebFaceDataset(root_dir='/idiap/project/fargo/xpeng_prepro/CASIA-Webface-crop-128/',
                              transform=transforms.Compose([
                                  RollChannels(),  # bob (CxHxW) to skimage (HxWxC) layout
                                  ToTensor(),
                                  Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                              ])
                              )
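# A minimal sketch of how this dataset would be consumed (assumption: the
# trainer wraps it in a standard torch DataLoader; the batch size here is
# illustrative, not part of the original config):
from torch.utils.data import DataLoader
dataloader = DataLoader(dataset, batch_size=64, shuffle=True, num_workers=4)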
### NETWORK ###
from bob.learn.pytorch.architectures import CASIANet
number_of_classes = 10575
dropout = 0.5
network = CASIANet(number_of_classes, dropout)
### DATA ###
from bob.learn.pytorch.datasets import CasiaWebFaceDataset
import torchvision.transforms as transforms
from bob.learn.pytorch.datasets import RollChannels
from bob.learn.pytorch.datasets import ToTensor
from bob.learn.pytorch.datasets import Normalize
dataset = CasiaWebFaceDataset(root_dir='/idiap/project/fargo/xpeng_prepro/CASIA-Webface-crop-128/',
                              transform=transforms.Compose([
                                  RollChannels(),  # bob (CxHxW) to skimage (HxWxC) layout
                                  ToTensor(),
                                  Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                              ])
                              )
### NETWORK ###
from bob.learn.pytorch.architectures import CNN8
number_of_classes = 10575
dropout = 0.5
network = CNN8(number_of_classes, dropout)
### NETWORK ###
from bob.learn.pytorch.architectures import ConditionalGAN_generator
from bob.learn.pytorch.architectures import ConditionalGAN_discriminator
from bob.learn.pytorch.architectures import weights_init
noise_dim = 100        # dimensionality of the input noise vector
conditional_dim = 13   # dimensionality of the conditioning variable
channels = 3           # number of image channels
ngpu = 1               # number of GPUs to use
generator = ConditionalGAN_generator(noise_dim, conditional_dim)
generator.apply(weights_init)
discriminator = ConditionalGAN_discriminator(conditional_dim)
discriminator.apply(weights_init)
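# Minimal sampling sketch (assumption: the generator's forward follows the
# usual cGAN pattern and takes a noise tensor plus a one-hot condition;
# the exact signature is not confirmed by this config):
import torch
n_samples = 4
noise = torch.randn(n_samples, noise_dim, 1, 1)
labels = torch.randint(0, conditional_dim, (n_samples,))
one_hot = torch.zeros(n_samples, conditional_dim, 1, 1)
one_hot[torch.arange(n_samples), labels] = 1.0
# fake = generator(noise, one_hot)  # call signature is an assumption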
### DATA ###
from bob.learn.pytorch.datasets.multipie import MultiPIEDataset
from bob.learn.pytorch.datasets import RollChannels
from bob.learn.pytorch.datasets import ToTensor
from bob.learn.pytorch.datasets import Normalize
import torchvision.transforms as transforms
print("loading data ....")
dataset = MultiPIEDataset(root_dir='/idiap/temp/heusch/data/multipie-cropped-64x64',
                          frontal_only=False,
                          transform=transforms.Compose([
                              RollChannels(),  # bob (CxHxW) to skimage (HxWxC) layout
                              ToTensor(),
                              Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                          ])
                          )
print("done")
### DATA ###
from bob.learn.pytorch.datasets.multipie import MultiPIEDataset
from bob.learn.pytorch.datasets import RollChannels
from bob.learn.pytorch.datasets import ToTensor
from bob.learn.pytorch.datasets import Normalize
import torchvision.transforms as transforms
dataset = MultiPIEDataset(root_dir='/idiap/temp/heusch/data/multipie-cropped-64x64',
                          frontal_only=True,
                          transform=transforms.Compose([
                              RollChannels(),  # bob (CxHxW) to skimage (HxWxC) layout
                              ToTensor(),
                              Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                          ])
                          )
### NETWORK ###
from bob.learn.pytorch.architectures import DCGAN_generator
from bob.learn.pytorch.architectures import DCGAN_discriminator
from bob.learn.pytorch.architectures import weights_init
ngpu = 1  # number of GPUs to use
generator = DCGAN_generator(ngpu)
generator.apply(weights_init)
discriminator = DCGAN_discriminator(ngpu)
discriminator.apply(weights_init)
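# Minimal sampling sketch (assumptions: the standard DCGAN latent size of 100
# and 3x64x64 outputs, matching the 64x64 Multi-PIE crops above):
import torch
fixed_noise = torch.randn(16, 100, 1, 1)
# fake_images = generator(fixed_noise)  # expected shape (16, 3, 64, 64); an assumption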
from torchvision import transforms
from bob.learn.pytorch.architectures import MCCNN
from bob.learn.pytorch.datasets import DataFolder
from bob.pad.face.database import BatlPadDatabase
from bob.learn.pytorch.datasets import ChannelSelect, RandomHorizontalFlipImage
#==============================================================================
# Load the dataset
""" The steps are as follows
1. Initialize a databae instance, with the protocol, groups and number of frames
(currently for the ones in 'bob.pad.face', and point 'data_folder_train' to the preprocessed directory )
Note: Here we assume that we have already preprocessed the with `spoof.py` script and dumped it to location
pointed to by 'data_folder_train'.
2. Specify the transform to be used on the images. It can be instances of `torchvision.transforms.Compose` or custom functions.
3. Initialize the `data_folder` class with the database instance and all other parameters. This dataset instance is used in
the trainer class
4. Initialize the network architecture with required arguments.
5. Define the parameters for the trainer.
"""
#==============================================================================
# Initialize the bob database instance
data_folder_train='/idiap/temp/ageorge/WMCA/preprocessed/'
output_base_path='/idiap/temp/ageorge/Pytorch_WMCA/MCCNNv1/'
unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
PROTOCOL_INDEX=0
####################################################################
frames=50
extension='.h5'
train_groups=['train'] # only 'train' group is used for training the network
val_groups=['dev']
do_crossvalidation=True
#=======================
if do_crossvalidation:
    phases = ['train', 'val']
else:
    phases = ['train']

groups = {"train": ['train'], "val": ['dev']}

protocols = "grandtest-color-50" + unseen_protocols[PROTOCOL_INDEX]  # makeup is excluded anyway here
exclude_attacks_list = ["makeup"]

bob_hldi_instance = BatlPadDatabase(
    protocol=protocols,
    original_directory=data_folder_train,
    original_extension=extension,
    landmark_detect_method="mtcnn",  # detect annotations using mtcnn
    exclude_attacks_list=exclude_attacks_list,
    exclude_pai_all_sets=True,  # exclude makeup from all the sets, which is the default behavior for the grandtest protocol
    append_color_face_roi_annot=False)
#==============================================================================
# Initialize the torch dataset, subselect channels from the pretrained files if needed.
SELECTED_CHANNELS = [0,1,2,3]
####################################################################
img_transform = {}
img_transform['train'] = transforms.Compose([ChannelSelect(selected_channels=SELECTED_CHANNELS),
                                             RandomHorizontalFlipImage(p=0.5),
                                             transforms.ToTensor()])
img_transform['val'] = transforms.Compose([ChannelSelect(selected_channels=SELECTED_CHANNELS),
                                           transforms.ToTensor()])
dataset = {}
for phase in phases:
    dataset[phase] = DataFolder(data_folder=data_folder_train,
                                transform=img_transform[phase],
                                extension='.hdf5',
                                bob_hldi_instance=bob_hldi_instance,
                                groups=groups[phase],
                                protocol=protocols,
                                purposes=['real', 'attack'],
                                allow_missing_files=True)
#==============================================================================
# Specify other training parameters
NUM_CHANNELS = len(SELECTED_CHANNELS)
ADAPTED_LAYERS = 'conv1-block1-group1-ffc'
####################################################################
ADAPT_REF_CHANNEL = False
####################################################################
batch_size = 32
num_workers = 0
epochs = 25
learning_rate = 0.0001
seed = 3
use_gpu = False
adapted_layers = ADAPTED_LAYERS
adapt_reference_channel = ADAPT_REF_CHANNEL
verbose = 2
UID = "_".join([str(i) for i in SELECTED_CHANNELS]) + "_" + str(ADAPT_REF_CHANNEL) + "_" + ADAPTED_LAYERS + "_" + str(NUM_CHANNELS) + "_" + protocols
training_logs = output_base_path + UID + '/train_log_dir/'
output_dir = output_base_path + UID
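# A minimal sketch of how the trainer is expected to consume these datasets
# (assumption: one torch DataLoader per phase, shuffling only during training;
# not part of the original config):
from torch.utils.data import DataLoader
dataloader = {phase: DataLoader(dataset[phase],
                                batch_size=batch_size,
                                shuffle=(phase == 'train'),
                                num_workers=num_workers)
              for phase in phases}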
#==============================================================================
# Load the architecture
assert len(SELECTED_CHANNELS) == NUM_CHANNELS
network = MCCNN(num_channels=NUM_CHANNELS)
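# Quick shape sanity check (assumptions: one grayscale plane per selected
# channel and 128x128 preprocessed faces; the input size is not confirmed
# by this config):
import torch
dummy_batch = torch.randn(2, NUM_CHANNELS, 128, 128)
# scores = network(dummy_batch)  # forward signature/input size are assumptions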
#==============================================================================
"""
Note: Running in GPU
jman submit --queue gpu \
--name mccnnv2 \
--log-dir /idiap/temp/ageorge/Pytorch_WMCA/MCCNNv2/logs/ \
--environment="PYTHONUNBUFFERED=1" -- \
./bin/train_mccnn.py \
/idiap/user/ageorge/WORK/COMMON_ENV_PAD_BATL_DB/src/bob.learn.pytorch/bob/learn/pytorch/config/mccnn/wmca_mccnn.py --use-gpu -vvv
Note: Running in cpu
./bin/train_mccnn.py \
/idiap/user/ageorge/WORK/COMMON_ENV_PAD_BATL_DB/src/bob.learn.pytorch/bob/learn/pytorch/config/mccnn/wmca_mccnn.py -vvv
"""