--[[This software purpose is to train convolutional neural networks for voice presentation attack detection.

Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/
Written by Hannah Muckenhirn <hannah.muckenhirn@idiap.ch>,

This file is part of CNN-voice-PAD.

CNN-voice-PAD is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License version 3 as
published by the Free Software Foundation.

CNN-voice-PAD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with CNN-voice-PAD. If not, see <http://www.gnu.org/licenses/>.--]]

require "paths"
require "nn"
require "math"
require "gnuplot"

-- Use 32-bit FloatTensor as the default tensor type for all tensors below.
torch.setdefaulttensortype('torch.FloatTensor');

-- functions

-- Defines the SpeechDataset "class" used below to load the train/dev sets.
paths.dofile("speech_dataset.lua")


-- OPTIONS

cmd=torch.CmdLine();

-- hyper-parameters
cmd:option('-Lr',0.0001,'Learning rate');
cmd:option('-maxIter',50,'Max Iteration');
cmd:option('-context',15,'Number of context frames');
cmd:option('-nf1',20,'Number of hidden units of conv1');
cmd:option('-kW1',300,'kernel width of conv1');
cmd:option('-dW1',100,'kernel shift of conv1');
cmd:option('-arch','','network architecture: cnnMLP or cnnSLP');
cmd:option('-nhu',0,'Number of hidden units of MLP')

-- dataset files
cmd:option('-trainData',"",'File containing data of training set')
cmd:option('-trainLabel',"",'File containing labels of training set')
cmd:option('-trainVAD',"",'File containing VAD labels (0 or 1) of training set');
cmd:option('-devData',"",'File containing data of development set')
cmd:option('-devLabel',"",'File containing labels of development set')
cmd:option('-devVAD',"",'File containing VAD labels (0 or 1) of development set');

-- misc
cmd:option('-save',"",'path for model saving');
cmd:option('-norm',"win",'norm for wav: (win, seq, dset)');
params=cmd:parse(arg);


-- Build an output directory whose name encodes the non-default options,
-- create it, and log the options there. With no -save path, just print them.
if params.save ~= "" then

	dirname=params.arch .. "_"

	-- Collect the option names that go into the directory name and sort
	-- them: pairs() iteration order is unspecified, so without sorting the
	-- same run configuration could produce differently-named directories.
	local skip={save=true,trainData=true,trainLabel=true,devData=true,devLabel=true,trainVAD=true,devVAD=true}
	local keys={}
	for k,v in pairs(params) do
		if v~=0 and v~="" and not skip[k] then
			keys[#keys+1]=k
		end
	end
	table.sort(keys)
	for _,k in ipairs(keys) do
		dirname=dirname .. k .. "=" .. params[k] .. "_"
	end

	-- NOTE(review): params.save is interpolated unquoted into a shell
	-- command; a path with spaces or shell metacharacters will break or be
	-- injected. Assumed to come from a trusted caller.
	os.execute("mkdir -p " .. params.save .. "/" .. dirname);

	cmd:log(params.save .. "/" .. dirname .. "/logfile",params);

else
	print(params)
end

-- Fixed seed for reproducible initialization and shuffling.
seed = 10
torch.manualSeed(seed)

-- DATASET

-- Training-set configuration (consumed by SpeechDataset in speech_dataset.lua).
local configTrain={
	datafile=params.trainData,
	labelfile=params.trainLabel,
	vadfile=params.trainVAD,
	pathfile="",
	feat="wav",
	nSamplePerFrame=160, -- 10ms @ 16kHz
	norm=params.norm,
	contextframe=params.context,
}

-- Development (validation) set configuration; normMean/normStd are added
-- later when params.norm=="dset".
local configValid={
	datafile=params.devData,
	labelfile=params.devLabel,
	vadfile=params.devVAD,
	pathfile="",
	feat="wav",
	nSamplePerFrame=160, -- 10ms @ 16kHz
	norm=params.norm,
	contextframe=params.context,
}

-- normalize
-- Build the training set and normalize it according to params.norm.
trainSet=SpeechDataset(configTrain)
trainSet:normalize();




-- in case of dataset norm, use the stat computed on the training set
-- (the stats are also copied into params, which is serialized together with
-- the network at every checkpoint below, so they travel with the model)
if params.norm=="dset" then
	params.normMean=trainSet.normMean
	params.normStd=trainSet.normStd
	configValid.normMean=trainSet.normMean
	configValid.normStd=trainSet.normStd
end
	
validSet=SpeechDataset(configValid)
validSet:normalize()

print( trainSet.nData .. " training sequences")
print( validSet.nData .. " validation sequences")


print("Creating network ...");


-- NETWORK
-- Both architectures share the same temporal-convolution front-end on the raw
-- waveform; they differ only in the classifier on top: an MLP with one hidden
-- layer (cnnMLP) or a single linear layer (cnnSLP).

nInput=trainSet.nInput  -- samples per example
nOutput=2;              -- two classes (NLLCriterion targets are 1 or 2)
net=nn.Sequential()

-- Number of frames the convolution produces ("valid" convolution size).
local nout1= math.floor((nInput-params.kW1)/params.dW1)+1

if params.arch=="cnnMLP" or params.arch=="cnnSLP" then
	-- shared convolutional front-end
	net:add(nn.TemporalConvolution(1,params.nf1,params.kW1,params.dW1));
	net:add(nn.Reshape(nout1*params.nf1));
	net:add(nn.HardTanh());
	if params.arch=="cnnMLP" then
		-- hidden MLP layer of params.nhu units
		net:add(nn.Linear(nout1*params.nf1,params.nhu));
		net:add(nn.HardTanh());
		net:add(nn.Linear(params.nhu,nOutput));
	else
		-- single linear classifier on the convolution features
		net:add(nn.Linear(nout1*params.nf1,nOutput));
	end
else
	error("Architecture not recognized")
end

-- criterion
net:add(nn.LogSoftMax());
criterion=nn.ClassNLLCriterion();

print(nInput .. " samples for each example");

print(trainSet.nExample .. " training examples");
print(validSet.nExample .. " validation examples");

maxIteration=params.maxIter;

-- Per-iteration statistics, filled in by the main loop: mean criterion value
-- and train/valid frame error rates in percent (despite the "-acc" file
-- names used when saving, these hold error rates, not accuracies).
errTrain=torch.Tensor(maxIteration):fill(0);
errValid=torch.Tensor(maxIteration):fill(0);
errCrit=torch.Tensor(maxIteration):fill(0);
iteration=1;

-- MAIN LOOP

-- Reusable input buffer: one example = nInput samples, shape (nInput x 1).
seq=torch.Tensor(nInput,1):fill(0);
-- Fixed random order of validation examples, drawn once so every iteration
-- evaluates the same subset when the validation set is truncated.
perm2=torch.randperm(validSet.nExample);
-- Train for params.maxIter iterations. Each iteration:
--   1) one pass over the shuffled training set with immediate SGD updates,
--   2) one evaluation pass over (a capped subset of) the validation set,
--   3) statistics printing and model checkpointing.
-- Frames with VAD==0 and too-short sequences are skipped in both passes.
while true do

	local currentError = 0
	local errNumT=0
	local errNumV=0
	perm=torch.randperm(trainSet.nExample);  -- fresh shuffle every iteration
	trueNbSamplesTrain=0
	for d = 1,trainSet.nExample do
		trainSet:get_data(perm[d],seq)
		vad = trainSet:get_vad(perm[d])
		if (vad==1 and trainSet:is_too_short(perm[d])==0) then
			target=trainSet:get_label(perm[d])

			net:forward(seq)

			_,pred=torch.max(net.output,1)
			if pred:squeeze()~=target then
				errNumT=errNumT+1;
			end

			-- accumulate the criterion and do an immediate SGD step
			currentError = currentError + criterion:forward(net.output, target)
			net:updateGradInput(seq, criterion:updateGradInput(net.output, target))
			net:accUpdateGradParameters(seq, criterion.gradInput, params.Lr)
			trueNbSamplesTrain=trueNbSamplesTrain+1
		end
	end

	-- VALIDATION: evaluate on at most nbSamples examples, always the same
	-- ones thanks to the fixed permutation perm2 drawn before the loop.
	nbSamples=2000000
	trueNbSamplesValid=0
	nValid = math.min(validSet.nExample, nbSamples)
	for d = 1,nValid do
		validSet:get_data(perm2[d],seq)
		vad = validSet:get_vad(perm2[d])
		if (vad==1 and validSet:is_too_short(perm2[d])==0) then
			trueNbSamplesValid=trueNbSamplesValid+1
			target=validSet:get_label(perm2[d])
			net:forward(seq)

			_,pred=torch.max(net.output,1)
			if pred:squeeze()~=target then
				errNumV=errNumV+1;
			end
		end
	end

	-- Per-iteration statistics (frame error rates in percent).
	-- NOTE(review): if every example were filtered out by VAD these
	-- divisions would produce nan/inf; assumed not to happen on real data.
	currentError = currentError / trueNbSamplesTrain
	errCrit[iteration]=currentError;
	errTrain[iteration]=100*errNumT/ trueNbSamplesTrain
	errValid[iteration]=100*errNumV/trueNbSamplesValid
	if(iteration==1) then
		print("number samples training set: "..trueNbSamplesTrain)
		print("number samples valid set: "..trueNbSamplesValid)
	end

	-- printing: iteration, mean criterion, train / valid frame error (%)
	print(iteration .. " " .. string.format("%4.4f",currentError)  .. " " .. string.format("%4.2f",errTrain[iteration]) .. " " .. string.format("%4.2f",errValid[iteration]))

	-- saving: model snapshot plus one-line-per-iteration statistics files
	if params.save~="" then

		local savepath=params.save .. "/" .. dirname

		torch.save(savepath .. "/model_" .. iteration .. ".bin",{net,params});

		file=io.open(savepath .. "/crit","a")
		file:write(errCrit[iteration] .. "\n")
		file:close();

		file=io.open(savepath  .. "/train-frame-acc","a")
		file:write(errTrain[iteration] .. "\n")
		file:close();

		file=io.open(savepath .. "/valid-frame-acc","a")
		file:write(errValid[iteration] .. "\n")
		file:close();

	end

	iteration = iteration + 1
	if iteration>params.maxIter then
		break
	end

end