Commit bfa032b5 authored by Amir MOHAMMADI

initial commit

#!/usr/bin/env python
"""Download the DenseNet-161 ImageNet model and the annotation archives used in the experiments."""
from bob.extension.download import download_and_unzip
import os

# Pick the download mirror depending on whether we are running inside the CI.
base_url = (
    "https://www.idiap.ch/software/bob/data/bob/"
    if os.environ.get("CI_PROJECT_ID") is None
    else "http://www.idiap.ch/public/data/bob/"
)

all_urls = [
    "bob.learn.tensorflow/master/densenet-161-imagenet.tar.xz",
    "bob.db.oulunpu/master/oulunpu-mtcnn-annotations.tar.xz",
    "bob.db.replaymobile/master/replaymobile-mtcnn-annotations.tar.xz",
    "bob.db.swan/master/swan-mtcnn-annotations.tar.xz",
    "bob.db.batl/master/WMCA_annotations_50_frames.tar.xz",
]

fldr = "downloads"
os.makedirs(fldr, exist_ok=True)

# Download each archive into the downloads folder and extract it.
for path in all_urls:
    filename = f"{fldr}/{os.path.basename(path)}"
    url = [f"{base_url}{path}"]
    print(f"Downloading {url} and saving in {filename}")
    download_and_unzip(url, filename)

print("Finished Downloading!")
#!/bin/bash
# Report the PAD evaluation metrics for every database and every pruning variant.
set -ex

databases=(oulunpu replaymobile swan batl)
# Protocol names as they appear in the top-level results folders ...
protocols_1=(Protocol_1 grandtest pad_p2_face_f1 grandtest_color_50_PrintReplay)
# ... and as they appear in the score sub-directories (note the dashes for batl).
protocols_2=(Protocol_1 grandtest pad_p2_face_f1 grandtest-color-50-PrintReplay)

for ((i = 0; i < ${#databases[@]}; i++)); do
  bob pad metrics --eval --decimal 2 \
    results/${databases[i]}/${protocols_1[i]}/{deep_pix_bis,deep_pix_bis_pruned_by_replaymobile,deep_pix_bis_pruned_by_swan,deep_pix_bis_pruned_by_batl,deep_pix_bis_pruned_by_ijbc}/${protocols_2[i]}/scores/scores-{dev,eval} \
    --legends deep_pix_bis,deep_pix_bis_pruned_by_replaymobile,deep_pix_bis_pruned_by_swan,deep_pix_bis_pruned_by_batl,deep_pix_bis_pruned_by_ijbc
done
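The brace expansions in the bob pad metrics call above enumerate one dev/eval score pair per pruning variant for each database. The Python sketch below (illustrative only, not part of the repository) prints the same score-file paths, which can help when checking that all prediction and scoring jobs actually produced their outputs.

# Illustrative only: list the score files that the metrics command above reads.
from itertools import product

databases = ["oulunpu", "replaymobile", "swan", "batl"]
protocols_results = ["Protocol_1", "grandtest", "pad_p2_face_f1", "grandtest_color_50_PrintReplay"]
protocols_scores = ["Protocol_1", "grandtest", "pad_p2_face_f1", "grandtest-color-50-PrintReplay"]
systems = [
    "deep_pix_bis",
    "deep_pix_bis_pruned_by_replaymobile",
    "deep_pix_bis_pruned_by_swan",
    "deep_pix_bis_pruned_by_batl",
    "deep_pix_bis_pruned_by_ijbc",
]

for db, proto_results, proto_scores in zip(databases, protocols_results, protocols_scores):
    for system, group in product(systems, ["dev", "eval"]):
        print(f"results/{db}/{proto_results}/{system}/{proto_scores}/scores/scores-{group}")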
This diff is collapsed.
setuptools
numpy
bob.extension
#!/bin/bash
# The experiments run in 3 stages:
# 1. Caching: all face PAD datasets are cached for faster training and evaluation later.
# 2. Training: deep_pix_bis is trained on oulunpu.
# 3. Testing: deep_pix_bis is tested on all 4 datasets.
# (The resulting jman dependency chain is restated in the sketch after this script.)
set -ex

databases=(oulunpu replaymobile swan batl)
protocols=(Protocol_1 grandtest pad_p2_face_f1 grandtest_color_50_PrintReplay)
algorithms=(deep_pix_bis)
BOB_SCRIPT=$(which bob)
TRAIN_CACHE_JOBS=()
PREDICT_CACHE_JOBS=()
TRAIN_JOBS=()

# jman options for running on SGE
LOCAL_OPT=""
SUBMIT_OPTS="--queue gpu"
# # jman options for running locally
# LOCAL_OPT="--local"
# SUBMIT_OPTS=""

# Cache the datasets:
TRAIN_CACHE_JOBS+=($(jman ${LOCAL_OPT} submit ${SUBMIT_OPTS} --print-id --stop-on-failure --name "cache_train_data" \
  ${BOB_SCRIPT} tf cache -vvv --mode train \
  bob.paper.icassp2020_domain_guided_pruning.{deep_pix_bis,estimator,oulunpu,Protocol_1,train,load_data_with_normalizer,input_fn}))

TRAIN_CACHE_JOBS+=($(jman ${LOCAL_OPT} submit ${SUBMIT_OPTS} --print-id --stop-on-failure --name "cache_eval_data" \
  ${BOB_SCRIPT} tf cache -vvv --mode eval \
  bob.paper.icassp2020_domain_guided_pruning.{deep_pix_bis,estimator,oulunpu,Protocol_1,dev,load_data_with_normalizer,input_fn}))

for ((i = 0; i < ${#databases[@]}; i++)); do
  PREDICT_CACHE_JOBS+=($(jman ${LOCAL_OPT} submit ${SUBMIT_OPTS} --print-id --stop-on-failure --name "cache_${databases[i]}" \
    ${BOB_SCRIPT} tf cache -vvv --mode infer \
    bob.paper.icassp2020_domain_guided_pruning.{deep_pix_bis,estimator,${databases[i]},${protocols[i]},train_dev_eval,load_data_without_normalizer,input_fn}))
done

# Train the models:
for algorithm in ${algorithms[@]}; do
  TRAIN_JOBS+=($(jman ${LOCAL_OPT} submit ${SUBMIT_OPTS} --print-id --stop-on-failure --name "train_${algorithm}" \
    --dependencies ${TRAIN_CACHE_JOBS[@]} -- \
    ${BOB_SCRIPT} tf train -vvv \
    bob.paper.icassp2020_domain_guided_pruning.{${algorithm},estimator,oulunpu,Protocol_1,train,load_data_with_normalizer,input_fn}))

  TRAIN_JOBS+=($(jman ${LOCAL_OPT} submit ${SUBMIT_OPTS} --print-id --stop-on-failure --name "eval_${algorithm}" \
    --dependencies ${TRAIN_CACHE_JOBS[@]} -- \
    ${BOB_SCRIPT} tf eval -vvv \
    bob.paper.icassp2020_domain_guided_pruning.{${algorithm},estimator,oulunpu,Protocol_1,dev,load_data_with_normalizer,input_fn}))
done

# Run the predictions and score them:
for algorithm in ${algorithms[@]}; do
  for ((i = 0; i < ${#databases[@]}; i++)); do
    SUB_DIR="$(pwd)/results/${databases[i]}/${protocols[i]}/${algorithm}"

    LAST_JOB=$(jman ${LOCAL_OPT} submit ${SUBMIT_OPTS} --print-id --stop-on-failure --name "predict_${algorithm}_${databases[i]}" \
      --dependencies ${PREDICT_CACHE_JOBS[@]} ${TRAIN_JOBS[@]} -- \
      ${BOB_SCRIPT} tf predict -vvv \
      bob.paper.icassp2020_domain_guided_pruning.{${algorithm},estimator,${databases[i]},${protocols[i]},train_dev_eval,load_data_without_normalizer,input_fn} \
      --checkpoint-path results/${algorithm}/eval \
      --output-dir ${SUB_DIR}/predictions)

    jman ${LOCAL_OPT} submit ${SUBMIT_OPTS} --print-id --stop-on-failure --name "score_${algorithm}_${databases[i]}" \
      --dependencies ${LAST_JOB} -- \
      $(which spoof.py) -vvv \
      bob.paper.icassp2020_domain_guided_pruning.{${algorithm},estimator,${databases[i]},${protocols[i]},train_dev_eval,load_data_with_normalizer,input_fn,pad_video_predictions} \
      --sub-directory ${SUB_DIR} \
      --force
  done
done
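For reference, the jman --dependencies options above chain the stages as follows: training and evaluation wait for the two training caches, every prediction job waits for all prediction caches plus the training and evaluation jobs, and each scoring job waits only for its own prediction job. The sketch below restates that ordering in Python; the job names mirror the --name options in the script, but the code itself is not part of the repository.

# Illustrative only: the job dependency chain set up by the jman calls above.
databases = ["oulunpu", "replaymobile", "swan", "batl"]

dependencies = {"cache_train_data": [], "cache_eval_data": []}
for db in databases:
    dependencies[f"cache_{db}"] = []

dependencies["train_deep_pix_bis"] = ["cache_train_data", "cache_eval_data"]
dependencies["eval_deep_pix_bis"] = ["cache_train_data", "cache_eval_data"]

for db in databases:
    dependencies[f"predict_deep_pix_bis_{db}"] = (
        [f"cache_{d}" for d in databases]
        + ["train_deep_pix_bis", "eval_deep_pix_bis"]
    )
    dependencies[f"score_deep_pix_bis_{db}"] = [f"predict_deep_pix_bis_{db}"]

for job, deps in dependencies.items():
    print(f"{job} <- {', '.join(deps) if deps else 'no dependencies'}")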
This diff is collapsed.
0.0.1b0