diff --git a/README.md b/README.md index cbf04ec0144a16ab050d995b84cdd35aa44af1b4..7af54ff62ecb6e64284c6f563b11a5c53981927b 100644 --- a/README.md +++ b/README.md @@ -46,6 +46,8 @@ This repository is organised as follows: 3. Run the frames extraction code as follows: `python preprocessor/extract_frames.py -l <path_to_list> -o <path_to_output_folder>` 4. Run the database organization code as follows: `python database/create_database_dataframe.py --frames_list --output_path -metadata_filename -save_mode --min_face_size` 5. Run the pipeline as follows: `python pipeline_vuln.py --database_path --output_path --metadata_filename --save_mode --min_face_size --attack_type --attack_params --attac` +6. Once you have the score files, namely the `scores-dev.csv`, you can use the script `utils/split_scores.sh` to split the scores into bona-fide and attack scores. The script will create three files: `scores-dev_print-attack.csv`, `scores-dev_replay-attack.csv` and `scores-dev_hyg-mask.csv`. +7. You can then use these files to compute the metrics as follows: `bob vuln metrics scores-dev_print-attack.csv scores-dev_replay-attack.csv scores-dev_hyg-mask.csv` ## Contact For questions or reporting issues to this software package, contact our development [mailing list](https://www.idiap.ch/software/bob/discuss). 
diff --git a/pipeline_vuln.py b/pipeline_vuln.py index 794f8521e442ad899e6f0a3f973fb9f17453b3b8..71ad2c172012ced63a642fbc215f945bb50613a7 100644 --- a/pipeline_vuln.py +++ b/pipeline_vuln.py @@ -42,8 +42,8 @@ def run_pipeline(root, bonafide_annotations_path, print_path, replay_path, hyg_a # Create a probes dataframe containing only files from the second session, the main camera only, and the first scenario: indoor normal light session_id = "s2" - protocol_probes_dev = '((session==@session_id) & (frame_nb < @nb_of_frames) & (scenario_id == @scene_id) & (with_mask==False) & (subject_id <= 35))' - protocol_probes_eval = '((session==@session_id) & (frame_nb < @nb_of_frames) & (scenario_id == @scene_id) & (with_mask==False) & (subject_id > 35))' + protocol_probes_dev = '((session==@session_id) & (frame_nb < @nb_of_frames) & (scenario_id == @scene_id) & (with_mask==False) & (subject_id <= 70))' + protocol_probes_eval = '((session==@session_id) & (frame_nb < @nb_of_frames) & (scenario_id == @scene_id) & (with_mask==False) & (subject_id > 70))' probes_df_dev=df.query(protocol_probes_dev) probes_df_eval=df.query(protocol_probes_eval) @@ -95,25 +95,23 @@ def run_pipeline(root, bonafide_annotations_path, print_path, replay_path, hyg_a probes_df_eval = pd.concat(fr) ###====================================================== - # # Add the hygienic mask attack frames to the probes dataframe - # frames_hyg = [ _utils.process_df(os.path.join(hyg_attack_path, f)) for f in os.listdir(hyg_attack_path)] - # df_hyg = pd.concat(frames_hyg) - # df_hyg['frame_nb'] = df_hyg['frame_nb'].astype(int) + # Add the hygienic mask attack frames to the probes dataframe + df_hyg = load_annotations(hyg_attack_path) - # protocol_probes_dev_hyg = '(subject_id <= 70)' - # protocol_probes_eval_hyg = '(subject_id > 69)' + protocol_probes_dev_hyg = '(subject_id <= 70)' + protocol_probes_eval_hyg = '(subject_id > 70)' - # probes_df_dev_hyg=df_hyg.query(protocol_probes_dev_hyg) - # 
probes_df_eval_hyg=df_hyg.query(protocol_probes_eval_hyg) + probes_df_dev_hyg=df_hyg.query(protocol_probes_dev_hyg) + probes_df_eval_hyg=df_hyg.query(protocol_probes_eval_hyg) - # probes_df_dev_hyg['reference_id'] = probes_df_dev_hyg['filename'].values - # probes_df_eval_hyg['reference_id'] = probes_df_eval_hyg['filename'].values + probes_df_dev_hyg['reference_id'] = probes_df_dev_hyg['filename'].values + probes_df_eval_hyg['reference_id'] = probes_df_eval_hyg['filename'].values - # # Concatenate the hygienic mask attacks probe frames with the bonafide - # fr = [probes_df_dev, probes_df_dev_hyg] - # probes_df_dev = pd.concat(fr) - # fr = [probes_df_eval, probes_df_eval_hyg] - # probes_df_eval = pd.concat(fr) + # Concatenate the hygienic mask attacks probe frames with the bonafide + fr = [probes_df_dev, probes_df_dev_hyg] + probes_df_dev = pd.concat(fr) + fr = [probes_df_eval, probes_df_eval_hyg] + probes_df_eval = pd.concat(fr) ###====================================================== # Create matching lists for the probes and references @@ -134,12 +132,14 @@ def run_pipeline(root, bonafide_annotations_path, print_path, replay_path, hyg_a # Create matchings for attacks matching_eval_replay = _utils.create_matchings_for_attacks(probes_df_eval_replay) matching_eval_print = _utils.create_matchings_for_attacks(probes_df_eval_print) + matching_eval_hyg = _utils.create_matchings_for_attacks(probes_df_eval_hyg) matching_dev_replay = _utils.create_matchings_for_attacks(probes_df_dev_replay) matching_dev_print = _utils.create_matchings_for_attacks(probes_df_dev_print) + matching_dev_hyg = _utils.create_matchings_for_attacks(probes_df_dev_hyg) - matching_eval = pd.concat([matching_eval, matching_eval_replay, matching_eval_print]) - matching_dev = pd.concat([matching_dev, matching_dev_replay, matching_dev_print]) + matching_eval = pd.concat([matching_eval, matching_eval_replay, matching_eval_print, matching_eval_hyg]) + matching_dev = pd.concat([matching_dev, 
matching_dev_replay, matching_dev_print, matching_dev_hyg]) def iresnet_template(): diff --git a/utils/split_scores.sh b/utils/split_scores.sh new file mode 100644 index 0000000000000000000000000000000000000000..78607393205d8b0be88bdefc41c88cbc2fbad47a --- /dev/null +++ b/utils/split_scores.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# Check that the correct number of arguments were passed +if [ "$#" -ne 1 ]; then + echo "Usage: $0 file1.csv" + exit 1 +fi + +# Define the strings to search for +bonafide="BONAFIDE" +attack1="print-attack" +attack2="replay-attack" +attack3="hyg-mask" + +file="$1" +# split the filename into filename and extension +filename="${file%.*}" +# Check that the input file exists and is a CSV file +if [ ! -f "$file" ]; then + echo "Error: $file does not exist or is not a file" + exit 1 +elif [ "${file##*.}" != "csv" ]; then + echo "Error: $file is not a CSV file" + exit 1 +fi + +# Split the input file into three output files based on the strings +head -n1 "$file" > "${filename}_${attack1}.csv" +grep -e "$attack1" -e "$bonafide" "$file" >> "${filename}_${attack1}.csv" +head -n1 "$file" > "${filename}_${attack2}.csv" +grep -e "$attack2" -e "$bonafide" "$file" >> "${filename}_${attack2}.csv" +head -n1 "$file" > "${filename}_${attack3}.csv" +grep -e "$attack3" -e "$bonafide" "$file" >> "${filename}_${attack3}.csv" + +echo "Splitting complete"