Commit 3f342953 authored by Amir MOHAMMADI's avatar Amir MOHAMMADI

Merge branch 'score-writer' into 'master'

Switch to CSV scores for pipelines and analysis commands

See merge request !87
parents f3417121 0937127a
Pipeline #51536 passed with stages
in 9 minutes and 17 seconds
......@@ -3,10 +3,20 @@
# Fri Dec 7 12:33:37 CET 2012
"""Utility functions for computation of EPSC curve and related measurement"""
from bob.measure import far_threshold, eer_threshold, min_hter_threshold, farfrr, frr_threshold
from bob.bio.base.score.load import four_column
from bob.measure import (
far_threshold,
eer_threshold,
min_hter_threshold,
farfrr,
frr_threshold,
)
from bob.bio.base.score.load import _iterate_csv_score_file
from collections import defaultdict
import re
import numpy
import logging
logger = logging.getLogger(__name__)
def calc_threshold(method, pos, negs, all_negs, far_value=None, is_sorted=False):
......@@ -116,83 +126,117 @@ def apcer_bpcer(threshold, pos, *negs):
return apcers, max(apcers), bpcer
def split_csv_pad_per_pai(filename, regexps=None, regexp_column="attack_type"):
    """Returns scores for Bona-Fide samples and scores for each PAI.

    By default, the ``attack_type`` column is used as indication for each
    Presentation Attack Instrument (PAI).

    For example, with default regexps and regexp_column, if you have scores like:

        claimed_id, test_label, is_bonafide, attack_type, score
        001, bona_fide_sample_1_path, True, , 0.9
        001, print_sample_1_path, False, print, 0.6
        001, print_sample_2_path, False, print, 0.6
        001, replay_sample_1_path, False, replay, 0.2
        001, replay_sample_2_path, False, replay, 0.2
        001, mask_sample_1_path, False, mask, 0.5
        001, mask_sample_2_path, False, mask, 0.5

    this function will return 1 set of positive scores, and 3 sets of negative scores
    (for each print, replay, and mask PAIs).

    Otherwise, you can provide a list of regular expressions that match each PAI.
    For example, with regexps as ['print', 'replay', 'mask'], if you have scores like:

        claimed_id, test_label, is_bonafide, attack_type, score
        001, bona_fide_sample_1_path, True, , 0.9
        001, print_sample_1_path, False, print/1, 0.6
        001, print_sample_2_path, False, print/2, 0.6
        001, replay_sample_1_path, False, replay/1, 0.2
        001, replay_sample_2_path, False, replay/2, 0.2
        001, mask_sample_1_path, False, mask/1, 0.5
        001, mask_sample_2_path, False, mask/2, 0.5

    the function will return 3 sets of negative scores (for print, replay, and mask
    PAIs, given in regexp).

    Parameters
    ----------
    filename : str
        Path to the score file.
    regexps : list of str, optional
        A list of regular expressions that match each PAI. If not given, the values in
        the column pointed by regexp_column are used to find scores for different
        PAIs.
    regexp_column : str, optional
        If a list of regular expressions are given, those patterns will be matched
        against the values in this column. default: ``attack_type``

    Returns
    -------
    tuple ([positives], {'pai_name': [negatives]})
        A tuple containing positive scores and a dict of negative scores mapping PAIs
        names to their respective scores.

    Raises
    ------
    ValueError
        If none of the given regular expressions match the values in regexp_column.
    KeyError
        If regexp_column is not a column of the CSV file.
    """
    logger.debug(f"Loading CSV score file: '{filename}'")
    # ``None`` default (instead of a mutable ``[]`` default) avoids sharing one
    # list object across calls.
    if regexps:
        regexps = [re.compile(pattern) for pattern in regexps]
    pos = []
    negs = defaultdict(list)
    for row in _iterate_csv_score_file(filename):
        # if it is a Bona-Fide score
        if row["is_bonafide"].lower() == "true":
            pos.append(row["score"])
            continue
        # attack score without regexps: group by the raw value of regexp_column
        if not regexps:
            negs[row[regexp_column]].append(row["score"])
            continue
        # attack score with regexps: the first matching pattern names the PAI group
        for pattern in regexps:
            if pattern.search(row[regexp_column]):
                negs[pattern.pattern].append(row["score"])
                break
        else:  # this else is for the for loop: ``for pattern in regexps:``
            raise ValueError(
                f"No regexps: {regexps} match `{row[regexp_column]}' "
                f"from `{regexp_column}' column."
            )
    logger.debug(f"Found {len(negs)} different PAIs names: {list(negs.keys())}")
    return pos, negs
def split_csv_pad(filename):
    """Loads PAD scores from a CSV score file, splits them by attack vs bonafide.

    The CSV must contain an ``is_bonafide`` column with each field either
    ``True`` or ``False`` (case insensitive).

    Parameters
    ----------
    filename: str
        The path to a CSV file containing all the scores.

    Returns
    -------
    (attack, bonafide): Tuple of 1D-arrays
        The negative (attacks) and positives (bonafide) scores.
    """
    logger.debug(f"Loading CSV score file: '{filename}'")
    split_scores = defaultdict(list)
    for row in _iterate_csv_score_file(filename):
        if row["is_bonafide"].lower() == "true":
            split_scores["bonafide"].append(row["score"])
        else:
            split_scores["attack"].append(row["score"])
    # NOTE: the trailing space after "and" keeps the two log fragments from
    # fusing into e.g. "and42 positive".
    logger.debug(
        f"Found {len(split_scores['attack'])} negative (attack), and "
        f"{len(split_scores['bonafide'])} positive (bonafide) scores."
    )
    # Cast the scores to numpy float
    for key, scores in split_scores.items():
        split_scores[key] = numpy.array(scores, dtype=numpy.float64)
    return split_scores["attack"], split_scores["bonafide"]
......@@ -5,16 +5,14 @@ from bob.measure.script import common_options
from bob.extension.scripts.click_helper import verbosity_option
import bob.bio.base.script.gen as bio_gen
import bob.measure.script.figure as measure_figure
from bob.bio.base.score import load
from . import pad_figure as figure
from .error_utils import negatives_per_pai_and_positives
from .error_utils import split_csv_pad, split_csv_pad_per_pai
from functools import partial
from csv import DictWriter
import numpy
import os
# Help-text fragment shared by the analysis commands below; the old 4/5-column
# description was replaced when the commands switched to CSV score files.
SCORE_FORMAT = "Files must be in CSV format."
CRITERIA = (
"eer",
"min-hter",
......@@ -53,7 +51,7 @@ def metrics_option(
help="List of metrics to print. Provide a string with comma separated metric "
"names. For possible values see the default value.",
default="apcer_pais,apcer_ap,bpcer,acer,fta,fpr,fnr,hter,far,frr,precision,recall,f1_score,auc,auc-log-scale",
**kwargs
**kwargs,
):
"""The metrics option"""
......@@ -71,7 +69,7 @@ def metrics_option(
help=help,
show_default=True,
callback=callback,
**kwargs
**kwargs,
)(func)
return custom_metrics_option
......@@ -80,7 +78,7 @@ def metrics_option(
def regexps_option(
help="A list of regular expressions (by repeating this option) to be used to "
"categorize PAIs. Each regexp must match one type of PAI.",
**kwargs
**kwargs,
):
def custom_regexps_option(func):
def callback(ctx, param, value):
......@@ -94,7 +92,7 @@ def regexps_option(
multiple=True,
help=help,
callback=callback,
**kwargs
**kwargs,
)(func)
return custom_regexps_option
......@@ -102,7 +100,7 @@ def regexps_option(
def regexp_column_option(
help="The column in the score files to match the regular expressions against.",
**kwargs
**kwargs,
):
def custom_regexp_column_option(func):
def callback(ctx, param, value):
......@@ -112,35 +110,110 @@ def regexp_column_option(
return click.option(
"-rc",
"--regexp-column",
default="real_id",
type=click.Choice(("claimed_id", "real_id", "test_label")),
default="attack_type",
help=help,
show_default=True,
callback=callback,
**kwargs
**kwargs,
)(func)
return custom_regexp_column_option
def gen_pad_csv_scores(
    filename, mean_match, mean_attacks, n_attack_types, n_clients, n_samples
):
    """Generates a CSV file containing random scores for PAD.

    Bona-fide scores are drawn from a Gaussian centered on ``mean_match``; each
    attack type draws from a Gaussian centered on the corresponding entry of
    ``mean_attacks`` (cycled if fewer means than attack types are given).

    Parameters
    ----------
    filename : str
        Path of the CSV file to write.
    mean_match : float
        Mean of the bona-fide score distribution.
    mean_attacks : list of float
        Means of the attack score distributions, one per attack type.
    n_attack_types : int
        Number of different attack types (PAIs) to generate.
    n_clients : int
        Number of client identities.
    n_samples : int
        Number of samples per client (and per attack type).
    """
    columns = [
        "claimed_id",
        "test_label",
        "is_bonafide",
        "attack_type",
        "sample_n",
        "score",
    ]
    # newline="" is required by the csv module so the writer controls line
    # endings itself (avoids doubled \r on Windows).
    with open(filename, "w", newline="") as f:
        writer = DictWriter(f, fieldnames=columns)
        writer.writeheader()
        # Bonafide rows
        for client_id in range(n_clients):
            for sample in range(n_samples):
                writer.writerow(
                    {
                        "claimed_id": client_id,
                        "test_label": f"client/real/{client_id:03d}",
                        "is_bonafide": "True",
                        "attack_type": None,
                        "sample_n": sample,
                        "score": numpy.random.normal(loc=mean_match),
                    }
                )
        # Attacks rows
        for attack_type in range(n_attack_types):
            for client_id in range(n_clients):
                for sample in range(n_samples):
                    writer.writerow(
                        {
                            "claimed_id": client_id,
                            "test_label": f"client/attack/{client_id:03d}",
                            "is_bonafide": "False",
                            "attack_type": f"type_{attack_type}",
                            "sample_n": sample,
                            "score": numpy.random.normal(
                                loc=mean_attacks[attack_type % len(mean_attacks)]
                            ),
                        }
                    )
@click.command()
@click.argument("outdir")
@click.option("-mm", "--mean-match", default=10, type=click.FLOAT, show_default=True)
@click.option(
    "-ma",
    "--mean-attacks",
    default=[-10, -6],
    type=click.FLOAT,
    show_default=True,
    multiple=True,
)
@click.option("-c", "--n-clients", default=10, type=click.INT, show_default=True)
@click.option("-s", "--n-samples", default=2, type=click.INT, show_default=True)
@click.option("-a", "--n-attacks", default=2, type=click.INT, show_default=True)
@verbosity_option()
@click.pass_context
def gen(
    ctx, outdir, mean_match, mean_attacks, n_clients, n_samples, n_attacks, **kwargs
):
    """Generate random scores.

    Generates random scores in CSV format. The scores are generated
    using Gaussian distribution whose mean is an input
    parameter. The generated scores can be used as hypothetical datasets.

    n-attacks defines the number of different type of attacks generated (like print and
    mask). When multiples attacks are present, the mean-attacks option can be set
    multiple times, specifying the mean of each attack scores distribution.

    Example:

        bob pad gen results/generated/scores-dev.csv -a 3 -ma 2 -ma 5 -ma 7 -mm 8
    """
    # Make sure the output folder exists before writing the two score files.
    os.makedirs(outdir, exist_ok=True)
    # Fixed seed so repeated invocations produce the same hypothetical dataset.
    numpy.random.seed(0)
    gen_pad_csv_scores(
        os.path.join(outdir, "scores-dev.csv"),
        mean_match,
        mean_attacks,
        n_attacks,
        n_clients,
        n_samples,
    )
    gen_pad_csv_scores(
        os.path.join(outdir, "scores-eval.csv"),
        mean_match,
        mean_attacks,
        n_attacks,
        n_clients,
        n_samples,
    )
@common_options.metrics_command(
......@@ -174,7 +247,7 @@ See also ``bob pad multi-metrics``.
@metrics_option()
def metrics(ctx, scores, evaluation, regexps, regexp_column, metrics, **kwargs):
    # Loader that groups negative scores per PAI (via the regexps) before the
    # metrics are computed.
    load_fn = partial(
        split_csv_pad_per_pai, regexps=regexps, regexp_column=regexp_column
    )
    process = figure.Metrics(ctx, scores, evaluation, load_fn, metrics)
    process.run()
......@@ -184,7 +257,7 @@ def metrics(ctx, scores, evaluation, regexps, regexp_column, metrics, **kwargs):
common_options.ROC_HELP.format(score_format=SCORE_FORMAT, command="bob pad roc")
)
def roc(ctx, scores, evaluation, **kwargs):
    # Scores are loaded from CSV and split into (attack, bonafide) sets.
    process = figure.Roc(ctx, scores, evaluation, split_csv_pad)
    process.run()
......@@ -192,7 +265,7 @@ def roc(ctx, scores, evaluation, **kwargs):
common_options.DET_HELP.format(score_format=SCORE_FORMAT, command="bob pad det")
)
def det(ctx, scores, evaluation, **kwargs):
    # Scores are loaded from CSV and split into (attack, bonafide) sets.
    process = figure.Det(ctx, scores, evaluation, split_csv_pad)
    process.run()
......@@ -200,7 +273,7 @@ def det(ctx, scores, evaluation, **kwargs):
common_options.EPC_HELP.format(score_format=SCORE_FORMAT, command="bob pad epc")
)
def epc(ctx, scores, **kwargs):
    # EPC always needs dev+eval scores, hence evaluation=True; HTER is relabeled
    # ACER for PAD.
    process = measure_figure.Epc(ctx, scores, True, split_csv_pad, hter="ACER")
    process.run()
......@@ -208,7 +281,7 @@ def epc(ctx, scores, **kwargs):
common_options.HIST_HELP.format(score_format=SCORE_FORMAT, command="bob pad hist")
)
def hist(ctx, scores, evaluation, **kwargs):
    # Scores are loaded from CSV and split into (attack, bonafide) sets.
    process = figure.Hist(ctx, scores, evaluation, split_csv_pad)
    process.run()
......@@ -250,7 +323,7 @@ def multi_metrics(
):
ctx.meta["min_arg"] = protocols_number * (2 if evaluation else 1)
load_fn = partial(
negatives_per_pai_and_positives, regexps=regexps, regexp_column=regexp_column
split_csv_pad_per_pai, regexps=regexps, regexp_column=regexp_column
)
process = figure.MultiMetrics(ctx, scores, evaluation, load_fn, metrics)
process.run()
......@@ -7,6 +7,8 @@ from bob.extension.scripts.click_helper import ConfigCommand
from bob.extension.scripts.click_helper import ResourceOption
from bob.extension.scripts.click_helper import verbosity_option
from bob.pipelines.distributed import dask_get_partition_size
from io import StringIO
import csv
@click.command(
......@@ -71,6 +73,14 @@ from bob.pipelines.distributed import dask_get_partition_size
help="Saves scores (and checkpoints) in this folder.",
cls=ResourceOption,
)
@click.option(
"--csv-scores/--lst-scores",
"write_metadata_scores",
default=True,
help="Choose the score file format as 'csv' with additional metadata or 'lst' 4 "
"columns. Default: --csv-scores",
cls=ResourceOption,
)
@click.option(
"--checkpoint",
"-c",
......@@ -108,6 +118,7 @@ def vanilla_pad(
dask_client,
groups,
output,
write_metadata_scores,
checkpoint,
dask_partition_size,
dask_n_workers,
......@@ -131,6 +142,10 @@ def vanilla_pad(
logger = logging.getLogger(__name__)
log_parameters(logger)
get_score_row = score_row_csv if write_metadata_scores else score_row_four_columns
output_file_ext = ".csv" if write_metadata_scores else ""
intermediate_file_ext = ".csv.gz" if write_metadata_scores else ".txt.gz"
os.makedirs(output, exist_ok=True)
if checkpoint:
......@@ -146,7 +161,7 @@ def vanilla_pad(
predict_samples[group] = database.predict_samples(group=group)
total_samples += len(predict_samples[group])
# Checking if the pipieline is dask-wrapped
# Checking if the pipeline is dask-wrapped
first_step = pipeline[0]
if not isinstance_nested(first_step, "estimator", DaskWrapper):
......@@ -182,13 +197,13 @@ def vanilla_pad(
logger.info(f"Running vanilla biometrics for group {group}")
result = getattr(pipeline, decision_function)(predict_samples[group])
scores_path = os.path.join(output, f"scores-{group}")
scores_path = os.path.join(output, f"scores-{group}{output_file_ext}")
if isinstance(result, dask.bag.core.Bag):
# write each partition into a zipped txt file
result = result.map(pad_predicted_sample_to_score_line)
prefix, postfix = f"{output}/scores/scores-{group}-", ".txt.gz"
# write each partition into a zipped txt file, one line per sample
result = result.map(get_score_row)
prefix, postfix = f"{output}/scores/scores-{group}-", intermediate_file_ext
pattern = f"{prefix}*{postfix}"
os.makedirs(os.path.dirname(prefix), exist_ok=True)
logger.info("Writing bag results into files ...")
......@@ -198,29 +213,52 @@ def vanilla_pad(
)
with open(scores_path, "w") as f:
csv_writer, header = None, None
# concatenate scores into one score file
for path in sorted(
glob(pattern),
key=lambda l: int(l.replace(prefix, "").replace(postfix, "")),
):
with gzip.open(path, "rt") as f2:
f.write(f2.read())
if write_metadata_scores:
if csv_writer is None:
# Retrieve the header from one of the _header fields
tmp_reader = csv.reader(f2)
# Reconstruct a list from the str representation
header = next(tmp_reader)[-1].strip("][").split(", ")
header = [s.strip("' ") for s in header]
csv_writer = csv.DictWriter(f, fieldnames=header)
csv_writer.writeheader()
f2.seek(0, 0)
# There is no header in the intermediary files, specify it
csv_reader = csv.DictReader(
f2, fieldnames=header + ["_header"]
)
for row in csv_reader:
# Write each element of the row, except `_header`
csv_writer.writerow(
{k: row[k] for k in row.keys() if k != "_header"}
)
else:
f.write(f2.read())
# delete intermediate score files
os.remove(path)
else:
with open(scores_path, "w") as f:
if write_metadata_scores:
csv.DictWriter(
f, fieldnames=_get_csv_columns(result[0]).keys()
).writeheader()
for sample in result:
f.write(pad_predicted_sample_to_score_line(sample, endl="\n"))
f.write(get_score_row(sample, endl="\n"))
def score_row_four_columns(sample, endl=""):
    """Returns a legacy 4-column score line for one predicted sample.

    The line has the form ``claimed_id real_id test_label score``; for
    bona-fide samples ``real_id`` equals ``claimed_id``, for attacks it is the
    sample's ``attack_type``.

    Parameters
    ----------
    sample : object
        Object exposing ``subject``, ``key``, ``data``, ``is_bonafide`` and
        ``attack_type`` attributes.
    endl : str, optional
        String appended to the line (e.g. a newline character).
    """
    claimed_id, test_label, score = sample.subject, sample.key, sample.data
    real_id = claimed_id if sample.is_bonafide else sample.attack_type
    return f"{claimed_id} {real_id} {test_label} {score}{endl}"
def _get_csv_columns(sample):
"""Returns a dict of {csv_column_name: sample_attr_name} given a sample."""
# Mandatory columns and their corresponding fields
columns_attr = {
"claimed_id": "subject",
"test_label": "key",
"is_bonafide": "is_bonafide",
"attack_type": "attack_type",
"score": "data",
}
# Preventing duplicates and unwanted data
ignored_fields = list(columns_attr.values()) + ["annotations"]
# Retrieving custom metadata attribute names
metadata_fields = [
k
for k in sample.__dict__.keys()
if not k.startswith("_") and k not in ignored_fields
]
for field in metadata_fields:
columns_attr[field] = field
return columns_attr
def score_row_csv(sample, endl=""):
    """Returns a str representing one row of a CSV for the sample.

    If endl is empty, it is assumed that the row will be stored in a temporary file
    without header, thus a `_header` column is added at the end, containing the header
    as a list. This field can be used to reconstruct the final file.
    """
    columns_fields = _get_csv_columns(sample)
    # An empty endl marks the intermediary (dask) path that embeds the header.
    intermediary = endl == ""
    fieldnames = list(columns_fields.keys())
    if intermediary:
        fieldnames = fieldnames + ["_header"]
    buffer = StringIO()
    writer = csv.DictWriter(buffer, fieldnames=fieldnames)
    row = {
        column: getattr(sample, attr, None) for column, attr in columns_fields.items()
    }
    if row["score"] is None:
        row["score"] = "nan"
    if intermediary:
        # Store the current CSV header so the final file can be reconstructed.
        row["_header"] = list(columns_fields.keys())
    writer.writerow(row)
    rendered = buffer.getvalue()
    # Intermediary rows must carry no trailing line terminator of their own.
    return rendered.rstrip() if intermediary else rendered
claimed_id,test_label,is_bonafide,attack_type,sample_n,score
0,client/real/000,True,,0,11.764052345967665
0,client/real/000,True,,1,10.400157208367224
1,client/real/001,True,,0,10.97873798410574
1,client/real/001,True,,1,12.240893199201459
2,client/real/002,True,,0,11.867557990149967
2,client/real/002,True,,1,9.02272212012359
3,client/real/003,True,,0,10.95008841752559
3,client/real/003,True,,1,9.848642791702302
4,client/real/004,True,,0,9.896781148206442
4,client/real/004,True,,1,10.410598501938372
5,client/real/005,True,,0,10.144043571160879
5,client/real/005,True,,1,11.454273506962975
6,client/real/006,True,,0,10.761037725146993
6,client/real/006,True,,1,10.121675016492828
7,client/real/007,True,,0,10.443863232745425
7,client/real/007,True,,1,10.333674327374267
8,client/real/008,True,,0,11.494079073157605
8,client/real/008,True,,1,9.794841736234199
9,client/real/009,True,,0,10.313067701650901
9,client/real/009,True,,1,9.145904260698275
0,client/attack/000,False,type_0,0,6.447010184165921
0,client/attack/000,False,type_0,1,9.65361859544036
1,client/attack/001,False,type_0,0,9.864436198859506
1,client/attack/001,False,type_0,1,8.257834979593557
2,client/attack/002,False,type_0,0,11.269754623987607
2,client/attack/002,False,type_0,1,7.5456343254012355
3,client/attack/003,False,type_0,0,9.045758517301445
3,client/attack/003,False,type_0,1,8.812816149974166
4,client/attack/004,False,type_0,0,10.532779214358458
4,client/attack/004,False,type_0,1,10.469358769900285
5,client/attack/005,False,type_0,0,9.154947425696916
5,client/attack/005,False,type_0,1,9.378162519602174
6,client/attack/006,False,type_0,0,8.112214252369887
6,client/attack/006,False,type_0,1,7.019203531776073
7,client/attack/007,False,type_0,0,8.652087850673848
7,client/attack/007,False,type_0,1,9.15634896910398
8,client/attack/008,False,type_0,0,10.230290680727721
8,client/attack/008,False,type_0,1,10.20237984878441
9,client/attack/009,False,type_0,0,8.612673182592047
9,client/attack/009,False,type_0,1,8.697697249424664
0,client/attack/000,False,type_1,0,4.951447034932907
0,client/attack/000,False,type_1,1,4.579982062821025
1,client/attack/001,False,type_1,0,4.293729809374987
1,client/attack/001,False,type_1,1,7.95077539523179
2,client/attack/002,False,type_1,0,5.490347818248346
2,client/attack/002,False,type_1,1,5.561925698388814
3,client/attack/003,False,type_1,0,4.747204639950073
3,client/attack/003,False,type_1,1,6.77749035583191
4,client/attack/004,False,type_1,0,4.3861021524420485
4,client/attack/004,False,type_1,1,5.787259719786031
5,client/attack/005,False,type_1,0,5.104533438806325
5,client/attack/005,False,type_1,1,6.386902497859262
6,client/attack/006,False,type_1,0,5.489194862431127
6,client/attack/006,False,type_1,1,4.8193678158775874
7,client/attack/007,False,type_1,0,5.971817771661345
7,client/attack/007,False,type_1,1,6.4283318705304175
8,client/attack/008,False,type_1,0,6.066517222383168
8,client/attack/008,False,type_1,1,6.302471897739782
9,client/attack/009,False,type_1,0,5.3656779063190365
9,client/attack/009,False,type_1,1,5.6372588340128615
claimed_id,test_label,is_bonafide,attack_type,sample_n,score
0,client/real/000,True,,0,9.32753955222405
0,client/real/000,True,,1,9.640446838459459
1,client/real/001,True,,0,9.186853717955547
1,client/real/001,True,,1,8.273717397668323
2,client/real/002,True,,0,10.177426142253752
2,client/real/002,True,,1,9.598219063791738
3,client/real/003,True,,0,8.369801653033955
3,client/real/003,True,,1,10.462782255525774
4,client/real/004,True,,0,9.092701635616757
4,client/real/004,True,,1,10.051945395796139
5,client/real/005,True,,0,10.729090562177538
5,client/real/005,True,,1,10.128982910757411
6,client/real/006,True,,0,11.1394006845433
6,client/real/006,True,,1,8.765174179646348
7,client/real/007,True,,0,10.402341641177548
7,client/real/007,True,,1,9.315189909059686
8,client/real/008,True,,0,9.129202850818118
8,client/real/008,True,,1,9.421150335235584
9,client/real/009,True,,0,9.688447467872628
9,client/real/009,True,,1,10.056165342229745
0,client/attack/000,False,type_0,0,7.834850159216644
0,client/attack/000,False,type_0,1,9.900826486954188
1,client/attack/001,False,type_0,0,9.46566243973046
1,client/attack/001,False,type_0,1,7.463756313722776
2,client/attack/002,False,type_0,0,10.4882521937956
2,client/attack/002,False,type_0,1,10.895889176030582
3,client/attack/003,False,type_0,0,10.178779571159652
3,client/attack/003,False,type_0,1,8.82007516418765
4,client/attack/004,False,type_0,0,7.929247378489458
4,client/attack/004,False,type_0,1,10.054451726931136
5,client/attack/005,False,type_0,0,8.59682305302682
5,client/attack/005,False,type_0,1,10.222445070382427
6,client/attack/006,False,type_0,0,9.20827497807686
6,client/attack/006,False,type_0,1,9.976639036483713
7,client/attack/007,False,type_0,0,9.356366397174401
7,client/attack/007,False,type_0,1,9.706573168191948
8,client/attack/008,False,type_0,0,9.010500020720821
8,client/attack/008,False,type_0,1,10.785870493905835
9,client/attack/009,False,type_0,0,9.12691209270362
9,client/attack/009,False,type_0,1,9.401989363444702
0,client/attack/000,False,type_1,0,7.883150697056254