Commit b33248be authored by André Anjos
[qa] Update pre-commit and style

parent c79bafba
Pipeline #69837 canceled
Showing 34 additions and 84 deletions
@@ -136,7 +136,6 @@ def make_dataset(
t_transforms += post_transforms
for key in subsets.keys():
retval[key] = make_subset(
subsets[key], transforms=transforms, suffixes=post_transforms
)
@@ -187,10 +186,8 @@ def get_samples_weights(dataset):
if isinstance(dataset, torch.utils.data.ConcatDataset):
for ds in dataset.datasets:
# Weighting only for binary labels
if isinstance(ds._samples[0].label, int):
# Groundtruth
targets = []
for s in ds._samples:
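For context, a minimal sketch of the class-balancing idea behind get_samples_weights, assuming binary integer labels (a stand-alone illustration, not the ptbench implementation):

    import torch

    def balanced_sample_weights(labels):
        # one weight per sample, inversely proportional to its class frequency
        targets = torch.tensor(labels)
        counts = torch.bincount(targets, minlength=2).float()
        class_weights = counts.sum() / counts        # rarer class -> larger weight
        return class_weights[targets]

    weights = balanced_sample_weights([0, 0, 0, 1])  # tensor([1.33, 1.33, 1.33, 4.])
    sampler = torch.utils.data.WeightedRandomSampler(weights, num_samples=len(weights))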
@@ -272,7 +269,6 @@ def get_positive_weights(dataset):
targets = []
if isinstance(dataset, torch.utils.data.ConcatDataset):
for ds in dataset.datasets:
for s in ds._samples:
targets.append(s.label)
...
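get_positive_weights gathers all labels to derive a positive-class weight; the usual recipe is the negative/positive ratio handed to BCEWithLogitsLoss. A sketch with illustrative values:

    import torch

    targets = torch.tensor([0, 0, 0, 1, 1])             # collected from ds._samples
    positives = (targets == 1).sum()
    negatives = (targets == 0).sum()
    pos_weight = negatives.float() / positives.float()  # 1.5: up-weights positives
    criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)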
@@ -78,7 +78,6 @@ class ElasticDeformation:
def __call__(self, img):
if random.random() < self.p:
img = numpy.asarray(img)
assert img.ndim == 2
...
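ElasticDeformation only fires when random.random() < self.p, the standard gate for applying an augmentation with probability p. A generic sketch of that pattern, with the warp itself replaced by a caller-supplied function:

    import random

    import numpy

    class RandomApply:
        """Applies ``fn`` to an image with probability ``p``."""

        def __init__(self, fn, p=0.8):
            self.fn = fn
            self.p = p

        def __call__(self, img):
            if random.random() < self.p:
                return self.fn(numpy.asarray(img))
            return img  # unchanged the rest of the time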
@@ -292,7 +292,6 @@ def run(
fig.savefig(fullpath)
if f1_thresh is not None and eer_thresh is not None:
# get the closest possible threshold we have
index = int(round(steps * f1_thresh))
f1_a_priori = data_df["f1_score"][index]
...
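The index arithmetic assumes data_df holds metrics for steps thresholds sampled uniformly over [0, 1], so a chosen threshold maps back to the nearest row. Worked example with assumed values:

    steps = 1000                            # thresholds 0/1000, 1/1000, ..., 1
    f1_thresh = 0.4321                      # threshold selected a posteriori
    index = int(round(steps * f1_thresh))   # -> 432, the closest sampled row
    # f1_a_priori = data_df["f1_score"][index]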
@@ -114,7 +114,6 @@ def run(model, data_loader, name, device, output_folder, grad_cams=False):
leave=False,
disable=None,
):
names = samples[0]
images = samples[1].to(
device=device, non_blocking=torch.cuda.is_available()
@@ -133,10 +132,8 @@ def run(model, data_loader, name, device, output_folder, grad_cams=False):
topk = 1
for i in range(topk):
# Keep only "positive" signs
if probs[:, [i]] > 0.5:
# Grad-CAM
b = ids[:, [i]]
gcam.backward(ids=ids[:, [i]])
@@ -145,7 +142,6 @@ def run(model, data_loader, name, device, output_folder, grad_cams=False):
)
for j in range(len(images)):
current_cam = regions[j, 0].cpu().numpy()
current_cam[current_cam < 0.75] = 0.0
current_cam[current_cam >= 0.75] = 1.0
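The two masked assignments binarize a 0-1 normalized class activation map at 0.75; an equivalent vectorized form:

    import numpy

    cam = numpy.array([0.10, 0.80, 0.74, 0.90])
    binary = (cam >= 0.75).astype(cam.dtype)  # array([0., 1., 0., 1.])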
@@ -158,14 +154,12 @@ def run(model, data_loader, name, device, output_folder, grad_cams=False):
]
if len(cams_img) > 0:
# Convert original image tensor into PIL Image
original_image = transforms.ToPILImage(mode="RGB")(
images[0]
)
for sign_id, label_prob in cams_img.items():
label = label_prob[0]
# Create the colored overlay for current sign
@@ -222,7 +216,6 @@ def run(model, data_loader, name, device, output_folder, grad_cams=False):
fig.savefig(cam_filename)
with torch.no_grad():
start_time = time.perf_counter()
outputs = model(images)
probabilities = torch.sigmoid(outputs)
...
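The model returns raw logits; torch.sigmoid maps each one independently to a per-sign probability (multi-label, as opposed to a softmax over classes):

    import torch

    logits = torch.tensor([[-1.2, 0.3, 2.0]])
    probabilities = torch.sigmoid(logits)  # tensor([[0.2315, 0.5744, 0.8808]])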
@@ -225,7 +225,6 @@ def train_epoch(loader, model, optimizer, device, criterion, batch_chunk_count):
for idx, samples in enumerate(
tqdm(loader, desc="train", leave=False, disable=None)
):
images = samples[1].to(
device=device, non_blocking=torch.cuda.is_available()
)
@@ -310,7 +309,6 @@ def validate_epoch(loader, model, device, criterion, pbar_desc):
samples_in_batch = []
with torch.no_grad(), torch_evaluation(model):
for samples in tqdm(loader, desc=pbar_desc, leave=False, disable=None):
images = samples[1].to(
device=device,
@@ -599,7 +597,6 @@ def run(
leave=False,
disable=None,
):
with ResourceMonitor(
interval=monitoring_interval,
has_gpu=(device.type == "cuda"),
...
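ResourceMonitor wraps the training loop and samples machine state every monitoring_interval seconds. A generic sketch of such a polling context manager (hypothetical helper, not the ptbench class):

    import contextlib
    import threading
    import time

    @contextlib.contextmanager
    def resource_monitor(interval=1.0):
        samples = []
        stop = threading.Event()

        def poll():
            # wait() returns False on timeout, True once stop is set
            while not stop.wait(interval):
                samples.append(time.perf_counter())  # stand-in for a real reading

        thread = threading.Thread(target=poll, daemon=True)
        thread.start()
        try:
            yield samples
        finally:
            stop.set()
            thread.join()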
@@ -46,7 +46,6 @@ def aggregpred(label_path, output_folder) -> None:
# loads all data
series = []
for predictions_path in label_path:
# Load predictions
logger.info(f"Loading predictions from {predictions_path}...")
pred_data = pandas.read_csv(predictions_path)
...
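aggregpred's loop boils down to concatenating per-split prediction tables into one; a minimal pandas sketch with hypothetical paths:

    import pandas

    label_path = ["train/predictions.csv", "test/predictions.csv"]  # hypothetical
    series = [pandas.read_csv(p) for p in label_path]
    aggregated = pandas.concat(series, ignore_index=True)
    aggregated.to_csv("aggregpred.csv", index=False)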
@@ -62,7 +62,6 @@ def _load(data, threshold):
# loads all data
retval = {}
for name, predictions_path in data.items():
# Load predictions
logger.info(f"Loading predictions from {predictions_path}...")
pred_data = pandas.read_csv(predictions_path)
...
@@ -96,7 +96,6 @@ def list(verbose) -> None:
entry_points_by_module[k][name] = ep
for config_type in sorted(entry_points_by_module):
# calculates the longest config name so we offset the printing
longest_name_length = max(
len(k) for k in entry_points_by_module[config_type].keys()
...
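longest_name_length drives left-aligned printing of config names; the idiom in isolation, with example entries:

    entry_points = {"montgomery": "Montgomery dataset", "pasa": "PASA model"}
    width = max(len(k) for k in entry_points)
    for name in sorted(entry_points):
        print(f"{name:<{width}}  {entry_points[name]}")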
@@ -150,7 +150,6 @@ def predict(
pdf.close()
for k, v in dataset.items():
if k.startswith("_"):
logger.info(f"Skipping dataset '{k}' (not to be evaluated)")
continue
@@ -186,7 +185,6 @@ def predict(
all_mse = []
for f in range(nb_features):
v_original = copy.deepcopy(v)
# Randomly permute feature values from all samples
...
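This loop implements permutation feature importance: copy the data, shuffle one feature column across samples, re-predict, and compare the MSE against the unpermuted baseline. A compact numpy sketch under those assumptions:

    import numpy

    rng = numpy.random.default_rng(42)

    def permutation_importance(predict, X, y):
        baseline = numpy.mean((predict(X) - y) ** 2)    # a-priori MSE
        scores = []
        for f in range(X.shape[1]):
            Xp = X.copy()                               # leave X untouched
            Xp[:, f] = rng.permutation(Xp[:, f])        # permute one feature
            scores.append(numpy.mean((predict(Xp) - y) ** 2) - baseline)
        return scores                                   # larger -> more important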
@@ -44,7 +44,6 @@ def _load(data):
# loads all data
retval = {}
for name, predictions_path in data.items():
# Load predictions
logger.info(f"Loading predictions from {predictions_path}...")
pred_data = pandas.read_csv(predictions_path)
@@ -110,7 +109,6 @@ def predtojson(label_path, output_folder) -> None:
logger.info("Saving JSON file...")
with open(output_file, "a+", newline="") as f:
f.write("{")
for i, (name, value) in enumerate(data.items()):
if i > 0:
...
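The hunk assembles the JSON document by hand (opening brace, per-item comma bookkeeping); for contrast, the standard-library equivalent when the whole mapping fits in memory:

    import json

    data = {"train": [0.12, 0.91], "test": [0.44]}  # hypothetical predictions
    with open("predictions.json", "w") as f:
        json.dump(data, f)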
@@ -458,7 +458,6 @@ def train(
valid_loader = None
if validation_dataset is not None:
# Redefine a weighted valid criterion if possible
if (
isinstance(criterion_valid, torch.nn.BCEWithLogitsLoss)
...
@@ -185,7 +185,6 @@ def train_analysis(
# now, do the analysis
with PdfPages(output_pdf) as pdf:
figure = _loss_evolution(data)
pdf.savefig(figure)
plt.close(figure)
...
@@ -149,11 +149,9 @@ def precision_recall_f1iso(data):
linecycler = cycle(lines)
with _precision_recall_canvas(title=None) as (fig, axes):
legend = []
for name, value in data.items():
df = value["df"]
# plots Recall/Precision curve
@@ -249,7 +247,6 @@ def roc_curve(data, title=None):
legend = []
for name, value in data.items():
df = value["df"]
# plots roc curve
...
@@ -54,7 +54,6 @@ def run_nvidia_smi(query, rename=None):
memory information is transformed to gigabytes (floating-point).
"""
if _nvidia_smi is not None:
if rename is None:
rename = query
else:
...
@@ -9,6 +9,40 @@ from functools import reduce
from torch.nn.modules.module import _addindent
# ignore this space!
def _repr(model):
# We treat the extra repr like the sub-module, one item per line
extra_lines = []
extra_repr = model.extra_repr()
# empty string will be split into list ['']
if extra_repr:
extra_lines = extra_repr.split("\n")
child_lines = []
total_params = 0
for key, module in model._modules.items():
mod_str, num_params = _repr(module)
mod_str = _addindent(mod_str, 2)
child_lines.append("(" + key + "): " + mod_str)
total_params += num_params
lines = extra_lines + child_lines
for _, p in model._parameters.items():
if hasattr(p, "dtype"):
total_params += reduce(lambda x, y: x * y, p.shape)
main_str = model._get_name() + "("
if lines:
# simple one-liner info, which most builtin Modules will use
if len(extra_lines) == 1 and not child_lines:
main_str += extra_lines[0]
else:
main_str += "\n " + "\n ".join(lines) + "\n"
main_str += ")"
main_str += f", {total_params:,} params"
return main_str, total_params
def summary(model):
"""Counts the number of parameters in each model layer.
@@ -27,37 +61,4 @@ def summary(model):
nparam : int
number of parameters
"""
# ignore this space!
def _repr(model):
# We treat the extra repr like the sub-module, one item per line
extra_lines = []
extra_repr = model.extra_repr()
# empty string will be split into list ['']
if extra_repr:
extra_lines = extra_repr.split("\n")
child_lines = []
total_params = 0
for key, module in model._modules.items():
mod_str, num_params = _repr(module)
mod_str = _addindent(mod_str, 2)
child_lines.append("(" + key + "): " + mod_str)
total_params += num_params
lines = extra_lines + child_lines
for _, p in model._parameters.items():
if hasattr(p, "dtype"):
total_params += reduce(lambda x, y: x * y, p.shape)
main_str = model._get_name() + "("
if lines:
# simple one-liner info, which most builtin Modules will use
if len(extra_lines) == 1 and not child_lines:
main_str += extra_lines[0]
else:
main_str += "\n " + "\n ".join(lines) + "\n"
main_str += ")"
main_str += f", {total_params:,} params"
return main_str, total_params
return _repr(model)
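The commit hoists _repr from inside summary() to module level; behavior is unchanged, since summary still just delegates to it. Usage sketch with summary as defined above:

    import torch

    model = torch.nn.Sequential(torch.nn.Linear(10, 5), torch.nn.ReLU())
    text, nparam = summary(model)
    print(text)    # "Sequential( ... ), 55 params"
    print(nparam)  # 55 = 10*5 weights + 5 biases (ReLU adds none)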
@@ -191,13 +191,11 @@ def test_train_pasa_montgomery(temporary_basedir, montgomery_datadir):
# Temporarily modifies Montgomery datadir if need be
new_value = {"datadir.montgomery": str(montgomery_datadir)}
with rc_context(**new_value):
from ptbench.scripts.train import train
runner = CliRunner()
with stdout_logging() as buf:
output_folder = str(temporary_basedir / "results")
result = runner.invoke(
train,
@@ -252,17 +250,14 @@ def test_train_pasa_montgomery(temporary_basedir, montgomery_datadir):
def test_predict_pasa_montgomery(
temporary_basedir, montgomery_datadir, datadir
):
# Temporarily modifies Montgomery datadir if need be
new_value = {"datadir.montgomery": str(montgomery_datadir)}
with rc_context(**new_value):
from ptbench.scripts.predict import predict
runner = CliRunner()
with stdout_logging() as buf:
output_folder = str(temporary_basedir / "predictions")
result = runner.invoke(
predict,
@@ -312,13 +307,11 @@ def test_predtojson(datadir, temporary_basedir, montgomery_datadir):
# Temporarily modify Montgomery datadir if need be
new_value = {"datadir.montgomery": str(montgomery_datadir)}
with rc_context(**new_value):
from ptbench.scripts.predtojson import predtojson
runner = CliRunner()
with stdout_logging() as buf:
predictions = str(datadir / "test_predictions.csv")
output_folder = str(temporary_basedir / "pred_to_json")
result = runner.invoke(
@@ -357,13 +350,11 @@ def test_evaluate_pasa_montgomery(temporary_basedir, montgomery_datadir):
# Temporarily modify Montgomery datadir if need be
new_value = {"datadir.montgomery": str(montgomery_datadir)}
with rc_context(**new_value):
from ptbench.scripts.evaluate import evaluate
runner = CliRunner()
with stdout_logging() as buf:
prediction_folder = str(temporary_basedir / "predictions")
output_folder = str(temporary_basedir / "evaluations")
result = runner.invoke(
@@ -411,13 +402,11 @@ def test_compare_pasa_montgomery(temporary_basedir, montgomery_datadir):
# Temporarily modify Montgomery datadir if need be
new_value = {"datadir.montgomery": str(montgomery_datadir)}
with rc_context(**new_value):
from ptbench.scripts.compare import compare
runner = CliRunner()
with stdout_logging() as buf:
predictions_folder = str(temporary_basedir / "predictions")
output_folder = str(temporary_basedir / "comparisons")
result = runner.invoke(
@@ -461,7 +450,6 @@ def test_train_signstotb_montgomery_rs(temporary_basedir, datadir):
runner = CliRunner()
with stdout_logging() as buf:
output_folder = str(temporary_basedir / "results")
result = runner.invoke(
train,
@@ -514,7 +502,6 @@ def test_predict_signstotb_montgomery_rs(temporary_basedir, datadir):
runner = CliRunner()
with stdout_logging() as buf:
output_folder = str(temporary_basedir / "predictions")
result = runner.invoke(
predict,
@@ -563,7 +550,6 @@ def test_train_logreg_montgomery_rs(temporary_basedir, datadir):
runner = CliRunner()
with stdout_logging() as buf:
output_folder = str(temporary_basedir / "results")
result = runner.invoke(
train,
@@ -616,7 +602,6 @@ def test_predict_logreg_montgomery_rs(temporary_basedir, datadir):
runner = CliRunner()
with stdout_logging() as buf:
output_folder = str(temporary_basedir / "predictions")
result = runner.invoke(
predict,
@@ -657,13 +642,11 @@ def test_aggregpred(temporary_basedir, montgomery_datadir):
# Temporarily modify Montgomery datadir if need be
new_value = {"datadir.montgomery": str(montgomery_datadir)}
with rc_context(**new_value):
from ptbench.scripts.aggregpred import aggregpred
runner = CliRunner()
with stdout_logging() as buf:
predictions = str(
temporary_basedir / "predictions" / "train" / "predictions.csv"
)
...
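All of these tests share one skeleton: point datadir.montgomery at a temporary copy via rc_context, invoke the click command through CliRunner, then assert on the exit code and captured logs. Condensed below (rc_context and stdout_logging are the test helpers used above; the assertion is illustrative):

    from click.testing import CliRunner

    def invoke_with_montgomery(cmd, args, montgomery_datadir):
        new_value = {"datadir.montgomery": str(montgomery_datadir)}
        with rc_context(**new_value):
            runner = CliRunner()
            with stdout_logging() as buf:
                result = runner.invoke(cmd, args)
                assert result.exit_code == 0, result.output
                return buf.getvalue()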
@@ -64,7 +64,6 @@ def test_get_samples_weights(montgomery_datadir):
# Temporarily modify Montgomery datadir
new_value = {"datadir.montgomery": str(montgomery_datadir)}
with rc_context(**new_value):
from ptbench.configs.datasets.montgomery.default import dataset
train_samples_weights = get_samples_weights(
@@ -92,7 +91,6 @@ def test_get_samples_weights_concat(montgomery_datadir):
# Temporarily modify Montgomery datadir
new_value = {"datadir.montgomery": str(montgomery_datadir)}
with rc_context(**new_value):
from ptbench.configs.datasets.montgomery.default import dataset
train_dataset = ConcatDataset(
@@ -133,7 +131,6 @@ def test_get_positive_weights(montgomery_datadir):
# Temporarily modify Montgomery datadir
new_value = {"datadir.montgomery": str(montgomery_datadir)}
with rc_context(**new_value):
from ptbench.configs.datasets.montgomery.default import dataset
train_positive_weights = get_positive_weights(
@@ -211,7 +208,6 @@ def test_get_positive_weights_concat(montgomery_datadir):
# Temporarily modify Montgomery datadir
new_value = {"datadir.montgomery": str(montgomery_datadir)}
with rc_context(**new_value):
from ptbench.configs.datasets.montgomery.default import dataset
train_dataset = ConcatDataset(
...
@@ -26,7 +26,6 @@ def test_random_permute():
nb_equal = 0.0
for k, s in enumerate(test_set._samples):
if original[k] == s.data["data"][2]:
nb_equal += 1
else:
...