diff --git a/src/mednet/scripts/upload.py b/src/mednet/scripts/upload.py
index 615b81b25b08d9c8c07ae9e1dbaff1a9753f195e..1bd1add1c98b0bdc56e57dc559f073ff5c8ce030 100644
--- a/src/mednet/scripts/upload.py
+++ b/src/mednet/scripts/upload.py
@@ -125,9 +125,9 @@ def upload(
     )
 
     # get train files
+    train_log_file = experiment_folder / "trainlog.pdf"
     train_folder = experiment_folder / "model"
     train_meta_file = train_folder / "meta.json"
-    train_log_file = train_folder / "trainlog.pdf"
     train_model_file = get_checkpoint_to_run_inference(train_folder)
     train_files = [train_meta_file, train_model_file, train_log_file]
 
@@ -149,7 +149,6 @@ def upload(
             f"permitted maximum ({upload_limit_mb:.2f} MB)."
         )
 
-    # prepare experiment and run names
     with train_meta_file.open("r") as meta_file:
         train_data = json.load(meta_file)
 
@@ -157,6 +156,9 @@ def upload(
         evaluation_data = json.load(meta_file)
     evaluation_data = evaluation_data["test"]
 
+    # get the epoch with the lowest validation loss from the checkpoint file name
+    best_epoch = train_model_file.stem.split("=")[-1]
+
     experiment_name = (
         experiment_name
         or f"{train_data['model-name']}-{train_data['database-name']}"
@@ -177,6 +179,21 @@
 
         click.echo("Uploading metrics...")
 
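+        # log the training hyper-parameters recorded in ``meta.json`` as
+        # MLflow run parameters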
+        for k in [
+            "epochs",
+            "batch-size",
+        ]:
+            click.secho(f"  -> `{k}` ({train_data[k]})")
+            mlflow.log_param(k, train_data[k])
+
+        click.secho(f"  -> `#accumulations` ({train_data['batch-chunk-count']})")
+        mlflow.log_param("#Accumulations", train_data['batch-chunk-count'])
+        click.secho(f"  -> `epoch (best)` ({best_epoch})")
+        mlflow.log_param("Epoch (best)", best_epoch)
+
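+        # evaluation figures below were computed on the test split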
         for k in [
             "threshold",
             "precision",