From dbee52bf34f9993556c8e79c1fcee840a6d224c8 Mon Sep 17 00:00:00 2001
From: Gokhan Ozbulak <gokhan.ozbulak@idiap.ch>
Date: Fri, 24 May 2024 11:46:33 +0200
Subject: [PATCH] Fixed help text for new usage. #25

---
 src/mednet/scripts/train.py | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/src/mednet/scripts/train.py b/src/mednet/scripts/train.py
index 78720362..efbdac88 100644
--- a/src/mednet/scripts/train.py
+++ b/src/mednet/scripts/train.py
@@ -83,15 +83,14 @@ def reusable_options(f):
         "-c",
         help="Number of chunks in every batch (this parameter affects "
         "memory requirements for the network). The number of samples "
-        "loaded for every iteration will be batch-size/batch-chunk-count. "
-        "batch-size needs to be divisible by batch-chunk-count, otherwise an "
-        "error will be raised. This parameter is used to reduce the number of "
-        "samples loaded in each iteration, in order to reduce the memory usage "
-        "in exchange for processing time (more iterations). This is especially "
-        "interesting when one is training on GPUs with limited RAM. The "
-        "default of 1 forces the whole batch to be processed at once. Otherwise "
-        "the batch is broken into batch-chunk-count pieces, and gradients are "
-        "accumulated to complete each batch.",
+        "loaded for every iteration will be batch-size*batch-chunk-count. "
+        "This parameter is used to reduce the number of samples loaded in each "
+        "iteration, in order to reduce the memory usage in exchange for "
+        "processing time (more iterations). This is especially interesting "
+        "when one is training on GPUs with limited RAM. The default of 1 forces "
+        "the whole batch to be processed at once. Otherwise the batch is "
+        "multiplied by batch-chunk-count pieces, and gradients are accumulated "
+        "to complete each batch.",
         required=True,
         show_default=True,
         default=1,
--
GitLab
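
Note (editorial, not part of the patch): the help text above describes gradient accumulation, where a batch is processed in batch-chunk-count smaller pieces and the optimizer steps only once per full batch. Below is a minimal PyTorch sketch of that behaviour, assuming illustrative names batch_size and batch_chunk_count, a toy model, and random data; none of this comes from mednet's implementation.

    import torch

    # Toy model and optimizer, purely for illustration.
    model = torch.nn.Linear(16, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    loss_fn = torch.nn.CrossEntropyLoss()

    batch_size = 8         # assumed: samples loaded per forward/backward pass
    batch_chunk_count = 4  # assumed: effective batch = batch_size * batch_chunk_count

    optimizer.zero_grad()
    for _ in range(batch_chunk_count):
        x = torch.randn(batch_size, 16)                   # one chunk of samples
        y = torch.randint(0, 2, (batch_size,))
        loss = loss_fn(model(x), y) / batch_chunk_count   # scale so chunk losses average
        loss.backward()                                   # gradients accumulate in .grad
    optimizer.step()                                      # one update for the whole batch
    optimizer.zero_grad()

With batch_chunk_count=1 (the default described in the help text), the loop runs once and the whole batch is processed in a single pass; larger values trade memory for extra iterations per optimizer step.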