Commit 2aa77950, authored 1 year ago by Gokhan OZBULAK, committed 1 year ago by André Anjos
File limit flag is added #60.

Parent: 82d66ea5
Showing 1 changed file: src/mednet/scripts/upload.py (45 additions, 19 deletions)
@@ -89,6 +89,13 @@ def _create_temp_copy(source, target):
     .. code:: sh

        mednet upload --experiment-folder=/path/to/results --run-name=run-1

+    4. Upload an existing experiment result while defining a size limit of 20MB for each file (set 0 for no limit):
+
+    .. code:: sh
+
+       mednet upload --experiment-folder=/path/to/results --file-limit=20
+
     """,
 )
 @click.option(
@@ -116,11 +123,22 @@ def _create_temp_copy(source, target):
     help='A string indicating the run name (e.g. "run-1")',
     cls=ResourceOption,
 )
+@click.option(
+    "--file-limit",
+    "-l",
+    help="Limit file size to be uploaded in MB (set 0 for no limit).",
+    show_default=True,
+    required=True,
+    default=10,
+    type=click.IntRange(min=0),
+    cls=ResourceOption,
+)
 @verbosity_option(logger=logger, cls=ResourceOption, expose_value=False)
 def upload(
     experiment_folder: pathlib.Path,
     experiment_name: str,
     run_name: str,
+    file_limit: int,
     **_,  # ignored
 ) -> None:  # numpydoc ignore=PR01
     """Upload results from an experiment folder."""
@@ -146,7 +164,8 @@ def upload(
         train_model_file, train_model_temp_file
     )
     with train_meta_file.open("r") as f:
-        meta_data = json.load(f)
+        train_data = json.load(f)
+    train_files = [train_meta_file, train_log_file, train_model_temp_file]

     # prepare evaluation files
     evaluation_file = experiment_folder / "evaluation.json"
@@ -154,39 +173,46 @@ def upload(
     evaluation_log_file = experiment_folder / "evaluation.pdf"
     with evaluation_file.open("r") as f:
         evaluation_data = json.load(f)
-        test_data = evaluation_data["test"]
+        evaluation_data = evaluation_data["test"]
+    evaluation_files = [evaluation_file, evaluation_meta_file, evaluation_log_file]
+
+    # check for file sizes.
+    for f in train_files + evaluation_files:
+        file_size = f.stat().st_size / (1024**2)
+        if file_limit != 0 and file_size > file_limit:
+            raise RuntimeError(
+                f"Size of {f} ({file_size:.2f}MB) must be less than or equal to {file_limit}MB."
+            )

     # prepare experiment and run names
     experiment_name = (
         experiment_name
         if experiment_name
-        else f'{meta_data["model-name"]}_{meta_data["database-name"]}'
+        else f'{train_data["model-name"]}_{train_data["database-name"]}'
     )
-    run_name = run_name if run_name else meta_data["datetime"]
+    run_name = run_name if run_name else train_data["datetime"]

     logger.info("Setting experiment and run names on the MLFlow server...")
     mlflow.set_experiment(experiment_name=experiment_name)
     with mlflow.start_run(run_name=run_name):
         # upload metrics
         logger.info("Uploading metrics to MLFlow server...")
-        mlflow.log_metric("threshold", test_data["threshold"])
-        mlflow.log_metric("precision", test_data["precision"])
-        mlflow.log_metric("recall", test_data["recall"])
-        mlflow.log_metric("f1_score", test_data["f1_score"])
-        mlflow.log_metric(
-            "average_precision_score", test_data["average_precision_score"]
-        )
-        mlflow.log_metric("specificity", test_data["specificity"])
-        mlflow.log_metric("auc_score", test_data["auc_score"])
-        mlflow.log_metric("accuracy", test_data["accuracy"])
-        mlflow.log_param("version", meta_data["package-version"])
+        mlflow.log_metric("threshold", evaluation_data["threshold"])
+        mlflow.log_metric("precision", evaluation_data["precision"])
+        mlflow.log_metric("recall", evaluation_data["recall"])
+        mlflow.log_metric("f1_score", evaluation_data["f1_score"])
+        mlflow.log_metric(
+            "average_precision_score", evaluation_data["average_precision_score"]
+        )
+        mlflow.log_metric("specificity", evaluation_data["specificity"])
+        mlflow.log_metric("auc_score", evaluation_data["auc_score"])
+        mlflow.log_metric("accuracy", evaluation_data["accuracy"])
+        mlflow.log_param("version", train_data["package-version"])

         # upload artifacts
         logger.info("Uploading artifacts to MLFlow server...")
-        mlflow.log_artifact(train_meta_file)
-        mlflow.log_artifact(train_log_file)
-        mlflow.log_artifact(train_model_temp_file)
-        mlflow.log_artifact(evaluation_file)
-        mlflow.log_artifact(evaluation_meta_file)
-        mlflow.log_artifact(evaluation_log_file)
+        for f in train_files:
+            mlflow.log_artifact(f)
+        for f in evaluation_files:
+            mlflow.log_artifact(f)

         # delete the temporary file as it is no longer needed after logging.
         train_model_temp_file.unlink()
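Taken together, the change loads the training and evaluation metadata, enforces the size limit before anything is sent to MLflow, and then logs metrics and artifacts from the collected file lists. Below is a minimal standalone sketch of the size check the commit introduces; the `check_file_limit` helper name is hypothetical, while the MB conversion and comparison mirror the diff above.

    import pathlib


    def check_file_limit(files: list[pathlib.Path], file_limit: int) -> None:
        """Raise if any file exceeds `file_limit` MB; a limit of 0 disables the check."""
        for f in files:
            file_size = f.stat().st_size / (1024**2)  # st_size is in bytes; convert to MB
            if file_limit != 0 and file_size > file_limit:
                raise RuntimeError(
                    f"Size of {f} ({file_size:.2f}MB) must be less than or equal to {file_limit}MB."
                )


    # Example usage (hypothetical paths):
    #   check_file_limit(
    #       [pathlib.Path("model.ckpt"), pathlib.Path("evaluation.json")],
    #       file_limit=20,
    #   )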