Making use of LightningDataModule and simplifying data loading

Merged: Daniel CARRON requested to merge add-datamodule into main
2 files changed: +6 −6
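For context, `LightningDataModule` is Lightning's container for data-loading logic: a `setup()` hook prepares the datasets for the current stage, and the `*_dataloader()` hooks hand them to the trainer. The skeleton below is a generic illustration of that contract; `ToyDataModule` and its dataset are hypothetical stand-ins, not code from this repository's `ConcatDataModule`:

```python
import lightning
import torch
import torch.utils.data


class ToyDataModule(lightning.LightningDataModule):
    """Hypothetical module illustrating the hooks ConcatDataModule implements."""

    def __init__(self, batch_size: int = 4) -> None:
        super().__init__()
        self.batch_size = batch_size
        # datasets that have been setup() for the current stage
        self._datasets: dict[str, torch.utils.data.Dataset] = {}

    def setup(self, stage: str) -> None:
        # real code would load/split data per stage ("fit", "validate", "test", ...)
        self._datasets["train"] = torch.utils.data.TensorDataset(
            torch.arange(16.0).reshape(8, 2)
        )

    def train_dataloader(self) -> torch.utils.data.DataLoader:
        return torch.utils.data.DataLoader(
            self._datasets["train"], batch_size=self.batch_size
        )
```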
```diff
@@ -528,11 +528,11 @@ class ConcatDataModule(lightning.LightningDataModule):
         self.parallel = parallel  # immutable, otherwise would need to call
 
         self.pin_memory = (
-            torch.cuda.is_available() or torch.backends.mps.is_available()
+            torch.cuda.is_available() or torch.backends.mps.is_available()  # type: ignore
         )  # should only be true if GPU available and using it
 
         # datasets that have been setup() for the current stage
-        self._datasets: CachingDataModule.DatasetDictionary = {}
+        self._datasets: ConcatDataModule.DatasetDictionary = {}
 
     @property
     def parallel(self) -> int:
```
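This hunk fixes a stale annotation (`CachingDataModule` → `ConcatDataModule`, presumably left over from a rename) and adds `# type: ignore` to silence a static-typing complaint about `torch.backends.mps` in some torch stub versions. The computed flag is the one usually passed to `DataLoader(pin_memory=...)` so host tensors are page-locked before transfer to an accelerator. A minimal sketch of that wiring, with a hypothetical `_ToyDataset` standing in for the module's real datasets:

```python
import torch
import torch.utils.data


class _ToyDataset(torch.utils.data.Dataset):
    """Hypothetical stand-in for the datasets ConcatDataModule manages."""

    def __len__(self) -> int:
        return 8

    def __getitem__(self, idx: int) -> torch.Tensor:
        return torch.full((3,), float(idx))


# Mirrors the accelerator check in the diff: pin host memory only when a
# CUDA or MPS device could actually consume the pinned tensors.
pin_memory = torch.cuda.is_available() or torch.backends.mps.is_available()

loader = torch.utils.data.DataLoader(
    _ToyDataset(),
    batch_size=4,
    pin_memory=pin_memory,  # page-locked buffers speed up host-to-device copies
)

for batch in loader:
    print(batch.shape)  # torch.Size([4, 3])
```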
```diff
@@ -584,7 +584,7 @@ class ConcatDataModule(lightning.LightningDataModule):
         num_workers = value or multiprocessing.cpu_count()
         self._dataloader_multiproc["num_workers"] = num_workers
 
-        if num_workers > 0 and sys.platform == "darwin":
+        if num_workers > 0:  # and sys.platform == "darwin":
             self._dataloader_multiproc[
                 "multiprocessing_context"
             ] = multiprocessing.get_context("spawn")
```
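This hunk drops the `sys.platform == "darwin"` guard, so the "spawn" start method is requested whenever worker processes are used, on every platform, rather than only on macOS (where "fork" is unsafe with threaded libraries). The kwargs collected in `_dataloader_multiproc` are presumably splatted into `DataLoader` elsewhere; that call is not shown in this diff, so the sketch below assumes it:

```python
import multiprocessing

import torch.utils.data


class _ToyDataset(torch.utils.data.Dataset):
    """Hypothetical dataset; the real ones come from ConcatDataModule.setup()."""

    def __len__(self) -> int:
        return 8

    def __getitem__(self, idx: int) -> int:
        return idx


def make_loader(parallel: int = -1) -> torch.utils.data.DataLoader:
    dataloader_multiproc: dict = {}
    # mirrors the diff: a non-positive value falls back to all CPUs
    num_workers = parallel if parallel > 0 else multiprocessing.cpu_count()
    dataloader_multiproc["num_workers"] = num_workers
    if num_workers > 0:  # spawn on every platform now, not just macOS
        dataloader_multiproc["multiprocessing_context"] = multiprocessing.get_context(
            "spawn"
        )
    return torch.utils.data.DataLoader(
        _ToyDataset(),
        batch_size=4,
        **dataloader_multiproc,  # worker count and start method applied together
    )


if __name__ == "__main__":  # spawn re-imports this module, so guard the entry point
    for batch in make_loader(parallel=2):
        print(batch)
```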