We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 99052bb · commit 8600c49 — Copy full SHA for 8600c49
torchtune/training/_distributed.py
@@ -111,7 +111,7 @@ def set_torch_num_threads() -> None:
111
things like CPU affinity is set.
112
"""
113
num_threads = os.cpu_count() // (
114
- torch.distributed.get_world_size() if torch.distributed.is_initialized() else 1
+ torch.cuda.device_count() if torch.cuda.is_available() else 1
115
)
116
torch.set_num_threads(num_threads)
117
_log.info(f"Set intra op parallelism no. of threads to {num_threads}")
0 commit comments