import itertools
import collections.abc
import contextlib
import hashlib
import io
import logging
import os
import pickle
import time
import warnings
from collections import namedtuple
from datetime import timedelta
from typing import Any, Callable, Dict, Optional, Tuple, Union, List
import torch
from torch._C._distributed_c10d import (
AllreduceCoalescedOptions,
AllreduceOptions,
AllToAllOptions,
_DistributedBackendOptions,
BarrierOptions,
BroadcastOptions,
GatherOptions,
PrefixStore,
ProcessGroup,
ReduceOp,
ReduceOptions,
ReduceScatterOptions,
ScatterOptions,
Store,
DebugLevel,
get_debug_level,
Work
)
from .constants import default_pg_timeout
from .c10d_logger import _exception_logger, _time_logger
from .rendezvous import register_rendezvous_handler, rendezvous # noqa: F401
DistStoreError = torch._C._DistStoreError
__all__ = [
'Backend', 'BackendConfig', 'GroupMember', 'P2POp', 'all_gather', 'all_gather_coalesced',
'all_gather_multigpu', 'all_gather_object', 'all_reduce',
'all_reduce_coalesced', 'all_reduce_multigpu', 'all_to_all',
'all_to_all_single', 'barrier', 'batch_isend_irecv', 'broadcast',
'broadcast_multigpu', 'broadcast_object_list', 'destroy_process_group',
'gather', 'gather_object', 'get_backend_config', 'get_backend', 'get_rank',
'get_world_size', 'group', 'init_process_group', 'irecv',
'is_gloo_available', 'is_initialized', 'is_mpi_available', 'is_backend_available',
'is_nccl_available', 'is_torchelastic_launched', 'is_ucc_available',
'isend', 'monitored_barrier', 'new_group', 'new_subgroups',
'new_subgroups_by_enumeration', 'recv', 'reduce', 'reduce_multigpu',
'reduce_scatter', 'reduce_scatter_multigpu', 'scatter',
'scatter_object_list', 'send', 'supports_complex',
'AllreduceCoalescedOptions', 'AllreduceOptions', 'AllToAllOptions',
'BarrierOptions', 'BroadcastOptions', 'GatherOptions', 'PrefixStore',
'ProcessGroup', 'ReduceOp', 'ReduceOptions', 'ReduceScatterOptions',
'ScatterOptions', 'Store', 'DebugLevel', 'get_debug_level', 'Work',
'default_pg_timeout', 'get_group_rank', 'get_global_rank', 'get_process_group_ranks',
'reduce_op', 'all_gather_into_tensor', 'reduce_scatter_tensor',
]
_MPI_AVAILABLE = True
_NCCL_AVAILABLE = True
_GLOO_AVAILABLE = True
_UCC_AVAILABLE = True
_pickler = pickle.Pickler
_unpickler = pickle.Unpickler
# Change __module__ of all imported types from torch._C._distributed_c10d that are public
def _export_c_types():
_public_types_to_change_module = [
AllreduceCoalescedOptions,
AllreduceOptions,
AllToAllOptions,
BarrierOptions,
BroadcastOptions,
GatherOptions,
PrefixStore,
ProcessGroup,
ReduceOp,
ReduceOptions,
ReduceScatterOptions,
ScatterOptions,
Store,
DebugLevel,
get_debug_level,
Work
]
    for _type in _public_types_to_change_module:
        _type.__module__ = "torch.distributed.distributed_c10d"
_export_c_types()
try:
from torch._C._distributed_c10d import ProcessGroupMPI
ProcessGroupMPI.__module__ = "torch.distributed.distributed_c10d"
__all__ += ["ProcessGroupMPI"]
except ImportError:
_MPI_AVAILABLE = False
try:
from torch._C._distributed_c10d import ProcessGroupNCCL
ProcessGroupNCCL.__module__ = "torch.distributed.distributed_c10d"
__all__ += ["ProcessGroupNCCL"]
except ImportError:
_NCCL_AVAILABLE = False
try:
from torch._C._distributed_c10d import ProcessGroupGloo
from torch._C._distributed_c10d import _ProcessGroupWrapper
ProcessGroupGloo.__module__ = "torch.distributed.distributed_c10d"
__all__ += ["ProcessGroupGloo"]
except ImportError:
_GLOO_AVAILABLE = False
try:
from torch._C._distributed_c10d import ProcessGroupUCC
ProcessGroupUCC.__module__ = "torch.distributed.distributed_c10d"
__all__ += ["ProcessGroupUCC"]
except ImportError:
_UCC_AVAILABLE = False
logger = logging.getLogger(__name__)
PG_WRAPPER_STORE_PREFIX = "pg_wrapper"
# Some reduce ops are not supported by complex numbers and will result in an error.
# We currently provide complex support to the distributed API by viewing
# complex tensors as real (torch.view_as_real), meaning that calling
# these unsupported ops will return garbage values rather than error out.
# (e.g. max(2+3i, 3+2i) = 3+3i)
# We'd like calls to unsupported ops to error out accordingly,
# rather than returning garbage values.
def supports_complex(reduceOp: ReduceOp) -> bool:
denyList = [
ReduceOp.MAX,
ReduceOp.MIN,
ReduceOp.PRODUCT,
ReduceOp.BAND,
ReduceOp.BOR,
ReduceOp.BXOR,
]
return reduceOp not in denyList
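# For example, supports_complex(ReduceOp.SUM) returns True, while
# supports_complex(ReduceOp.MAX) returns False because max is not well defined
# on complex numbers.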
class Backend:
"""
An enum-like class of available backends: GLOO, NCCL, UCC, MPI, and other registered
backends.
The values of this class are lowercase strings, e.g., ``"gloo"``. They can
be accessed as attributes, e.g., ``Backend.NCCL``.
This class can be directly called to parse the string, e.g.,
``Backend(backend_str)`` will check if ``backend_str`` is valid, and
return the parsed lowercase string if so. It also accepts uppercase strings,
e.g., ``Backend("GLOO")`` returns ``"gloo"``.
.. note:: The entry ``Backend.UNDEFINED`` is present but only used as
initial value of some fields. Users should neither use it directly
nor assume its existence.
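
    Example::

        >>> # Illustrative parsing behavior, per the description above:
        >>> Backend("GLOO")
        'gloo'
        >>> Backend.NCCL
        'nccl'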
"""
UNDEFINED = "undefined"
GLOO = "gloo"
NCCL = "nccl"
UCC = "ucc"
MPI = "mpi"
_BackendPlugin = namedtuple("_BackendPlugin", ["creator_fn", "extended_api"])
_plugins: Dict[str, _BackendPlugin] = {}
backend_list = [UNDEFINED, GLOO, NCCL, UCC, MPI]
default_device_backend_map: Dict[str, str] = {
'cpu' : GLOO,
'cuda' : NCCL,
}
backend_capability: Dict[str, List[str]] = {
GLOO : ["cpu", "cuda"],
NCCL : ["cuda"],
UCC : ["cpu", "cuda"],
MPI : ["cpu", "cuda"],
}
backend_type_map: Dict[str, ProcessGroup.BackendType] = {
UNDEFINED: ProcessGroup.BackendType.UNDEFINED,
        GLOO: ProcessGroup.BackendType.GLOO,
NCCL: ProcessGroup.BackendType.NCCL,
UCC: ProcessGroup.BackendType.UCC,
}
def __new__(cls, name: str):
if not isinstance(name, str):
raise ValueError(f"Backend name must be a string, but got: {name}")
value = getattr(Backend, name.upper(), Backend.UNDEFINED)
if value == Backend.UNDEFINED:
value = name.lower()
return value
@classmethod
def register_backend(cls, name, func, extended_api=False, devices: Optional[Union[str, List[str]]] = None):
"""
Registers a new backend with the given name and instantiating function.
        This class method is used by 3rd party ``ProcessGroup`` extensions to
        register new backends.
Args:
name (str): Backend name of the ``ProcessGroup`` extension. It
should match the one in ``init_process_group()``.
func (function): Function handler that instantiates the backend.
The function should be implemented in the backend
extension and takes four arguments, including
``store``, ``rank``, ``world_size``, and ``timeout``.
extended_api (bool, optional): Whether the backend supports extended argument structure.
Default: ``False``. If set to ``True``, the backend
will get an instance of ``c10d::DistributedBackendOptions``, and
a process group options object as defined by the backend implementation.
            devices (str or list of str, optional): device type(s) this backend
                            supports, e.g. "cpu", "cuda", etc. If ``None``,
                            both "cpu" and "cuda" are assumed.
        .. note:: Support for third-party backends is experimental and subject to change.
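
        Example::

            >>> # Hypothetical sketch; ``_create_fake_pg`` is an illustrative
            >>> # creator taking (store, rank, world_size, timeout), not a real API.
            >>> # xdoctest: +SKIP
            >>> def _create_fake_pg(store, rank, world_size, timeout):
            ...     ...
            >>> Backend.register_backend("fake", _create_fake_pg, devices="cpu")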
"""
# Allow UCC plugin if Pytorch is not built with native support.
# TODO: remove this exception once UCC plugin is fully deprecated.
if (name != Backend.UCC or (name == Backend.UCC and is_ucc_available())):
            assert not hasattr(Backend, name.upper()), (
                f"{name.upper()} c10d backend already exists"
            )
            assert name.upper() not in Backend._plugins, (
                f"{name.upper()} c10d backend creator function already exists"
            )
setattr(Backend, name.upper(), name.lower())
Backend.backend_list.append(name.lower())
if devices is not None:
for device in devices:
if device != 'cpu' and device != 'cuda':
Backend.default_device_backend_map[device] = name.lower()
Backend.backend_type_map[name.lower()] = ProcessGroup.BackendType.CUSTOM
# Update device capability matrix in Backend class
if devices is None:
            # This is mainly for backward compatibility with groups like `threaded`:
# assume default devices "cpu" and "cuda", but warn
warnings.warn(
f"Device capability of {name} unspecified, assuming `cpu` and "
"`cuda`. Please specify it via the `devices` argument of "
"`register_backend`."
)
Backend.backend_capability[name.lower()] = ["cpu", "cuda"]
elif isinstance(devices, str):
# Single device string specified. Simply convert to list.
Backend.backend_capability[name.lower()] = [devices]
else:
Backend.backend_capability[name.lower()] = devices
Backend._plugins[name.upper()] = Backend._BackendPlugin(func, extended_api)
class BackendConfig:
def __init__(self, backend: Union[str, Backend]):
self.device_backend_map: Dict[torch.device, Backend] = {}
if backend == Backend.UNDEFINED:
# default config when backend is not specified
# supported since PyTorch 2.0
for device in Backend.default_device_backend_map:
if is_backend_available(Backend.default_device_backend_map[device]):
self.device_backend_map[device] = Backend.default_device_backend_map[device]
elif backend.lower() in Backend.backend_list:
# Cases for when backend is a single string (without device types)
# e.g. "nccl", "gloo", "ucc", "mpi"
supported_devices = Backend.backend_capability[backend.lower()]
backend_val = Backend(backend)
self.device_backend_map = {
device : backend_val for device in supported_devices
}
elif ":" in backend.lower():
# Backend specified in "device:backend" format
# make sure the backend string is in the correct format
# "{device_type1}:{backend1},{device_type2}:{backend2}"
# e.g. "cpu:gloo,cuda:nccl"
backend_str_error_message = f"""The custom backend string argument is invalid: {backend}.
Custom backend string is an experimental feature where the backend string must be in the format:
"<device_type1>:<backend1>,<device_type2>:<backend2>...". e.g. 'cpu:gloo,cuda:nccl'"""
# parse the backend string and populate the device_backend_map
            for device_backend_pair_str in backend.lower().split(","):
                device_backend_pair = device_backend_pair_str.split(":")
                if len(device_backend_pair) != 2:
                    raise ValueError(
                        f"Invalid device:backend pairing: {device_backend_pair_str}. "
                        f"{backend_str_error_message}"
                    )
                device, backend_name = device_backend_pair
                if device in self.device_backend_map:
                    raise ValueError(
                        f"Duplicate device type {device} in backend string: {backend}. "
                        f"{backend_str_error_message}"
                    )
                self.device_backend_map[device] = Backend(backend_name)
else:
# User specified a single backend name whose device capability is
# unknown, assuming it can support the default devices of PyTorch
# (cpu and cuda)
warnings.warn(
f"Device capability of {backend} unknown, assuming `cpu` and "
"`cuda`. You can specify it in `device:backend` format in "
"`init_process_group` call."
)
backend_val = Backend(backend)
self.device_backend_map = {
"cpu" : backend_val,
"cuda" : backend_val,
"xpu" : backend_val,
}
logger.info(
f"Using backend config: {self.device_backend_map}" # noqa: G004
)
def __repr__(self):
# string with all the device:backend pairs separated by commas
return ",".join(f"{device}:{backend}" for device, backend in self.device_backend_map.items())
def get_device_backend_map(self):
return self.device_backend_map
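# Illustrative sketch of BackendConfig parsing (assuming a build where gloo and
# nccl are available): BackendConfig("cpu:gloo,cuda:nccl") produces a
# device_backend_map of {"cpu": "gloo", "cuda": "nccl"}, and repr() of that
# config is "cpu:gloo,cuda:nccl".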
class _reduce_op:
r"""
Deprecated enum-like class for reduction operations: ``SUM``, ``PRODUCT``,
``MIN``, and ``MAX``.
:class:`~torch.distributed.ReduceOp` is recommended to use instead.
"""
def __init__(self):
# __members__ is a dict storing key-value pairs for enum classes
for k, v in ReduceOp.RedOpType.__members__.items():
setattr(self, k, v)
self.__members__ = ReduceOp.RedOpType.__members__
def __getattribute__(self, key):
warnings.warn(
"torch.distributed.reduce_op is deprecated, please use "
"torch.distributed.ReduceOp instead"
)
return object.__getattribute__(self, key)
reduce_op = _reduce_op()
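# Migration note for the deprecation above: prefer ReduceOp directly, e.g.
# ``all_reduce(tensor, op=ReduceOp.SUM)`` rather than ``op=reduce_op.SUM``.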
class P2POp:
"""
A class to build point-to-point operations for ``batch_isend_irecv``.
    This class captures the type of P2P operation, the communication buffer, the
    peer rank, the process group, and the tag. Instances of this class will be
    passed to ``batch_isend_irecv`` for point-to-point communications.
Args:
op (Callable): A function to send data to or receive data from a peer process.
The type of ``op`` is either ``torch.distributed.isend`` or
``torch.distributed.irecv``.
tensor (Tensor): Tensor to send or receive.
peer (int): Destination or source rank.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
tag (int, optional): Tag to match send with recv.
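
    Example::

        >>> # A hedged sketch of pairing ops for ``batch_isend_irecv``; assumes an
        >>> # initialized default group with at least 2 ranks, and ``rank`` holding
        >>> # this process's rank (both are assumptions of this example).
        >>> # xdoctest: +SKIP
        >>> send_tensor = torch.ones(2)
        >>> recv_tensor = torch.zeros(2)
        >>> peer = (rank + 1) % 2
        >>> ops = [P2POp(isend, send_tensor, peer), P2POp(irecv, recv_tensor, peer)]
        >>> reqs = batch_isend_irecv(ops)
        >>> for req in reqs:
        ...     req.wait()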
"""
def __init__(self, op: Callable, tensor: torch.Tensor, peer: int,
group: Optional[ProcessGroup] = None, tag: int = 0):
self.op = op
self.tensor = tensor
self.peer = peer
self.group = group
self.tag = tag
def __new__(cls, op: Callable, tensor: torch.Tensor, peer: int,
group: Optional[ProcessGroup] = None, tag: int = 0):
_check_op(op)
_check_single_tensor(tensor, "tensor")
return object.__new__(cls)
class _CollOp:
"""
A class to capture collective operations.
Args:
op (Callable): A collective function, e.g. ``torch.distributed.all_reduce``.
tensor (Tensor): Tensor to operate on.
        dst_tensor (Tensor, optional): Provided when source and destination tensors are not the same.
redop (ReduceOp, optional): reduce operation.
root (int, optional): root of broadcast or reduce.
"""
def __init__(self, op: Callable, tensor: torch.Tensor, dst_tensor: Optional[torch.Tensor] = None,
redop: Optional[ReduceOp] = None, root: Optional[int] = None):
self.op = op
self.tensor = tensor
self.dst_tensor = dst_tensor
self.redop = redop
self.root = root
# DO NOT USE THESE FIELDS DIRECTLY.
# Use them through the _world object to make sure the _world override mechanism is respected.
_pg_map: Dict[ProcessGroup, Tuple[str, Optional[Store]]] = {}
_pg_names: Dict[ProcessGroup, str] = {}
_pg_group_ranks: Dict[ProcessGroup, Dict[int, int]] = {}
# For a pg, it is a map from ProcessGroup to BackendConfig
_pg_backend_config: Dict[ProcessGroup, str] = {}
_group_count = 0
_tags_to_pg: Dict[str, List[ProcessGroup]] = {}
_pg_to_tag: Dict[ProcessGroup, str] = {}
class _World:
"""
Container class for c10d process group state.
This is used during registration and lookup of PG state.
    .. warning:: This is an experimental API intended to expose the inner workings
       of c10d and is subject to change.
"""
def __init__(self):
self._default_pg = None
self._pg_coalesce_state: Dict[ProcessGroup, List[Union[_CollOp, P2POp]]] = {}
self._pg_default_device: Dict[ProcessGroup, torch.device] = {}
@property
def default_pg(self):
"""
The default ProcessGroup includes all ranks of the cluster.
This is used by c10d APIs when a ProcessGroup is needed but None is provided.
"""
return self._default_pg
@default_pg.setter
def default_pg(self, value):
self._default_pg = value
@property
def pg_map(self) -> Dict[ProcessGroup, Tuple[str, Optional[Store]]]:
"""
Cached process groups
For NCCL and GLOO pg, it is a map from ProcessGroup to (Backend, Store)
For MPI pg, it is a map from ProcessGroup to (Backend, None)
TODO don't expose the map, expose fine grained ops
"""
global _pg_map
return _pg_map
@property
def pg_names(self) -> Dict[ProcessGroup, str]:
"""
Process group's names, map from ProcessGroup to str.
TODO don't expose the map, expose fine grained ops
"""
global _pg_names
return _pg_names
@property
def pg_group_ranks(self) -> Dict[ProcessGroup, Dict[int, int]]:
"""
Process group's global rank to local rank mapping
TODO don't expose the map, expose fine grained ops
"""
global _pg_group_ranks
return _pg_group_ranks
@property
def pg_backend_config(self) -> Dict[ProcessGroup, str]:
"""
Process group's backend config
TODO don't expose the map, expose fine grained ops
"""
global _pg_backend_config
return _pg_backend_config
@property
def group_count(self) -> int:
"""
Process group count for default naming.
TODO don't expose group_count, use something else instead
"""
global _group_count
return _group_count
@group_count.setter
def group_count(self, value):
"""
        Count is used to compute the names of ProcessGroups when using global synchronization.
"""
global _group_count
_group_count = value
@property
def tags_to_pg(self) -> Dict[str, List[ProcessGroup]]:
global _tags_to_pg
return _tags_to_pg
@property
def pg_to_tag(self) -> Dict[ProcessGroup, str]:
global _pg_to_tag
return _pg_to_tag
@property
def pg_coalesce_state(self) -> Dict[ProcessGroup, List[Union[_CollOp, P2POp]]]:
return self._pg_coalesce_state
@property
def pg_default_device(self) -> Dict[ProcessGroup, torch.device]:
return self._pg_default_device
@property
def pg_config_info(self) -> List[Dict[str, Union[int, str]]]:
"""
        Returns a list of dicts, one per process group, giving the group's and its
        backends' unique IDs and configurations (types and ranks).
"""
config_info = []
for pg, backend in self.pg_map.items():
# backend is a tuple with the first element being the backend type ("nccl", etc.)
backend_type = Backend.backend_type_map[backend[0]]
config_info.append(
{
"pg_id": pg._id(),
"backend_id": pg._backend_id(backend_type),
"backend_config": self.pg_backend_config[pg],
"ranks": self.pg_group_ranks[pg],
}
)
return config_info
_world = _World()
"""Holds the singleton instance of ``_World`` used by c10. Experimental extension point to override it"""
class _WorldMeta(type):
"""
    Metaclass of ``group`` and ``GroupMember`` so that they
    can have the class property ``WORLD``.
"""
# Points to the default PG once initialized.
@property
def WORLD(cls) -> Optional[ProcessGroup]:
return _world.default_pg
@WORLD.setter
def WORLD(cls, pg: Optional[ProcessGroup]):
_world.default_pg = pg
class group(metaclass=_WorldMeta):
pass
class GroupMember(metaclass=_WorldMeta):
NON_GROUP_MEMBER = -100
# Default process group state
_default_pg_init_method = None
STORE_BASED_BARRIER_PREFIX = "store_based_barrier_key"
def _get_pg_default_device(group: Optional[ProcessGroup] = None):
"""
Returns the device to use with ``group`` for control flow usage (object collectives, barrier).
    The selection rules are as follows:
1. If user specifies exactly one backend in ``init_process_group`` call:
use that backend
2. Else if user specifies multiple "device:backend" pairs in init_process_group:
If "cpu" is among those pairs, use "cpu" (because the object is in cpu memory);
Otherwise, use the first backend (sort of a random pick).
Args:
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
Returns:
torch.device: The device to use with ``group``.
"""
group = group or _get_default_group()
if group in _world.pg_default_device:
# Previously searched and cached; just return
return _world.pg_default_device[group]
if not isinstance(group, ProcessGroup):
# Provide backward compatibility to cases where `group` passed in is
# actually a Backend (like `ProcessGroupGloo`) rather than a
# `ProcessGroup` in PT 2.0 sense
warnings.warn(
f"You are using a Backend {type(group)} as a ProcessGroup. "
"This usage is deprecated since PyTorch 2.0. Please use a public API "
"of PyTorch Distributed instead."
)
# Most users create Gloo with private API for object collectives
_world.pg_default_device[group] = torch.device("cpu")
return _world.pg_default_device[group]
"""
``group._device_types`` is a property pybind that returns the devices
("cpu", "cuda", etc) supported by ``group``. Can be multiple if the
``group`` supports multiple devices.
"""
devices = group._device_types
if len(devices) == 1:
# User fixed exactly one backend in `init_process_group`
_world.pg_default_device[group] = devices[0]
elif len(devices) == 0:
# No backend has been registered with this PG (maybe because no
# collective has been run?) We pick cpu as the default and hopefully
# this would lazily init Gloo or other available cpu backend.
_world.pg_default_device[group] = torch.device("cpu")
elif torch.device("cpu") in devices:
# There are multiple backends in this PG and cpu is among them.
# cpu is preferred as the object is in cpu memory. No need for device
# copy.
_world.pg_default_device[group] = torch.device("cpu")
else:
        # No cpu in the backend list. Arbitrarily pick the first backend.
_world.pg_default_device[group] = devices[0]
logger.info(
f"Using device {_world.pg_default_device[group]} for object " # noqa: G004
"collectives."
)
return _world.pg_default_device[group]
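# Sketch of the selection rules above: a group initialized with
# "cpu:gloo,cuda:nccl" resolves to torch.device("cpu") (cpu is preferred since
# the objects live in cpu memory), while a group initialized with just "nccl"
# resolves to its single cuda device type.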
@_time_logger
def _store_based_barrier(rank, store, group_name, rendezvous_count, timeout, logging_interval=timedelta(seconds=10)):
"""
Barrier based on store which is used for synchronizing processes after
``init_process_group`` or ``new_group``. Intended to be used only with
those two methods and is not a generic alternative to ``barrier()``.
"""
store_key = f"{STORE_BASED_BARRIER_PREFIX}:{group_name}"
store.add(store_key, 1)
logger.info("Added key: %s to store for rank: %s", store_key, rank)
# Now wait for all workers to check in with the store.
world_size = rendezvous_count
worker_count = store.add(store_key, 0)
last_worker_key = f"{store_key}:last_worker"
if worker_count == world_size:
store.set(last_worker_key, "1")
# adjust the timeout to be at least 10secs + 1sec per thousand ranks to reduce the odds of timeout
# this value was empirically found while scale testing.
logging_interval = max(logging_interval, timedelta(seconds=10 + world_size / 1000))
start = time.time()
while True:
try:
            # store.wait throws after logging_interval; each time it does, we log
            # the group's status, and once the overall timeout is exceeded we
            # raise an error instead.
store.wait([last_worker_key], logging_interval)
break
        except RuntimeError:
worker_count = store.add(store_key, 0)
# Print status periodically to keep track.
logger.info(
"Waiting in store based barrier to initialize process group for "
"rank: %s, key: %s (world_size=%s, num_workers_joined=%s, timeout=%s)",
rank, store_key, world_size, worker_count, timeout
)
if timedelta(seconds=(time.time() - start)) > timeout:
raise DistStoreError(
"Timed out initializing process group in store based barrier on "
"rank {}, for key: {} (world_size={}, num_workers_joined={}, timeout={})".format(
rank, store_key, world_size, worker_count, timeout
)
)
logger.info(
"Rank %s: Completed store-based barrier for key:%s with %s nodes.", rank, store_key, world_size
)
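# The barrier protocol above in miniature: every rank increments the shared
# counter via store.add(store_key, 1); the rank whose increment brings the
# counter to world_size sets the last_worker sentinel key; all ranks block in
# store.wait on that sentinel, logging progress every logging_interval until it
# appears or the overall timeout expires.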
def _rank_not_in_group(group: ProcessGroup):
"""
Helper that checks if the current process's rank is not in a given group.
"""
if group is None:
return False
return group == GroupMember.NON_GROUP_MEMBER
def _warn_not_in_group(op_name):
global_rank = -1 if GroupMember.WORLD is None else GroupMember.WORLD.rank()
warnings.warn(
f"Running {op_name} on global rank {global_rank} which does not "
"belong to the given group."
)
def get_group_rank(group: ProcessGroup, global_rank: int) -> int:
"""
Translate a global rank into a group rank.
    ``global_rank`` must be part of ``group``, otherwise this raises ValueError.
Args:
group (ProcessGroup): ProcessGroup to find the relative rank.
global_rank (int): Global rank to query.
Returns:
Group rank of ``global_rank`` relative to ``group``
N.B. calling this function on the default process group returns identity
"""
if group is GroupMember.WORLD:
return global_rank
if group not in _world.pg_group_ranks:
raise ValueError(f"Group {group} is not registered, please create group with torch.distributed.new_group API")
group_ranks = _world.pg_group_ranks[group]
if global_rank not in group_ranks:
raise ValueError(f"Global rank {global_rank} is not part of group {group}")
return group_ranks[global_rank]
def get_global_rank(group: ProcessGroup, group_rank: int) -> int:
"""
Translate a group rank into a global rank.
    ``group_rank`` must be part of ``group``, otherwise this raises ValueError.
Args:
group (ProcessGroup): ProcessGroup to find the global rank from.
group_rank (int): Group rank to query.
Returns:
Global rank of ``group_rank`` relative to ``group``
N.B. calling this function on the default process group returns identity
"""
if group is GroupMember.WORLD:
return group_rank
if group not in _world.pg_group_ranks:
raise ValueError(f"Group {group} is not registered, please create group with torch.distributed.new_group API")
for rank, grp_rank in _world.pg_group_ranks[group].items():
if grp_rank == group_rank:
return rank
raise ValueError(f"Group rank {group_rank} is not part of group {group}")
# TODO: remove this once the ecosystem moves away from it.
def _get_global_rank(group, rank):
"""
This method is deprecated, please use get_global_rank.
"""
warnings.warn(
"torch.distributed.distributed_c10d._get_global_rank is deprecated "
"please use torch.distributed.distributed_c10d.get_global_rank instead"
)
return get_global_rank(group, rank)
def get_process_group_ranks(group: ProcessGroup):
"""
Get all ranks associated with ``group``.
Args:
group (ProcessGroup): ProcessGroup to get all ranks from.
Returns:
List of global ranks ordered by group rank.
"""
return list(_world.pg_group_ranks[group].keys())
def _get_group_size(group):
"""
Helper that gets a given group's world size.
"""
if group is GroupMember.WORLD or group is None:
default_pg = _get_default_group()
return default_pg.size()
return group.size()
def _check_single_tensor(param, param_name):
"""
Helper to check that the parameter ``param_name`` is a single tensor.
"""
if not isinstance(param, torch.Tensor):
raise TypeError(
f"Invalid function argument. Expected parameter `{param_name}` to be of type torch.Tensor."
)
def _check_tensor_list(param, param_name):
"""
Helper to check that the parameter ``param_name`` is a list of tensors.
"""
if not isinstance(param, list) or not all(
isinstance(p, torch.Tensor) for p in param
):
raise TypeError(
f"Invalid function argument. Expected parameter `{param_name}` to be of type List[torch.Tensor]."
)
def _as_iterable(obj) -> collections.abc.Iterable:
return obj if isinstance(obj, list) else (obj,)
def _ensure_all_tensors_same_dtype(*tensors) -> None:
last_dtype = None
for tensor in itertools.chain(*map(_as_iterable, tensors)):
tensor_dtype = tensor.dtype
        # Mixing complex and its element type is allowed
        if tensor_dtype.is_complex:
            tensor_dtype = torch.float32 if tensor_dtype == torch.complex64 else torch.float64
if last_dtype is None:
last_dtype = tensor_dtype
else:
if last_dtype != tensor_dtype:
                raise ValueError(
                    "Invalid usage of tensors with different dtypes. "
                    f"Found {last_dtype} and {tensor.dtype}."
                )
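# For example, mixing a torch.complex64 tensor with a torch.float32 tensor is
# accepted (both normalize to float32 above), while mixing torch.float32 with
# torch.float64 raises ValueError.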
def _check_op(op):
"""
Helper to check that the ``op`` is either isend or irecv.
"""
if op not in [isend, irecv]:
raise ValueError(
"Invalid ``op``. Expected ``op`` "
"to be of type ``torch.distributed.isend`` or "
"``torch.distributed.irecv``."
)
def _check_p2p_op_list(p2p_op_list):
"""
Helper to check that the ``p2p_op_list`` is a list of P2POp instances and
all ops use the same group.
"""
if not isinstance(p2p_op_list, list) or not all(
isinstance(p2p_op, P2POp) for p2p_op in p2p_op_list
):
        raise ValueError(
            "Invalid ``p2p_op_list``. Each op is expected "
            "to be of type ``torch.distributed.P2POp``."
        )
group = p2p_op_list[0].group
if not all(group == p2p_op.group for p2p_op in p2p_op_list):
raise ValueError("All ops need to use the same group.")
def is_mpi_available() -> bool:
"""
Checks if the MPI backend is available.
"""
return _MPI_AVAILABLE
def is_nccl_available() -> bool:
"""
Checks if the NCCL backend is available.
"""
return _NCCL_AVAILABLE
def is_gloo_available() -> bool:
"""
Checks if the Gloo backend is available.
"""
return _GLOO_AVAILABLE
def is_ucc_available() -> bool:
"""
Checks if the UCC backend is available.
"""
return _UCC_AVAILABLE
def is_backend_available(backend: str) -> bool:
"""
    Checks whether the given backend is available; this covers both the built-in
    backends and third-party backends registered through ``Backend.register_backend``.
Args:
backend (str): Backend name.
Returns:
bool: Returns true if the backend is available otherwise false.
"""
# If the backend has an ``is_backend_available`` function, return the result of that function directly
available_func = getattr(torch.distributed, f"is_{backend.lower()}_available", None)
if available_func:
return available_func()
return backend.lower() in Backend.backend_list
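# For example, is_backend_available("nccl") delegates to is_nccl_available(),
# while a backend name without such a helper is checked against
# Backend.backend_list.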
def is_initialized() -> bool:
"""
    Checks if the default process group has been initialized.
"""
return GroupMember.WORLD is not None
def is_torchelastic_launched() -> bool:
"""
Checks whether this process was launched with ``torch.distributed.elastic``
(aka torchelastic). The existence of ``TORCHELASTIC_RUN_ID`` environment
variable is used as a proxy to determine whether the current process
was launched with torchelastic. This is a reasonable proxy since
``TORCHELASTIC_RUN_ID`` maps to the rendezvous id which is always a
    non-null value indicating the job id for peer discovery purposes.
"""
return os.getenv("TORCHELASTIC_RUN_ID") is not None
def _is_barrier_after_init() -> int:
# Environment variable to control whether process group should perform a
# barrier after its init. Default value is 0, i.e. no barrier. If you
    # experience issues with this setting, you may set
# `TORCH_DIST_INIT_BARRIER=1` to add the barrier.
return int(os.getenv("TORCH_DIST_INIT_BARRIER", "0"))
def _get_default_group():
"""
    Gets the default process group created by ``init_process_group``.
"""
if not is_initialized():
raise ValueError(
"Default process group has not been initialized, "
"please make sure to call init_process_group."
)
return GroupMember.WORLD
def _get_default_store():
"""
    Gets the default store created by ``init_process_group``.
"""
if not is_initialized():
raise ValueError(
"Default process group has not been initialized, "
"please make sure to call init_process_group."
)
default_pg = _get_default_group()
_, default_store = _world.pg_map[default_pg]
return default_store
def _update_default_pg(pg):
_world.default_pg = pg
def get_backend_config(group: Optional[ProcessGroup] = None) -> str:
if group is None:
pg = _get_default_group()
else:
pg = group
if _rank_not_in_group(pg):
raise ValueError("Invalid process group specified")
backend_config = _world.pg_backend_config.get(pg)
assert backend_config is not None
return str(backend_config)
def get_backend(group: Optional[ProcessGroup] = None) -> str:
"""
Returns the backend of the given process group.
Args:
group (ProcessGroup, optional): The process group to work on. The
default is the general main process group. If another specific group
is specified, the calling process must be part of :attr:`group`.
Returns:
The backend of the given process group as a lower case string.
"""
if group is None:
pg = _get_default_group()
else:
pg = group
if _rank_not_in_group(pg):
raise ValueError("Invalid process group specified")
pg_store = _world.pg_map[pg] if pg in _world.pg_map else None
assert pg_store is not None