# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
r"""Definition of the StatefulDataLoader and associated iterators.
This file is a stand-in for torch.utils.data.dataloader, and includes a
StatefulDataLoader, which inherits from DataLoader and adds
state_dict/load_state_dict methods, as well as implementations for
single and multi-process iterators which are also stateful.
Where possible, we import the original definitions from torch.utils.data.dataloader,
and use inheritance for base classes only (StatefulDataLoader, _StatefulBaseDataLoaderIter).
For the single and multi-process iterator implementations, we fork the code to avoid a
diamond-shaped multiple-inheritance scheme.
"""
import collections
import functools
import itertools
import logging
import queue
import threading
from typing import Any, Dict, Iterable, List, Optional, TypeVar, Union
import torch
import torch.multiprocessing as multiprocessing
import torch.utils.data._utils.worker
import torch.utils.data.graph_settings
from torch._utils import ExceptionWrapper
from torch.utils.data import (
_utils,
DataLoader,
Dataset,
IterableDataset,
IterDataPipe,
MapDataPipe,
Sampler,
SequentialSampler,
)
from torch.utils.data.dataloader import _BaseDataLoaderIter, _InfiniteConstantSampler
from torch.utils.data.datapipes.datapipe import _IterDataPipeSerializationWrapper, _MapDataPipeSerializationWrapper
from .incremental_state import (
_DATASET_ITER_STATE,
_DATASET_STATE,
_FETCHER_ENDED,
_FETCHER_STATE,
_IncrementalWorkerState,
_WORKER_ID,
)
from .sampler import BatchSampler, RandomSampler
from .stateful import Stateful
from .worker import _AckStartup, _worker_loop, try_to_deserialize, try_to_serialize
__all__ = [
"StatefulDataLoader",
"get_worker_info",
"default_collate",
"default_convert",
]
from torch.utils.data.dataloader import (
_collate_fn_t,
_DatasetKind,
_sharding_worker_init_fn,
_worker_init_fn_t,
default_collate,
default_convert,
get_worker_info,
)
_T_co = TypeVar("_T_co", covariant=True)
logger = logging.getLogger(__name__)
_INDEX_SAMPLER_STATE = "_index_sampler_state"
_SAMPLER_ITER_STATE = "_sampler_iter_state"
_SAMPLER_ITER_YIELDED = "_sampler_iter_yielded"
_ITERABLEDATASET_LEN_CALLED = "_IterableDataset_len_called"
_SHARED_SEED = "_shared_seed"
_ITERATOR_FINISHED = "_iterator_finished"
class StatefulDataLoader(DataLoader[_T_co]):
r"""
This is a drop-in replacement for ``torch.utils.data.DataLoader``
that implements state_dict and load_state_dict methods, enabling mid-epoch
checkpointing.
All arguments are identical to ``torch.utils.data.DataLoader``, with
a new kwarg: ``snapshot_every_n_steps``.
Args:
dataset (Dataset): dataset from which to load the data.
batch_size (int, optional): how many samples per batch to load
(default: ``1``).
shuffle (bool, optional): set to ``True`` to have the data reshuffled
at every epoch (default: ``False``).
sampler (Sampler or Iterable, optional): defines the strategy to draw
samples from the dataset. Can be any ``Iterable`` with ``__len__``
implemented. If specified, :attr:`shuffle` must not be specified.
batch_sampler (Sampler or Iterable, optional): like :attr:`sampler`, but
returns a batch of indices at a time. Mutually exclusive with
:attr:`batch_size`, :attr:`shuffle`, :attr:`sampler`,
and :attr:`drop_last`.
num_workers (int, optional): how many subprocesses to use for data
loading. ``0`` means that the data will be loaded in the main process.
(default: ``0``)
collate_fn (Callable, optional): merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a
map-style dataset.
pin_memory (bool, optional): If ``True``, the data loader will copy Tensors
into device/CUDA pinned memory before returning them. If your data elements
are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type,
see the example below.
drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
if the dataset size is not divisible by the batch size. If ``False`` and
the size of dataset is not divisible by the batch size, then the last batch
will be smaller. (default: ``False``)
timeout (numeric, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative. (default: ``0``)
worker_init_fn (Callable, optional): If not ``None``, this will be called on each
worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as
input, after seeding and before data loading. (default: ``None``)
multiprocessing_context (str or multiprocessing.context.BaseContext, optional): If
``None``, the default `multiprocessing context`_ of your operating system will
be used. (default: ``None``)
generator (torch.Generator, optional): If not ``None``, this RNG will be used
by RandomSampler to generate random indexes and multiprocessing to generate
``base_seed`` for workers. (default: ``None``)
prefetch_factor (int, optional, keyword-only arg): Number of batches loaded
in advance by each worker. ``2`` means there will be a total of
2 * num_workers batches prefetched across all workers. (The default depends
on the value of num_workers: if ``num_workers=0`` the default is ``None``;
otherwise, if ``num_workers > 0``, the default is ``2``.)
persistent_workers (bool, optional): If ``True``, the data loader will not shut down
the worker processes after a dataset has been consumed once. This keeps
the workers' `Dataset` instances alive. (default: ``False``)
pin_memory_device (str, optional): the device to :attr:`pin_memory` to if ``pin_memory`` is
``True``.
snapshot_every_n_steps (int, optional): Defines how often state is
transferred from the dataloader workers to the dataloader. By default it is
set to ``1``, i.e., state is transferred every step. If the state is large,
this value can be increased (and ideally set to the frequency of training
checkpointing) to reduce the overhead of transferring state every step.
.. warning:: If the ``spawn`` start method is used, :attr:`worker_init_fn`
cannot be an unpicklable object, e.g., a lambda function. See
`multiprocessing-best-practices <https://pytorch.org/docs/stable/notes/multiprocessing.html#multiprocessing-best-practices>`_ for more details related
to multiprocessing in PyTorch.
.. warning:: ``len(dataloader)`` heuristic is based on the length of the sampler used.
When :attr:`dataset` is an :class:`~torch.utils.data.IterableDataset`,
it instead returns an estimate based on ``len(dataset) / batch_size``, with proper
rounding depending on :attr:`drop_last`, regardless of multi-process loading
configurations. This represents the best guess PyTorch can make because PyTorch
trusts user :attr:`dataset` code in correctly handling multi-process
loading to avoid duplicate data.
However, if sharding results in multiple workers having incomplete last batches,
this estimate can still be inaccurate, because (1) an otherwise complete batch can
be broken into multiple ones and (2) more than one batch worth of samples can be
dropped when :attr:`drop_last` is set. Unfortunately, PyTorch can not detect such
cases in general.
See `Dataset Types <https://pytorch.org/docs/stable/data.html>`_ for more details on these two types of datasets and how
:class:`~torch.utils.data.IterableDataset` interacts with
`Multi-process data loading <https://pytorch.org/docs/stable/data.html#multi-process-data-loading>`_.
.. warning:: See the `Reproducibility <https://pytorch.org/docs/stable/notes/randomness.html#reproducibility>`_, `Dataloader-workers-random-seed <https://pytorch.org/docs/stable/notes/faq.html#dataloader-workers-random-seed>`_, and
`Data-loading-randomness <https://pytorch.org/docs/stable/data.html#data-loading-randomness>`_ notes for random-seed-related questions.
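Example:
    A minimal sketch of mid-epoch checkpointing and resumption
    (``MyDataset`` is a hypothetical placeholder for any map-style dataset)::

        dl = StatefulDataLoader(MyDataset(), batch_size=2, num_workers=2)
        for step, batch in enumerate(dl):
            if step == 10:
                checkpoint = dl.state_dict()  # snapshot mid-epoch state
                break

        # Later, possibly after a restart, resume from the checkpoint.
        dl = StatefulDataLoader(MyDataset(), batch_size=2, num_workers=2)
        dl.load_state_dict(checkpoint)  # call before iterating
        for batch in dl:
            ...  # iteration continues after the checkpointed step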
.. _multiprocessing context:
https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
"""
_iterator: Optional["_StatefulBaseDataLoaderIter"]
def __init__(
self,
dataset: Dataset[_T_co],
batch_size: Optional[int] = 1,
shuffle: Optional[bool] = None,
sampler: Union[Sampler, Iterable, None] = None,
batch_sampler: Union[Sampler[List], Iterable[List], None] = None,
num_workers: int = 0,
collate_fn: Optional[_collate_fn_t] = None,
pin_memory: bool = False,
drop_last: bool = False,
timeout: float = 0,
worker_init_fn: Optional[_worker_init_fn_t] = None,
multiprocessing_context=None,
generator=None,
*,
prefetch_factor: Optional[int] = None,
persistent_workers: bool = False,
pin_memory_device: str = "",
snapshot_every_n_steps: Optional[int] = 1,
):
torch._C._log_api_usage_once("python.stateful_data_loader")
if num_workers < 0:
raise ValueError(
"num_workers option should be non-negative; " "use num_workers=0 to disable multiprocessing."
)
if timeout < 0:
raise ValueError("timeout option should be non-negative")
if num_workers == 0 and prefetch_factor is not None:
raise ValueError(
"prefetch_factor option could only be specified in multiprocessing."
"let num_workers > 0 to enable multiprocessing, otherwise set prefetch_factor to None."
)
elif num_workers > 0 and prefetch_factor is None:
prefetch_factor = 2
elif prefetch_factor is not None and prefetch_factor < 0:
raise ValueError("prefetch_factor option should be non-negative")
if persistent_workers and num_workers == 0:
raise ValueError("persistent_workers option needs num_workers > 0")
self.dataset = dataset
self.num_workers = num_workers
self.prefetch_factor = prefetch_factor
self.pin_memory = pin_memory
self.pin_memory_device = pin_memory_device
self.timeout = timeout
self.worker_init_fn = worker_init_fn
self.multiprocessing_context = multiprocessing_context
# Adds forward compatibilities so classic DataLoader can work with DataPipes:
# _DataPipeSerializationWrapper container makes it easier to serialize without redefining pickler
if isinstance(self.dataset, IterDataPipe):
self.dataset = _IterDataPipeSerializationWrapper(self.dataset)
elif isinstance(self.dataset, MapDataPipe):
self.dataset = _MapDataPipeSerializationWrapper(self.dataset)
# Arg-check dataset related before checking samplers because we want to
# tell users that iterable-style datasets are incompatible with custom
# samplers first, so that they don't learn that this combo doesn't work
# after spending time fixing the custom sampler errors.
if isinstance(dataset, IterableDataset):
self._dataset_kind = _DatasetKind.Iterable
# NOTE [ Custom Samplers and IterableDataset ]
#
# `IterableDataset` does not support custom `batch_sampler` or
# `sampler` since the key is irrelevant (unless we support
# generator-style dataset one day...).
#
# For `sampler`, we always create a dummy sampler. This is an
# infinite sampler even when the dataset may have an implemented
# finite `__len__` because in multi-process data loading, naive
# settings will return duplicated data (which may be desired), and
# thus using a sampler with length matching that of dataset will
# cause data loss (you may have duplicates of the first couple of
# batches, but never see anything afterwards). Therefore,
# `IterableDataset` always uses an infinite sampler, an instance of
# `_InfiniteConstantSampler` imported above.
#
# A custom `batch_sampler` essentially only controls the batch size.
# However, it is unclear how useful it would be since an iterable-style
# dataset can handle that within itself. Moreover, it is pointless
# in multi-process data loading as the assignment order of batches
# to workers is an implementation detail so users can not control
# how to batchify each worker's iterable. Thus, we disable this
# option. If this turns out to be useful in future, we can re-enable
# this, and support custom samplers that specify the assignments to
# specific workers.
if isinstance(dataset, IterDataPipe):
if shuffle is not None:
dataset = torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)
# We cannot check `shuffle is not None` here, since previously `shuffle=False` was the default.
elif shuffle not in {False, None}:
raise ValueError(
f"DataLoader with IterableDataset: expected unspecified shuffle option, but got shuffle={shuffle}"
)
if sampler is not None:
# See NOTE [ Custom Samplers and IterableDataset ]
raise ValueError(
f"DataLoader with IterableDataset: expected unspecified sampler option, but got sampler={sampler}"
)
elif batch_sampler is not None:
# See NOTE [ Custom Samplers and IterableDataset ]
raise ValueError(
"DataLoader with IterableDataset: expected unspecified "
f"batch_sampler option, but got batch_sampler={batch_sampler}"
)
else:
shuffle = bool(shuffle)
self._dataset_kind = _DatasetKind.Map
if sampler is not None and shuffle:
raise ValueError("sampler option is mutually exclusive with " "shuffle")
if batch_sampler is not None:
# auto_collation with custom batch_sampler
if batch_size != 1 or shuffle or sampler is not None or drop_last:
raise ValueError(
"batch_sampler option is mutually exclusive " "with batch_size, shuffle, sampler, and " "drop_last"
)
batch_size = None
drop_last = False
elif batch_size is None:
# no auto_collation
if drop_last:
raise ValueError(
"batch_size=None option disables auto-batching " "and is mutually exclusive with drop_last"
)
if sampler is None: # give default samplers
if self._dataset_kind == _DatasetKind.Iterable:
# See NOTE [ Custom Samplers and IterableDataset ]
sampler = _InfiniteConstantSampler()
else: # map-style
if shuffle:
sampler = RandomSampler(dataset, generator=generator) # type: ignore[arg-type]
else:
sampler = SequentialSampler(dataset) # type: ignore[arg-type]
if batch_size is not None and batch_sampler is None:
# auto_collation without custom batch_sampler
batch_sampler = BatchSampler(sampler, batch_size, drop_last)
self.batch_size = batch_size
self.drop_last = drop_last
self.sampler = sampler
self.batch_sampler = batch_sampler
self.generator = generator
if collate_fn is None:
if self._auto_collation:
collate_fn = _utils.collate.default_collate
else:
collate_fn = _utils.collate.default_convert
self.collate_fn = collate_fn
self.persistent_workers = persistent_workers
# set DataLoader's __initialized attribute.
self._DataLoader__initialized = True
self._IterableDataset_len_called = None # See NOTE [ IterableDataset and __len__ ]
self._iterator = None
self.check_worker_number_rationality()
self.snapshot_every_n_steps = snapshot_every_n_steps
self.next_iter_state: Optional[Dict[str, Any]] = None
# When a state_dict is requested before __iter__ is called,
# we create the __iter__ so we can get a copy of the initial state from
# its workers. In those cases, we can avoid creating a new multiprocessing
# iterator on the next __iter__ call, and this flag is used for those cases.
self._initial_iter_for_state_dict = False
torch.set_vital("Dataloader", "enabled", "True") # type: ignore[attr-defined]
def _get_iterator(self) -> "_StatefulBaseDataLoaderIter":
it: _StatefulBaseDataLoaderIter
if self.num_workers == 0:
it = _StatefulSingleProcessDataLoaderIter(self, self.next_iter_state)
else:
self.check_worker_number_rationality()
it = _StatefulMultiProcessingDataLoaderIter(self, self.next_iter_state)
self.next_iter_state = None
return it
def __iter__(self) -> "_BaseDataLoaderIter":
# When using a single worker, the returned iterator should be
# created every time to avoid resetting its state.
# However, in the multi-worker case, the iterator is only
# created once in the lifetime of the DataLoader object so
# that workers can be reused.
if self._initial_iter_for_state_dict:
self._initial_iter_for_state_dict = False
assert self._iterator is not None
elif self.persistent_workers and self.num_workers > 0:
if self._iterator is None:
self._iterator = self._get_iterator()
else:
self._iterator._reset(self)
else:
self._iterator = self._get_iterator()
if self._iterator._finished:
if self.persistent_workers:
self._iterator._reset(self)
else:
self._iterator = self._get_iterator()
return self._iterator
def state_dict(self) -> Dict[str, Any]:
if self._iterator is None:
self._iterator = self._get_iterator()
self._initial_iter_for_state_dict = True
return self._iterator.state_dict()
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
self._iterator = None
self._initial_iter_for_state_dict = False
if state_dict == {}:
return
self.next_iter_state = state_dict
class _StatefulBaseDataLoaderIter(_BaseDataLoaderIter):
def __init__(self, loader: StatefulDataLoader) -> None:
super().__init__(loader)
self._sampler_iter_yielded = 0
self._finished = False
def _reset(self, loader, first_iter=False):
super()._reset(loader, first_iter)
self._sampler_iter_yielded = 0
self._finished = False
def _next_index(self):
idx = super()._next_index() # may raise StopIteration
self._sampler_iter_yielded += 1
return idx
def state_dict(self):
pass
def __next__(self):
try:
return super().__next__()
except StopIteration:
self._finished = True
raise
class _StatefulSingleProcessDataLoaderIter(_StatefulBaseDataLoaderIter):
"""We avoid using inheritance here to share code because we quickly run into
a diamond which becomes difficult to reason about, so instead we fork the
code from torch.utils.data.dataloader for _SingleProcessDataLoaderIter and
_MultiProcessDataLoaderIter. This allows us to satisfy the original
dataloader __iter__'s return type of _BaseDataLoaderIter (since
_StatefulBaseDataLoaderIter inherits from _BaseDataLoaderIter).
"""
_NUM_YIELDED = "_num_yielded"
def __init__(self, loader, next_iter_state=None):
super().__init__(loader)
assert self._timeout == 0
assert self._num_workers == 0
# Adds forward compatibilities so classic DataLoader can work with DataPipes:
# Taking care of distributed sharding
if isinstance(self._dataset, (IterDataPipe, MapDataPipe)):
# For BC, use default SHARDING_PRIORITIES
torch.utils.data.graph_settings.apply_sharding(self._dataset, self._world_size, self._rank)
if next_iter_state is not None:
self.load_state_dict(next_iter_state)
else:
self._dataset_fetcher = _DatasetKind.create_fetcher(
self._dataset_kind, self._dataset, self._auto_collation, self._collate_fn, self._drop_last
)
def _next_data(self):
index = self._next_index() # may raise StopIteration
data = self._dataset_fetcher.fetch(index) # may raise StopIteration
if self._pin_memory:
data = _utils.pin_memory.pin_memory(data, self._pin_memory_device)
return data
def state_dict(self):
if self._dataset_kind == _DatasetKind.Iterable:
fetcher_state = {
_DATASET_ITER_STATE: try_to_serialize(self._dataset_fetcher.dataset_iter),
_FETCHER_ENDED: self._dataset_fetcher.ended,
}
dataset_state = None
if self._dataset_fetcher.dataset_iter is not self._dataset_fetcher.dataset:
dataset_state = try_to_serialize(self._dataset_fetcher.dataset)
else:
fetcher_state = None
dataset_state = try_to_serialize(self._dataset_fetcher.dataset)
state_dict = {
_INDEX_SAMPLER_STATE: try_to_serialize(self._index_sampler),
_SAMPLER_ITER_STATE: try_to_serialize(self._sampler_iter),
_SAMPLER_ITER_YIELDED: self._sampler_iter_yielded,
self._NUM_YIELDED: self._num_yielded,
_ITERABLEDATASET_LEN_CALLED: self._IterableDataset_len_called,
_SHARED_SEED: self._shared_seed,
_FETCHER_STATE: fetcher_state,
_DATASET_STATE: dataset_state,
_ITERATOR_FINISHED: self._finished,
}
return state_dict
def load_state_dict(self, state_dict):
assert (
self._NUM_YIELDED in state_dict
), f"State doesn't contain key '{self._NUM_YIELDED}' expected for single process dataloader"
self._sampler_iter_yielded = state_dict[_SAMPLER_ITER_YIELDED]
# Try to restore from either _index_sampler state_dict or _sampler_iter state_dict
if isinstance(self._index_sampler, Stateful) or isinstance(self._sampler_iter, Stateful):
self._index_sampler = try_to_deserialize(self._index_sampler, state_dict[_INDEX_SAMPLER_STATE])
self._sampler_iter = iter(self._index_sampler)
if state_dict[_SAMPLER_ITER_STATE] is not None:
self._sampler_iter = try_to_deserialize(self._sampler_iter, state_dict[_SAMPLER_ITER_STATE])
else:
if not isinstance(self._index_sampler, torch.utils.data.dataloader._InfiniteConstantSampler):
# Fallback to fastforward
self._sampler_iter = itertools.islice(self._index_sampler, self._sampler_iter_yielded, None)
self._num_yielded = state_dict[self._NUM_YIELDED]
self._IterableDataset_len_called = state_dict[_ITERABLEDATASET_LEN_CALLED]
self._shared_seed = state_dict[_SHARED_SEED]
# Always restore in this order:
# 1. try to restore dataset state
# 2. generate dataset iterator
# 3. try to restore iterator state
if state_dict[_DATASET_STATE] is not None and isinstance(self._dataset, Stateful):
self._dataset = try_to_deserialize(self._dataset, state_dict[_DATASET_STATE])
self._dataset_fetcher = _DatasetKind.create_fetcher(
self._dataset_kind, self._dataset, self._auto_collation, self._collate_fn, self._drop_last
)
if self._dataset_kind == _DatasetKind.Iterable:
# If either the dataset or its iterator is stateful, we don't fast-forward
if isinstance(self._dataset, Stateful) or isinstance(self._dataset_fetcher.dataset_iter, Stateful):
if state_dict[_FETCHER_STATE] is not None:
if state_dict[_FETCHER_STATE][_DATASET_ITER_STATE] is not None:
self._dataset_fetcher.dataset_iter = try_to_deserialize(
self._dataset_fetcher.dataset_iter,
state_dict[_FETCHER_STATE][_DATASET_ITER_STATE],
)
self._dataset_fetcher.ended = state_dict[_FETCHER_STATE][_FETCHER_ENDED]
else:
# No state, just try to fastforward
if self._num_yielded > 0:
logger.warning(
f"Neither dataset nor iter(dataset) defines state_dict/load_state_dict so we are "
f"naively fast-forwarding your dataset by {self._num_yielded} steps. For more efficient "
f"resumes, please implement `state_dict` and `load_state_dict` in your IterableDataset and/or iterator."
)
for _ in range(self._num_yielded):
next(self)
self._finished = state_dict[_ITERATOR_FINISHED]
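# A minimal sketch (hypothetical ``CountingDataset``, illustrative only) of an
# IterableDataset that implements state_dict/load_state_dict, so the restore
# path above deserializes its state directly instead of naively fast-forwarding
# through ``self._num_yielded`` items:
#
#     class CountingDataset(torch.utils.data.IterableDataset):
#         def __init__(self, length):
#             self.length = length
#             self.i = 0  # current position, captured in the state
#
#         def __iter__(self):
#             while self.i < self.length:
#                 yield self.i
#                 self.i += 1
#
#         def state_dict(self):
#             return {"i": self.i}
#
#         def load_state_dict(self, state):
#             self.i = state["i"]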
class _StatefulMultiProcessingDataLoaderIter(_StatefulBaseDataLoaderIter):
r"""Iterates once over the DataLoader's dataset, as specified by the sampler."""
# NOTE [ Data Loader Multiprocessing Shutdown Logic ]
#
# Preliminary:
#
# Our data model looks like this (queues are indicated with curly brackets):
#
# main process ||
# | ||
# {index_queue} ||
# | ||
# worker processes || DATA
# | ||
# {worker_result_queue} || FLOW
# | ||
# pin_memory_thread of main process || DIRECTION
# | ||
# {data_queue} ||
# | ||
# data output \/
#
# P.S. The `worker_result_queue` and `pin_memory_thread` parts may be omitted if
# `pin_memory=False`.
#
#
# Terminating multiprocessing logic requires very careful design. In
# particular, we need to make sure that
#
# 1. The iterator gracefully exits the workers when its last reference is
# gone or it is depleted.
#
# In this case, the workers should be gracefully exited because the
# main process may still need to continue to run, and we want cleaning
# up code in the workers to be executed (e.g., releasing GPU memory).
# Naturally, we implement the shutdown logic in `__del__` of
# DataLoaderIterator.
#
# We delay the discussion on the logic in this case until later.
#
# 2. The iterator exits the workers when the loader process and/or worker
# processes exit normally or with an error.
#
# We set all workers and `pin_memory_thread` to have `daemon=True`.
#
# You may ask, why can't we make the workers non-daemonic, and
# gracefully exit using the same logic as we have in `__del__` when the
# iterator gets deleted (see 1 above)?
#
# First of all, `__del__` is **not** guaranteed to be called when
# interpreter exits. Even if it is called, by the time it executes,
# many Python core library resources may already be freed, and even
# simple things like acquiring an internal lock of a queue may hang.
# Therefore, in this case, we actually need to prevent `__del__` from
# being executed, and rely on the automatic termination of daemonic
# children.
#
# Thus, we register an `atexit` hook that sets a global flag
# `_utils.python_exit_status`. Since `atexit` hooks are executed in the
# reverse order of registration, we are guaranteed that this flag is
# set before library resources we use are freed (which, at least in
# CPython, is done via an `atexit` handler defined in
# `multiprocessing/util.py`
# https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/util.py#L320-L362
# registered when an object requiring this mechanism is first
# created, e.g., `mp.Queue`
# https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/context.py#L100-L103
# https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/queues.py#L29
# )
#
# So in `__del__`, we check if `_utils.python_exit_status` is set or
# `None` (freed), and perform no-op if so.
#
# However, simply letting library clean-up codes run can also be bad,
# because such codes (i.e., `multiprocessing.util._exit_function()`)
# include joining the putting threads of `mp.Queue`, which can be blocking.
# Hence, `cancel_join_thread` is called at creation on the queues that
# the main process puts into. See the later section
# [ 3b. A process won't hang when putting into a queue; ]
# for more details.
#
# Here are two example cases where library clean-up codes can run
# before `__del__` is called:
#
# 1. If we hold onto a reference to the iterator, it more often
# than not tries to do `multiprocessing` library cleaning before
# clearing the alive referenced objects (https://github.com/pytorch/pytorch/issues/48666)
# and thus prevents our cleaning-up code from running first.
#
# 2. A similar issue arises when a `DataLoader` is used in a subprocess.
# When a process ends, it shuts all its daemonic children
# down with a SIGTERM (instead of joining them without a timeout).
# Similarly for threads, but by a different mechanism. This fact,
# together with a few implementation details of multiprocessing, forces
# us to make workers daemonic. All of our problems arise when a
# DataLoader is used in a subprocess, and are caused by multiprocessing
# code which looks more or less like this:
#
# try:
# your_function_using_a_dataloader()
# finally:
# multiprocessing.util._exit_function()
#
# The joining/termination mentioned above happens inside
# `_exit_function()`. Now, if `your_function_using_a_dataloader()`
# throws, the stack trace stored in the exception will prevent the
# frame which uses `DataLoaderIter` to be freed. If the frame has any
# reference to the `DataLoaderIter` (e.g., in a method of the iter),
# its `__del__`, which starts the shutdown procedure, will not be
# called. That, in turn, means that workers aren't notified. Attempting
# to join in `_exit_function` will then result in a hang.
#
# For context, `_exit_function` is also registered as an `atexit` call.
# So it is unclear to me (@ssnl) why this is needed in a finally block.
# The code dates back to 2008 and there is no comment on the original
# PEP 371 or patch https://bugs.python.org/issue3050 (containing both
# the finally block and the `atexit` registration) that explains this.
#
#
# Finally, another choice is to just shut down workers with the logic in 1
# above whenever we see an error in `next`. This isn't ideal because
# a. It prevents users from using try-catch to resume data loading.
# b. It doesn't prevent hanging if users have references to the
# iterator.
#
# 3. All processes exit if any of them die unexpectedly by fatal signals.
#
# As shown above, the workers are set as daemonic children of the main
# process. However, automatic cleaning-up of such child processes only
# happens if the parent process exits gracefully (e.g., not via fatal
# signals like SIGKILL). So we must ensure that each process will exit
# even if the process that should send/receive data to/from it was
# killed, i.e.,
#
# a. A process won't hang when getting from a queue.
#
# Even with carefully designed data dependencies (i.e., a `put()`
# always corresponding to a `get()`), hanging on `get()` can still
# happen when data in queue is corrupted (e.g., due to
# `cancel_join_thread` or unexpected exit).
#
# For child exit, we set a timeout whenever we try to get data
# from `data_queue`, and check the workers' status on each timeout
# and error.
# See `_DataLoaderiter._get_batch()` and
# `_DataLoaderiter._try_get_data()` for details.
#
# Additionally, for child exit on non-Windows platforms, we also
# register a SIGCHLD handler (which is not supported on Windows) on
# the main process, which checks if any of the workers fail in the
# (Python) handler. This is more efficient and faster in detecting
# worker failures, compared to only using the above mechanism.
# See `DataLoader.cpp` and `_utils/signal_handling.py` for details.
#
# For `.get()` calls where the sender(s) is not the workers, we
# guard them with timeouts, and check the status of the sender
# when timeout happens:
# + in the workers, the `_utils.worker.ManagerWatchdog` class
# checks the status of the main process.
# + if `pin_memory=True`, when getting from `pin_memory_thread`,
# check `pin_memory_thread` status periodically until `.get()`
# returns or we see that `pin_memory_thread` has died.
#
# b. A process won't hang when putting into a queue;
#
# We use `mp.Queue` which has a separate background thread to put
# objects from an unbounded buffer array. The background thread is
# daemonic and usually automatically joined when the process
# *exits*.
#
# In case that the receiver has ended abruptly while
# reading from the pipe, the join will hang forever. The usual
# solution for this in Python is calling `q.cancel_join_thread`,
# which prevents automatically joining it when finalizing
# (exiting).
#
# Nonetheless, `cancel_join_thread` must only be called when the
# queue is **not** going to be read from or written into by another
# process, because it may hold onto a lock or leave corrupted data
# in the queue, leading other readers/writers to hang.
#
# Hence,
# + For worker processes, we only do so (for their output
# queues, i.e., `worker_result_queue`) before exiting.
# + For `pin_memory_thread`, its output queue `data_queue` is a
# `queue.Queue` that does blocking `put` if the queue is full.
# So there is no above problem, but as a result, in
# `_pin_memory_loop`, we do need to wrap the `put` in a loop
# that breaks not only upon success, but also when the main
# process stops reading, i.e., is shutting down.
# + For loader process, we `cancel_join_thread()` for all
# `_index_queues` because the whole purpose of workers and
# `pin_memory_thread` is to serve the loader process. If
# loader process is already exiting, we don't really care if
# the queues are corrupted.
#
#
# Now let's get back to 1:
# how we gracefully exit the workers when the last reference to the
# iterator is gone.
#
# To achieve this, we implement the following logic along with the design
# choices mentioned above:
#
# `workers_done_event`:
# A `multiprocessing.Event` shared among the main process and all worker
# processes. This is used to signal the workers that the iterator is
# shutting down. After it is set, they will not send processed data to
# queues anymore, and only wait for the final `None` before exiting.
# `done_event` isn't strictly needed. I.e., we can just check for `None`
# from the input queue, but it allows us to skip wasting resources
# processing data if we are already shutting down.
#
# `pin_memory_thread_done_event`:
# A `threading.Event` for a similar purpose to that of
# `workers_done_event`, but is for the `pin_memory_thread`. The reason
# that separate events are needed is that `pin_memory_thread` reads from
# the output queue of the workers. But the workers, upon seeing that
# `workers_done_event` is set, only want to see the final `None`, and are
# not required to flush all data in the output queue (e.g., a worker may call
# `cancel_join_thread` on that queue if its `IterableDataset` iterator
# happens to exhaust coincidentally, which is out of the control of the
# main process). Thus, since we will exit `pin_memory_thread` before the
# workers (see below), two separate events are used.
#
# NOTE: In short, the protocol is that the main process will set these
# `done_event`s and then send the corresponding processes/threads a `None`,
# and that they may exit at any time after receiving the `None`.
#
# NOTE: Using `None` as the final signal is valid, since normal data will
# always be a 2-tuple with the 1st element being the index of the data
# transferred (different from dataset index/key), and the 2nd being
# either the dataset key or the data sample (depending on which part
# of the data model the queue is at).
#
# [ worker processes ]
# While loader process is alive:
# Get from `index_queue`.
# If get anything else,
# Check `workers_done_event`.
# If set, continue to next iteration
# i.e., keep getting until see the `None`, then exit.
# Otherwise, process data:
# If is fetching from an `IterableDataset` and the iterator
# is exhausted, send an `_IterableDatasetStopIteration`
# object to signal iteration end. The main process, upon
# receiving such an object, will send `None` to this
# worker and not use the corresponding `index_queue`
# anymore.
# If timed out,
# Whether or not `workers_done_event` is set (we still need to see
# the `None`), must continue to next iteration.
# (outside loop)
# If `workers_done_event` is set, (this can be False with `IterableDataset`)
# `data_queue.cancel_join_thread()`. (Everything is ending here:
# main process won't read from it;
# other workers will also call
# `cancel_join_thread`.)
#
# [ pin_memory_thread ]
# # No need to check main thread. If this thread is alive, the main loader
# # thread must be alive, because this thread is set as daemonic.
# While `pin_memory_thread_done_event` is not set:
# Get from `worker_result_queue`.
# If timed out, continue to get in the next iteration.
# Otherwise, process data.
# While `pin_memory_thread_done_event` is not set:
# Put processed data to `data_queue` (a `queue.Queue` with blocking put)
# If timed out, continue to put in the next iteration.
# Otherwise, break, i.e., continue to the outer loop.
#
# NOTE: we don't check the status of the main thread because
# 1. if the process is killed by fatal signal, `pin_memory_thread`
# ends.
# 2. in other cases, either the cleaning-up in __del__ or the
# automatic exit of daemonic thread will take care of it.
# This won't busy-wait either because `.get(timeout)` does not
# busy-wait.
#
# [ main process ]
# In the DataLoader Iter's `__del__`
# b. Exit `pin_memory_thread`
# i. Set `pin_memory_thread_done_event`.
# ii. Put `None` in `worker_result_queue`.
# iii. Join the `pin_memory_thread`.
# iv. `worker_result_queue.cancel_join_thread()`.
#
# c. Exit the workers.
# i. Set `workers_done_event`.
# ii. Put `None` in each worker's `index_queue`.
# iii. Join the workers.
# iv. Call `.cancel_join_thread()` on each worker's `index_queue`.
#
# NOTE: (c) is better placed after (b) because it may leave corrupted
# data in `worker_result_queue`, which `pin_memory_thread`
# reads from; in that case the `pin_memory_thread` can only exit
# after timing out, which is slow. Nonetheless, the same thing
# happens if a worker is killed by signal at unfortunate times,
# but in other cases, we are better off having a non-corrupted
# `worker_result_queue` for `pin_memory_thread`.
#
# NOTE: If `pin_memory=False`, there is no `pin_memory_thread` and (b)
# can be omitted
#
# NB: `done_event`s aren't strictly needed. E.g., we can just check for
# `None` from `index_queue`, but it allows us to skip wasting resources
# processing indices already in `index_queue` if we are already shutting
# down.
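# A miniature, self-contained sketch (illustrative only; names are
# hypothetical and this is not part of this module's logic) of the
# done_event + final-`None` shutdown protocol described above:
#
#     def worker(index_queue, done_event):
#         while True:
#             item = index_queue.get()
#             if item is None:          # final signal: now safe to exit
#                 break
#             if done_event.is_set():   # shutting down: drain until `None`
#                 continue
#             ...                       # normal processing of `item`
#
#     # Main-process shutdown sequence:
#     #     done_event.set()
#     #     index_queue.put(None)
#     #     worker_process.join()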
_last_yielded_worker_id: int
_NUM_WORKERS = "_num_workers"
_SNAPSHOT = "_snapshot"
_MAIN_SNAPSHOT = "_main_snapshot"
_WORKER_SNAPSHOTS = "_worker_snapshots"
_SNAPSHOT_STEP = "_snapshot_step"
_STEPS_SINCE_SNAPSHOT = "_steps_since_snapshot"
_LAST_YIELDED_WORKER_ID = "_last_yielded_worker_id"
_BASE_SEED = "_base_seed"
def __init__(self, loader, next_iter_state):
super().__init__(loader)
self._snapshot_interval = loader.snapshot_every_n_steps
self._prefetch_factor = loader.prefetch_factor
assert self._num_workers > 0
assert self._prefetch_factor > 0
if loader.multiprocessing_context is None:
multiprocessing_context = multiprocessing
else:
multiprocessing_context = loader.multiprocessing_context
self._worker_init_fn = loader.worker_init_fn
# Adds forward compatibilities so classic DataLoader can work with DataPipes:
# Additional worker init function will take care of sharding in MP and Distributed
if isinstance(self._dataset, (IterDataPipe, MapDataPipe)):
self._worker_init_fn = functools.partial(
_sharding_worker_init_fn, self._worker_init_fn, self._world_size, self._rank
)
# No certainty which module multiprocessing_context is
self._worker_result_queue = multiprocessing_context.Queue() # type: ignore[var-annotated]
self._worker_pids_set = False
self._shutdown = False
self._workers_done_event = multiprocessing_context.Event()
self._index_queues = []
self._workers = []
worker_states = {self._worker_key(i): None for i in range(self._num_workers)}
if next_iter_state is not None:
assert (
self._SNAPSHOT in next_iter_state
), f"State doesn't contain key '{self._SNAPSHOT}' expected for multiprocess dataloader"
wstates = next_iter_state[self._SNAPSHOT].get(self._WORKER_SNAPSHOTS, {})
assert set(map(self._worker_key, range(len(wstates)))) == set(wstates.keys()), (
len(wstates),
wstates.keys(),
)
for worker_key, sd in wstates.items():
worker_states[worker_key] = sd
self._base_seed = next_iter_state[self._SNAPSHOT][self._MAIN_SNAPSHOT].get(self._BASE_SEED, self._base_seed)
self._shared_seed = next_iter_state[self._SNAPSHOT][self._MAIN_SNAPSHOT].get(
_SHARED_SEED, self._shared_seed
)
for i in range(self._num_workers):
# No certainty which module multiprocessing_context is
index_queue = multiprocessing_context.Queue() # type: ignore[var-annotated]
# Need to `cancel_join_thread` here!
# See sections (2) and (3b) above.
index_queue.cancel_join_thread()
w = multiprocessing_context.Process(
target=_worker_loop,
args=(
self._dataset_kind,
self._dataset,
index_queue,
self._worker_result_queue,
self._workers_done_event,
self._auto_collation,
self._collate_fn,
self._drop_last,
self._base_seed,
self._worker_init_fn,
i,
self._num_workers,
self._persistent_workers,
self._shared_seed,
worker_states[self._worker_key(i)],
),
)
w.daemon = True
# NB: Process.start() actually takes some time as it needs to
# start a process and pass the arguments over via a pipe.
# Therefore, we only add a worker to self._workers list after
# it started, so that we do not call .join() if program dies
# before it starts, and __del__ tries to join but will get:
# AssertionError: can only join a started process.
w.start()
self._index_queues.append(index_queue)
self._workers.append(w)
if self._pin_memory:
self._pin_memory_thread_done_event = threading.Event()
# Queue is not type-annotated
self._data_queue = queue.Queue() # type: ignore[var-annotated]
if self._pin_memory_device == "xpu":
current_device = torch.xpu.current_device() # type: ignore[attr-defined]
elif self._pin_memory_device == torch._C._get_privateuse1_backend_name():
custom_device_mod = getattr(torch, torch._C._get_privateuse1_backend_name())
current_device = custom_device_mod.current_device()
else:
current_device = torch.cuda.current_device() # choose cuda for default
pin_memory_thread = threading.Thread(
target=_utils.pin_memory._pin_memory_loop,
args=(
self._worker_result_queue,
self._data_queue,
current_device,
self._pin_memory_thread_done_event,
self._pin_memory_device,
),
)
pin_memory_thread.daemon = True
pin_memory_thread.start()
# Similar to workers (see comment above), we only register
# pin_memory_thread once it is started.
self._pin_memory_thread = pin_memory_thread
else:
self._data_queue = self._worker_result_queue # type: ignore[assignment]
# In some rare cases, persistent workers (daemonic processes)
# would be terminated before `__del__` of the iterator is invoked
# when the main process exits.
# This would cause a failure when pin_memory_thread tries to read
# corrupted data from worker_result_queue.
# atexit is used to shut down the thread and child processes in the
# right sequence before the main process exits.
if self._persistent_workers and self._pin_memory:
import atexit