megatron-core 0.1.0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of megatron-core has been flagged as potentially problematic.

@@ -0,0 +1,279 @@
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+
+ import torch
+
+ from megatron.core.parallel_state import (
+     get_tensor_model_parallel_rank,
+     get_tensor_model_parallel_world_size,
+     get_tensor_model_parallel_group,
+ )
+ from .utils import split_tensor_along_last_dim
+
+
+ def _reduce(input_):
+     """All-reduce the input tensor across the model parallel group."""
+
+     # Bypass the function if we are using only 1 GPU.
+     if get_tensor_model_parallel_world_size() == 1:
+         return input_
+
+     # All-reduce.
+     torch.distributed.all_reduce(input_, group=get_tensor_model_parallel_group())
+
+     return input_
+
+
+ def _split_along_last_dim(input_):
+     """Split the tensor along its last dimension and keep the
+     slice corresponding to the rank."""
+
+     world_size = get_tensor_model_parallel_world_size()
+     # Bypass the function if we are using only 1 GPU.
+     if world_size == 1:
+         return input_
+
+     # Split along last dimension.
+     input_list = split_tensor_along_last_dim(input_, world_size)
+
+     # Note: torch.split does not create contiguous tensors by default.
+     rank = get_tensor_model_parallel_rank()
+     output = input_list[rank].contiguous()
+
+     return output
+
+
+ def _split_along_first_dim(input_):
+     """Split the tensor along its first dimension and keep the
+     slice corresponding to the rank."""
+
+     world_size = get_tensor_model_parallel_world_size()
+     # Bypass the function if we are using only 1 GPU.
+     if world_size == 1:
+         return input_
+
+     # Split along first dimension.
+     dim_size = input_.size()[0]
+     assert dim_size % world_size == 0, \
+         "First dimension of the tensor should be divisible by tensor parallel size"
+     local_dim_size = dim_size // world_size
+     rank = get_tensor_model_parallel_rank()
+     dim_offset = rank * local_dim_size
+
+     output = input_[dim_offset:dim_offset + local_dim_size].contiguous()
+
+     return output
+
+
+ def _gather_along_last_dim(input_):
+     """Gather tensors and concatenate along the last dimension."""
+
+     world_size = get_tensor_model_parallel_world_size()
+     # Bypass the function if we are using only 1 GPU.
+     if world_size == 1:
+         return input_
+
+     # Size and dimension.
+     last_dim = input_.dim() - 1
+     rank = get_tensor_model_parallel_rank()
+
+     tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
+     tensor_list[rank] = input_
+     torch.distributed.all_gather(tensor_list, input_, group=get_tensor_model_parallel_group())
+
+     # Note: torch.cat already creates a contiguous tensor.
+     output = torch.cat(tensor_list, dim=last_dim).contiguous()
+
+     return output
+
+
+ def _gather_along_first_dim(input_):
+     """Gather tensors and concatenate along the first dimension."""
+
+     world_size = get_tensor_model_parallel_world_size()
+     # Bypass the function if we are using only 1 GPU.
+     if world_size == 1:
+         return input_
+
+     dim_size = list(input_.size())
+     dim_size[0] = dim_size[0] * world_size
+
+     output = torch.empty(dim_size, dtype=input_.dtype,
+                          device=torch.cuda.current_device())
+     torch.distributed._all_gather_base(output, input_.contiguous(),
+                                        group=get_tensor_model_parallel_group())
+
+     return output
+
+ def _reduce_scatter_along_first_dim(input_):
+     """Reduce-scatter the input tensor across the model parallel group."""
+     world_size = get_tensor_model_parallel_world_size()
+     # Bypass the function if we are using only 1 GPU.
+     if world_size == 1:
+         return input_
+
+     dim_size = list(input_.size())
+     assert dim_size[0] % world_size == 0, \
+         "First dimension of the tensor should be divisible by tensor parallel size"
+
+     dim_size[0] = dim_size[0] // world_size
+
+     output = torch.empty(dim_size, dtype=input_.dtype,
+                          device=torch.cuda.current_device())
+     torch.distributed._reduce_scatter_base(output, input_.contiguous(),
+                                            group=get_tensor_model_parallel_group())
+     return output
+
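Note: `_all_gather_base` and `_reduce_scatter_base` are private torch.distributed entry points. PyTorch 1.13 exposed them publicly as `all_gather_into_tensor` and `reduce_scatter_tensor`, so a forward-compatible caller might resolve the names once at import time (a hedged sketch, not part of this package):

    import torch.distributed as dist

    # Prefer the public names when available (PyTorch >= 1.13); fall back
    # to the private ones this file uses on older releases.
    _all_gather_into_tensor = getattr(dist, 'all_gather_into_tensor', dist._all_gather_base)
    _reduce_scatter_tensor = getattr(dist, 'reduce_scatter_tensor', dist._reduce_scatter_base)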
+
+ class _CopyToModelParallelRegion(torch.autograd.Function):
+     """Pass the input to the model parallel region."""
+
+     @staticmethod
+     def symbolic(graph, input_):
+         return input_
+
+     @staticmethod
+     def forward(ctx, input_):
+         return input_
+
+     @staticmethod
+     def backward(ctx, grad_output):
+         return _reduce(grad_output)
+
+
+ class _ReduceFromModelParallelRegion(torch.autograd.Function):
+     """All-reduce the input from the model parallel region."""
+
+     @staticmethod
+     def symbolic(graph, input_):
+         return _reduce(input_)
+
+     @staticmethod
+     def forward(ctx, input_):
+         return _reduce(input_)
+
+     @staticmethod
+     def backward(ctx, grad_output):
+         return grad_output
+
+
+ class _ScatterToModelParallelRegion(torch.autograd.Function):
+     """Split the input and keep only the chunk corresponding to the rank."""
+
+     @staticmethod
+     def symbolic(graph, input_):
+         return _split_along_last_dim(input_)
+
+     @staticmethod
+     def forward(ctx, input_):
+         return _split_along_last_dim(input_)
+
+     @staticmethod
+     def backward(ctx, grad_output):
+         return _gather_along_last_dim(grad_output)
+
+
+ class _GatherFromModelParallelRegion(torch.autograd.Function):
+     """Gather the input from the model parallel region and concatenate."""
+
+     @staticmethod
+     def symbolic(graph, input_):
+         return _gather_along_last_dim(input_)
+
+     @staticmethod
+     def forward(ctx, input_):
+         return _gather_along_last_dim(input_)
+
+     @staticmethod
+     def backward(ctx, grad_output):
+         return _split_along_last_dim(grad_output)
+
+
+ class _ScatterToSequenceParallelRegion(torch.autograd.Function):
+     """Split the input and keep only the chunk corresponding to the rank."""
+
+     @staticmethod
+     def symbolic(graph, input_):
+         return _split_along_first_dim(input_)
+
+     @staticmethod
+     def forward(ctx, input_):
+         return _split_along_first_dim(input_)
+
+     @staticmethod
+     def backward(ctx, grad_output):
+         return _gather_along_first_dim(grad_output)
+
+
+ class _GatherFromSequenceParallelRegion(torch.autograd.Function):
+     """Gather the input from the sequence parallel region and concatenate."""
+
+     @staticmethod
+     def symbolic(graph, input_, tensor_parallel_output_grad=True):
+         return _gather_along_first_dim(input_)
+
+     @staticmethod
+     def forward(ctx, input_, tensor_parallel_output_grad=True):
+         ctx.tensor_parallel_output_grad = tensor_parallel_output_grad
+         return _gather_along_first_dim(input_)
+
+     @staticmethod
+     def backward(ctx, grad_output):
+         tensor_parallel_output_grad = ctx.tensor_parallel_output_grad
+
+         # If the computation graph after the gather operation is
+         # in the tensor parallel mode, output gradients need to be
+         # reduce-scattered, whereas if the computation is duplicated,
+         # output gradients only need to be split.
+         if tensor_parallel_output_grad:
+             return _reduce_scatter_along_first_dim(grad_output), None
+         else:
+             return _split_along_first_dim(grad_output), None
+
+
+ class _ReduceScatterToSequenceParallelRegion(torch.autograd.Function):
+     """Reduce-scatter the input from the model parallel region."""
+
+     @staticmethod
+     def symbolic(graph, input_):
+         return _reduce_scatter_along_first_dim(input_)
+
+     @staticmethod
+     def forward(ctx, input_):
+         return _reduce_scatter_along_first_dim(input_)
+
+     @staticmethod
+     def backward(ctx, grad_output):
+         return _gather_along_first_dim(grad_output)
+
+
+ # -----------------
+ # Helper functions.
+ # -----------------
+
+ def copy_to_tensor_model_parallel_region(input_):
+     return _CopyToModelParallelRegion.apply(input_)
+
+
+ def reduce_from_tensor_model_parallel_region(input_):
+     return _ReduceFromModelParallelRegion.apply(input_)
+
+
+ def scatter_to_tensor_model_parallel_region(input_):
+     return _ScatterToModelParallelRegion.apply(input_)
+
+
+ def gather_from_tensor_model_parallel_region(input_):
+     return _GatherFromModelParallelRegion.apply(input_)
+
+
+ def scatter_to_sequence_parallel_region(input_):
+     return _ScatterToSequenceParallelRegion.apply(input_)
+
+
+ def gather_from_sequence_parallel_region(input_, tensor_parallel_output_grad=True):
+     return _GatherFromSequenceParallelRegion.apply(input_, tensor_parallel_output_grad)
+
+
+ def reduce_scatter_to_sequence_parallel_region(input_):
+     return _ReduceScatterToSequenceParallelRegion.apply(input_)
+
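For context, tensor-parallel layers compose these helpers in pairs whose forward and backward communication mirror each other. Below is a minimal sketch of a column-parallel linear forward, loosely modeled on how Megatron uses these primitives; the function and parameter names are illustrative, not part of this module:

    import torch.nn.functional as F

    def column_parallel_linear_forward(input_, weight_partition, gather_output=True):
        # Identity in forward, all-reduce of the gradient in backward.
        parallel_input = copy_to_tensor_model_parallel_region(input_)
        # Each rank computes its own slice of the output features.
        output_parallel = F.linear(parallel_input, weight_partition)
        if gather_output:
            # All-gather in forward, split in backward.
            return gather_from_tensor_model_parallel_region(output_parallel)
        return output_parallel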
@@ -0,0 +1,253 @@
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+
+ # Parts of the code here are adapted from PyTorch
+ # repo: https://github.com/pytorch/pytorch
+
+ import contextlib
+
+ import torch
+ from torch import _C
+ from torch.cuda import _lazy_call, device as device_ctx_manager
+ from torch.utils.checkpoint import detach_variable
+
+ from megatron.core.parallel_state import (
+     get_data_parallel_rank,
+     get_tensor_model_parallel_group,
+     get_tensor_model_parallel_rank,
+     get_tensor_model_parallel_world_size,
+ )
+
+ from .utils import (
+     split_tensor_into_1d_equal_chunks,
+     gather_split_1d_tensor,
+ )
+
+ from megatron.core.utils import safely_set_viewless_tensor_data
+
+ # Default name for the model parallel rng tracker.
+ _MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng'
+
+
+ def _set_cuda_rng_state(new_state, device=-1):
+     """Sets the random number generator state of the current GPU.
+
+     Arguments:
+         new_state (torch.ByteTensor): The desired state.
+
+     This function is adapted from the PyTorch repo (torch.cuda.set_rng_state)
+     with a single change: the input state is not cloned. Cloning caused
+     major performance issues for 4+ GPU cases.
+     """
+     if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState):
+         # older PyTorch
+         def cb():
+             with device_ctx_manager(device):
+                 _C._cuda_setRNGState(new_state)
+     else:
+         # newer PyTorch
+         if device == -1:
+             device = torch.device('cuda')
+         elif isinstance(device, str):
+             device = torch.device(device)
+         elif isinstance(device, int):
+             device = torch.device('cuda', device)
+
+         def cb():
+             idx = device.index
+             if idx is None:
+                 idx = torch.cuda.current_device()
+             default_generator = torch.cuda.default_generators[idx]
+             default_generator.set_state(new_state)
+
+     _lazy_call(cb)
+
+
+ class CudaRNGStatesTracker:
+     """Tracker for the cuda RNG states.
+
+     Using the `add` method, a cuda rng state is initialized based on
+     the input `seed` and is assigned to `name`. Later, by forking the
+     rng state, we can perform operations and return to our starting
+     cuda state.
+     """
+
+     def __init__(self):
+         # Map from a string name to the cuda rng state.
+         self.states_ = {}
+         # Seeds are just for bookkeeping and ensure no seed is set twice.
+         self.seeds_ = set()
+
+     def reset(self):
+         """Set to the initial state (no tracker)."""
+         self.states_ = {}
+         self.seeds_ = set()
+
+     def get_states(self):
+         """Get rng states. Copy the dictionary so we have direct
+         pointers to the states, not just a pointer to the dictionary."""
+         states = {}
+         for name in self.states_:
+             states[name] = self.states_[name]
+         return states
+
+     def set_states(self, states):
+         """Set the rng states. For efficiency purposes, we do not check
+         the size of seed for compatibility."""
+         self.states_ = states
+
+     def add(self, name, seed):
+         """Track the rng state."""
+         # Check that the seed is not already used.
+         if seed in self.seeds_:
+             raise Exception('seed {} already exists'.format(seed))
+         self.seeds_.add(seed)
+         # Check that the state is not already defined.
+         if name in self.states_:
+             raise Exception('cuda rng state {} already exists'.format(name))
+         # Get the current rng state.
+         orig_rng_state = torch.cuda.get_rng_state()
+         # Set the new state and store it.
+         torch.cuda.manual_seed(seed)
+         self.states_[name] = torch.cuda.get_rng_state()
+         # Reset rng state to what it was.
+         _set_cuda_rng_state(orig_rng_state)
+
+     @contextlib.contextmanager
+     def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME):
+         """Fork the cuda rng state, perform operations, and exit with
+         the original state."""
+         # Check that we have added the state.
+         if name not in self.states_:
+             raise Exception('cuda rng state {} is not added'.format(name))
+         # Store current rng state.
+         orig_cuda_rng_state = torch.cuda.get_rng_state()
+         # Set rng state to the desired one.
+         _set_cuda_rng_state(self.states_[name])
+         # Do the stuff we wanted to do.
+         try:
+             yield
+         finally:
+             # Update the current rng state for later use.
+             self.states_[name] = torch.cuda.get_rng_state()
+             # And set the state to the original state we started with.
+             _set_cuda_rng_state(orig_cuda_rng_state)
+
+
+ # RNG tracker object.
+ _CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()
+
+
+ def get_cuda_rng_tracker():
+     """Get cuda rng tracker."""
+     return _CUDA_RNG_STATE_TRACKER
+
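A typical use of the tracker is to fork into the named state around any op whose randomness must differ across tensor-parallel ranks, such as dropout inside a model-parallel region (an illustrative sketch; `model_parallel_dropout` is not part of this module):

    import torch.nn.functional as F

    def model_parallel_dropout(x, p=0.1):
        # The dropout mask drawn here differs across tensor-parallel ranks;
        # RNG state outside the `with` block is left untouched.
        with get_cuda_rng_tracker().fork():
            return F.dropout(x, p=p)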
+
+ def model_parallel_cuda_manual_seed(seed):
+     """Initialize model parallel cuda seed.
+
+     This function should be called after the model parallel is
+     initialized. Also, no torch.cuda.manual_seed should be called
+     after this function. Basically, this is a replacement for that
+     function.
+
+     Two sets of RNG states are tracked:
+     default state: This is for data parallelism and is the same among a
+                    set of model parallel GPUs but different across
+                    different model parallel groups. This is used, for
+                    example, for dropout in the non-tensor-model-parallel regions.
+     tensor-model-parallel state: This state is different among a set of model
+                    parallel GPUs, but the same across data parallel
+                    groups. This is used, for example, for dropout in
+                    model parallel regions.
+     """
+     # 2718 is just for fun and any POSITIVE value will work.
+     offset = seed + 2718
+     tensor_model_parallel_seed = offset + get_tensor_model_parallel_rank()
+     # Data parallel gets the original seed.
+     data_parallel_seed = seed
+
+     _CUDA_RNG_STATE_TRACKER.reset()
+     # Set the default state.
+     torch.cuda.manual_seed(data_parallel_seed)
+     # and model parallel state.
+     _CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME,
+                                 tensor_model_parallel_seed)
+
+
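Concretely, with `seed=1234` the default (data-parallel) state is seeded with 1234 on every rank, while the tracked 'model-parallel-rng' state is seeded with 1234 + 2718 + rank, i.e. 3952, 3953, 3954 and 3955 on tensor-parallel ranks 0 through 3:

    model_parallel_cuda_manual_seed(1234)
    # On tensor-parallel rank 2 this seeds the default state with 1234 and
    # the 'model-parallel-rng' tracked state with 1234 + 2718 + 2 = 3954.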
+ class CheckpointFunction(torch.autograd.Function):
+     """This function is adapted from torch.utils.checkpoint with
+     two main changes:
+     1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state`
+     2) the states in the model parallel tracker are also properly
+        tracked/set/reset.
+     """
+
+     @staticmethod
+     def forward(ctx, run_function, distribute_saved_activations, *args):
+         ctx.run_function = run_function
+         ctx.distribute_saved_activations = distribute_saved_activations
+
+         # Copy the rng states.
+         ctx.fwd_cpu_rng_state = torch.get_rng_state()
+         ctx.fwd_cuda_rng_state = torch.cuda.get_rng_state()
+         ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
+
+         with torch.no_grad():
+             outputs = run_function(*args)
+
+         # Divide hidden states across model parallel group and only keep
+         # the chunk corresponding to the current rank.
+         if distribute_saved_activations:
+             ctx.input_0_shape = args[0].data.shape
+             safely_set_viewless_tensor_data(
+                 args[0],
+                 split_tensor_into_1d_equal_chunks(args[0].data, new_buffer=True))
+
+         # Store everything.
+         ctx.save_for_backward(*args)
+
+         return outputs
+
+     @staticmethod
+     def backward(ctx, *args):
+         if not torch.autograd._is_checkpoint_valid():
+             raise RuntimeError("Checkpointing is not compatible with .grad(), "
+                                "please use .backward() if possible")
+         inputs = ctx.saved_tensors
+         if ctx.distribute_saved_activations:
+             safely_set_viewless_tensor_data(
+                 inputs[0],
+                 gather_split_1d_tensor(inputs[0].data).view(ctx.input_0_shape))
+
+         # Store the current states.
+         bwd_cpu_rng_state = torch.get_rng_state()
+         bwd_cuda_rng_state = torch.cuda.get_rng_state()
+         bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
+
+         # Set the states to what they were before the forward pass.
+         torch.set_rng_state(ctx.fwd_cpu_rng_state)
+         _set_cuda_rng_state(ctx.fwd_cuda_rng_state)
+         get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)
+
+         # Recompute the forward pass.
+         detached_inputs = detach_variable(inputs)
+         with torch.enable_grad():
+             outputs = ctx.run_function(*detached_inputs)
+
+         # Set the states back to what they were at the start of this function.
+         torch.set_rng_state(bwd_cpu_rng_state)
+         _set_cuda_rng_state(bwd_cuda_rng_state)
+         get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)
+
+         if isinstance(outputs, torch.Tensor):
+             outputs = (outputs,)
+         torch.autograd.backward(outputs, args)
+         grads = tuple(inp.grad if isinstance(inp, torch.Tensor) else inp
+                       for inp in detached_inputs)
+         return (None, None) + grads
+
+
+ def checkpoint(function, distribute_saved_activations, *args):
+     """Checkpoint a model or part of the model.
+     This has been directly copied from torch.utils.checkpoint."""
+     return CheckpointFunction.apply(function,
+                                     distribute_saved_activations, *args)
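A typical call site wraps a block of layers in a closure and trades recompute for activation memory; passing True as the second argument additionally shards the saved input across tensor-parallel ranks. A hedged sketch (`layers` and `hidden_states` are illustrative names, not part of this module):

    def run_layers(hidden_states):
        for layer in layers:
            hidden_states = layer(hidden_states)
        return hidden_states

    # Activations inside run_layers are not stored; they are recomputed
    # during backward with the RNG states restored by CheckpointFunction.
    output = checkpoint(run_layers, False, hidden_states)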
@@ -0,0 +1,108 @@
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+
+ import torch
+ from typing import List, Sequence
+
+ from megatron.core.utils import divide
+ from megatron.core import parallel_state
+
+ def split_tensor_along_last_dim(
+     tensor: torch.Tensor,
+     num_partitions: int,
+     contiguous_split_chunks: bool = False,
+ ) -> List[torch.Tensor]:
+     """Split a tensor along its last dimension.
+
+     Arguments:
+         tensor: input tensor.
+         num_partitions: number of partitions to split the tensor into.
+         contiguous_split_chunks: If True, make each chunk contiguous
+                                  in memory.
+
+     Returns:
+         A list of Tensors.
+     """
+     # Get the size and dimension.
+     last_dim = tensor.dim() - 1
+     last_dim_size = divide(tensor.size()[last_dim], num_partitions)
+     # Split.
+     tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
+     # Note: torch.split does not create contiguous tensors by default.
+     if contiguous_split_chunks:
+         return tuple(chunk.contiguous() for chunk in tensor_list)
+
+     return tensor_list
+
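For example, splitting a [4, 6] tensor into 3 partitions yields three [4, 2] chunks, which are views into the original tensor unless contiguous_split_chunks=True (a quick sketch):

    import torch

    t = torch.arange(24.).view(4, 6)
    chunks = split_tensor_along_last_dim(t, 3)
    assert len(chunks) == 3 and chunks[0].shape == (4, 2)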
+ def split_tensor_into_1d_equal_chunks(tensor, new_buffer=False):
+     """Break a tensor into equal 1D chunks across tensor parallel ranks.
+
+     Returns a Tensor or View with this rank's portion of the data.
+
+     Arguments:
+         tensor: The tensor to split.
+
+     Keyword Arguments:
+         new_buffer (bool): If True, returns a new Tensor.
+                            If False, returns a view into the existing Tensor.
+                            Default is False.
+     """
+     partition_size = torch.numel(tensor) // \
+         parallel_state.get_tensor_model_parallel_world_size()
+     start_index = partition_size * parallel_state.get_tensor_model_parallel_rank()
+     end_index = start_index + partition_size
+     if new_buffer:
+         data = torch.empty(partition_size, dtype=tensor.dtype,
+                            device=torch.cuda.current_device(),
+                            requires_grad=False)
+         data.copy_(tensor.view(-1)[start_index:end_index])
+     else:
+         data = tensor.view(-1)[start_index:end_index]
+     return data
+
+
+ def gather_split_1d_tensor(tensor):
+     """Opposite of split_tensor_into_1d_equal_chunks. Gather values from the
+     tensor model parallel ranks.
+
+     Returns a new Tensor with the gathered data.
+
+     Arguments:
+         tensor: A Tensor or view of this rank's portion of the data.
+     """
+     numel_gathered = torch.numel(tensor) * \
+         parallel_state.get_tensor_model_parallel_world_size()
+     gathered = torch.empty(numel_gathered, dtype=tensor.dtype,
+                            device=torch.cuda.current_device(),
+                            requires_grad=False)
+     # TODO: This API is experimental in pytorch (as of Feb 2022) and
+     # might break in future pytorch releases. We chose this API
+     # over torch.distributed.all_gather for efficiency reasons: it
+     # calls NCCL all-gather directly, whereas the latter does
+     # internal copies and can potentially cause a slowdown.
+     torch.distributed._all_gather_base(gathered, tensor,
+                                        group=parallel_state.get_tensor_model_parallel_group())
+     return gathered
+
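The two functions are inverses of one another on each rank; assuming tensor model parallelism is initialized and the tensor's numel is divisible by the world size, a round trip restores the original data (illustrative sketch):

    # Shard a flattened tensor, then reassemble it on every rank.
    local_chunk = split_tensor_into_1d_equal_chunks(tensor, new_buffer=True)
    restored = gather_split_1d_tensor(local_chunk).view(tensor.shape)
    # `restored` now equals `tensor` on every tensor-parallel rank.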
+
+ class VocabUtility:
+     """Split the vocabulary into `world_size` chunks and return the first
+     and last index of the vocabulary belonging to the `rank`
+     partition. Note that indices are in [first, last).
+     """
+
+     @staticmethod
+     def vocab_range_from_per_partition_vocab_size(
+         per_partition_vocab_size: int, rank, world_size: int
+     ) -> Sequence[int]:
+         index_f = rank * per_partition_vocab_size
+         index_l = index_f + per_partition_vocab_size
+         return index_f, index_l
+
+     @staticmethod
+     def vocab_range_from_global_vocab_size(global_vocab_size: int, rank: int, world_size: int) -> Sequence[int]:
+         per_partition_vocab_size = divide(global_vocab_size, world_size)
+         return VocabUtility.vocab_range_from_per_partition_vocab_size(
+             per_partition_vocab_size, rank, world_size
+         )
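As a worked example, a (padded) vocabulary of 50304 tokens split over 8 tensor-parallel ranks gives 50304 / 8 = 6288 entries per partition, so rank 3 owns the half-open range [18864, 25152):

    first, last = VocabUtility.vocab_range_from_global_vocab_size(
        global_vocab_size=50304, rank=3, world_size=8)
    assert (first, last) == (18864, 25152)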