nshtrainer 0.14.2__py3-none-any.whl → 0.14.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nshtrainer/_experimental/__init__.py +1 -2
- nshtrainer/trainer/signal_connector.py +3 -1
- {nshtrainer-0.14.2.dist-info → nshtrainer-0.14.4.dist-info}/METADATA +1 -1
- {nshtrainer-0.14.2.dist-info → nshtrainer-0.14.4.dist-info}/RECORD +5 -8
- nshtrainer/_experimental/flops/__init__.py +0 -48
- nshtrainer/_experimental/flops/flop_counter.py +0 -787
- nshtrainer/_experimental/flops/module_tracker.py +0 -140
- {nshtrainer-0.14.2.dist-info → nshtrainer-0.14.4.dist-info}/WHEEL +0 -0
nshtrainer/_experimental/__init__.py

@@ -1,2 +1 @@
-from .
-from .flops import measure_flops as measure_flops
+from lightning.fabric.utilities.throughput import measure_flops as measure_flops
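With this change, nshtrainer._experimental.measure_flops is simply a re-export of Lightning Fabric's utility rather than the vendored implementation removed below. A minimal usage sketch, assuming Lightning >= 2.1 and PyTorch >= 2.1 (the model and shapes are placeholders); note that Fabric's function takes the model as its first argument, unlike the removed wrapper, which took only forward_fn, loss_fn, and display::

    import torch
    from nshtrainer._experimental import measure_flops  # now lightning.fabric.utilities.throughput.measure_flops

    with torch.device("meta"):  # meta device: shapes only, no real memory or compute
        model = torch.nn.Linear(32, 16)
        x = torch.randn(2, 32)

    fwd_flops = measure_flops(model, lambda: model(x))
    fwd_and_bwd_flops = measure_flops(model, lambda: model(x), lambda y: y.sum())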
nshtrainer/trainer/signal_connector.py

@@ -1,5 +1,6 @@
 import logging
 import os
+import platform
 import re
 import signal
 import subprocess

@@ -25,6 +26,7 @@ log = logging.getLogger(__name__)

 _SIGNUM = int | signal.Signals
 _HANDLER: TypeAlias = Callable[[_SIGNUM, FrameType], Any] | int | signal.Handlers | None
+_IS_WINDOWS = platform.system() == "Windows"


 def _resolve_requeue_signals():

@@ -57,7 +59,7 @@ class _SignalConnector(_LightningSignalConnector):
         handlers: list[_HANDLER],
         replace_existing: bool = False,
     ):
-        if
+        if _IS_WINDOWS:
             log.info(
                 f"Signal {signum.name} has no handlers or is not supported on Windows."
             )
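The new module-level constant makes the Windows check explicit: POSIX signal handlers (used for requeue handling) cannot be registered on Windows, so registration is skipped with an informational log instead of failing. A minimal sketch of the guard pattern introduced here (the function name _register_handler is illustrative, not the package's actual method)::

    import logging
    import platform
    import signal

    log = logging.getLogger(__name__)
    _IS_WINDOWS = platform.system() == "Windows"

    def _register_handler(signum: signal.Signals, handler) -> None:
        if _IS_WINDOWS:
            # POSIX-only signals cannot be hooked on Windows; log and bail out.
            log.info(f"Signal {signum.name} has no handlers or is not supported on Windows.")
            return
        signal.signal(signum, handler)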
{nshtrainer-0.14.2.dist-info → nshtrainer-0.14.4.dist-info}/RECORD

@@ -2,10 +2,7 @@ nshtrainer/__init__.py,sha256=39loiLLXbaGiozEsAn8mPHopxaPsek8JsgR9DD2gxtY,583
 nshtrainer/_checkpoint/loader.py,sha256=DSaNR8194kWon4O1svslNsCcN_8vlyLbF0LNCPfUpzI,13789
 nshtrainer/_checkpoint/metadata.py,sha256=onmetLp5eKbA86abq1PTkwAOO7bWj7Pa1EGUjl2TEjQ,5153
 nshtrainer/_checkpoint/saver.py,sha256=DkbCH0YeOJ71m32vAARiQdGBf0hvwwdoAV8LOFGy-0Y,1428
-nshtrainer/_experimental/__init__.py,sha256=
-nshtrainer/_experimental/flops/__init__.py,sha256=edo9Ez3LlrnxkNRX9W6YBhPkRPKYGLpkpnl5gx7sEX8,1550
-nshtrainer/_experimental/flops/flop_counter.py,sha256=-sL0Fy6poXa__hyzUMdZScjPULp4coQELQpPU6p6dXU,25736
-nshtrainer/_experimental/flops/module_tracker.py,sha256=bUL-IRTd0aF_DwmXkZjHZAA31p4ZEhyqhc26XWKQUUY,4922
+nshtrainer/_experimental/__init__.py,sha256=pEXPyI184UuDHvfh4p9Kg9nQZQZI41e4_HvNd4BK-yg,81
 nshtrainer/callbacks/__init__.py,sha256=4qocBDzQbLLhhbIEfvbA3SQB_Dy9ZJH7keMwPay-ZS8,2359
 nshtrainer/callbacks/_throughput_monitor_callback.py,sha256=aJo_11rc4lo0IYOd-kHmPDtzdC4ctgXyRudkRJqH4m4,23184
 nshtrainer/callbacks/actsave.py,sha256=qbnaKts4_dvjPeAaPtv7Ds12_vEWzaHUfg_--49NB9I,4041

@@ -78,7 +75,7 @@ nshtrainer/scripts/find_packages.py,sha256=FbdlfmAefttFSMfaT0A46a-oHLP_ioaQKihwB
 nshtrainer/trainer/__init__.py,sha256=P2rmr8oBVTHk-HJHYPcUwWqDEArMbPR4_rPpATbWK3E,40
 nshtrainer/trainer/_runtime_callback.py,sha256=sd2cUdRJG-UCdQr9ruZvEYpNGNF1t2W2fuxwwVlQD9E,4164
 nshtrainer/trainer/checkpoint_connector.py,sha256=F2tkHogbMAa5U7335sm77sZBkjEDa5v46XbJCH9Mg6c,2167
-nshtrainer/trainer/signal_connector.py,sha256=
+nshtrainer/trainer/signal_connector.py,sha256=2EzkVktlasl8PgWAKNLDZRUMY__gRlDy1HdinAU-tfU,10740
 nshtrainer/trainer/trainer.py,sha256=M97phnALfG18VxkMLoDr5AKFf4UaPBdc6S2BghdBtas,17103
 nshtrainer/util/_environment_info.py,sha256=yPtAbgjCY4tkvh5wp9sjNsF0Z45TYwzEAM_N2_b5BbY,23123
 nshtrainer/util/_useful_types.py,sha256=dwZokFkIe7M5i2GR3nQ9A1lhGw06DMAFfH5atyquqSA,8000

@@ -87,6 +84,6 @@ nshtrainer/util/seed.py,sha256=Or2wMPsnQxfnZ2xfBiyMcHFIUt3tGTNeMMyOEanCkqs,280
 nshtrainer/util/slurm.py,sha256=rofIU26z3SdL79SF45tNez6juou1cyDLz07oXEZb9Hg,1566
 nshtrainer/util/typed.py,sha256=NGuDkDzFlc1fAoaXjOFZVbmj0mRFjsQi1E_hPa7Bn5U,128
 nshtrainer/util/typing_utils.py,sha256=8ptjSSLZxlmy4FY6lzzkoGoF5fGNClo8-B_c0XHQaNU,385
-nshtrainer-0.14.
-nshtrainer-0.14.
-nshtrainer-0.14.
+nshtrainer-0.14.4.dist-info/METADATA,sha256=Xg7ON7o-nR8N-4C8E59k2jsvIaleKPDas5VpdTVyr6s,860
+nshtrainer-0.14.4.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+nshtrainer-0.14.4.dist-info/RECORD,,
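Each RECORD row has the form path,sha256=<digest>,<size in bytes>, where the digest is the URL-safe base64 encoding of the file's SHA-256 hash with the trailing "=" padding stripped (PEP 376/627). A small sketch of how one of the new entries could be re-derived from an installed copy (the local path is illustrative)::

    import base64
    import hashlib
    from pathlib import Path

    def record_hash(path: str) -> str:
        digest = hashlib.sha256(Path(path).read_bytes()).digest()
        # RECORD stores URL-safe base64 without '=' padding
        return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

    print(record_hash("nshtrainer/trainer/signal_connector.py"))
    # expected for 0.14.4: sha256=2EzkVktlasl8PgWAKNLDZRUMY__gRlDy1HdinAU-tfU (10740 bytes)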
nshtrainer/_experimental/flops/__init__.py

@@ -1,48 +0,0 @@
-from collections.abc import Callable
-
-import torch
-from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_1
-
-MEASURE_FLOPS_AVAILABLE = _TORCH_GREATER_EQUAL_2_1
-
-
-def measure_flops(
-    forward_fn: Callable[[], torch.Tensor],
-    loss_fn: Callable[[torch.Tensor], torch.Tensor] | None = None,
-    display: bool = True,
-) -> int:
-    """Utility to compute the total number of FLOPs used by a module during training or during inference.
-
-    It's recommended to create a meta-device model for this:
-
-    Example::
-
-        with torch.device("meta"):
-            model = MyModel()
-            x = torch.randn(2, 32)
-
-        model_fwd = lambda: model(x)
-        fwd_flops = measure_flops(model, model_fwd)
-
-        model_loss = lambda y: y.sum()
-        fwd_and_bwd_flops = measure_flops(model, model_fwd, model_loss)
-
-    Args:
-        model: The model whose FLOPs should be measured.
-        forward_fn: A function that runs ``forward`` on the model and returns the result.
-        loss_fn: A function that computes the loss given the ``forward_fn`` output. If provided, the loss and `backward`
-            FLOPs will be included in the result.
-
-    """
-    if not MEASURE_FLOPS_AVAILABLE:
-        raise ImportError("`measure_flops` requires PyTorch >= 2.1.")
-
-    from .flop_counter import FlopCounterMode
-
-    flop_counter = FlopCounterMode(display=display)
-    with flop_counter:
-        if loss_fn is None:
-            forward_fn()
-        else:
-            loss_fn(forward_fn()).backward()
-    return flop_counter.get_total_flops()
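The removed wrapper only checked the PyTorch version and then deferred to the vendored FlopCounterMode below. Equivalent measurements can be taken directly against PyTorch's own counter, which the vendored code appears to copy; a minimal sketch assuming PyTorch >= 2.1 (model and shapes are placeholders)::

    import torch
    from torch.utils.flop_counter import FlopCounterMode

    with torch.device("meta"):
        model = torch.nn.Linear(32, 16)
        x = torch.randn(2, 32)

    counter = FlopCounterMode(display=False)
    with counter:
        model(x).sum().backward()  # forward + backward FLOPs
    print(counter.get_total_flops())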
nshtrainer/_experimental/flops/flop_counter.py

@@ -1,787 +0,0 @@
-# type: ignore
-import warnings
-from collections import defaultdict
-from functools import wraps
-from math import prod
-from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
-
-import torch
-from torch._decomp import register_decomposition
-from torch.utils._python_dispatch import TorchDispatchMode
-from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten
-
-from .module_tracker import ModuleTracker
-
-__all__ = ["FlopCounterMode", "register_flop_formula"]
-
-aten = torch.ops.aten
-
-
-def get_shape(i):
-    if isinstance(i, torch.Tensor):
-        return i.shape
-    return i
-
-
-flop_registry: Dict[Any, Any] = {}
-
-
-def shape_wrapper(f):
-    @wraps(f)
-    def nf(*args, out_val=None, **kwargs):
-        args, kwargs, out_shape = tree_map(get_shape, (args, kwargs, out_val))
-        return f(*args, out_shape=out_shape, **kwargs)
-
-    return nf
-
-
-def register_flop_formula(targets, get_raw=False):
-    def register_fun(flop_formula):
-        if not get_raw:
-            flop_formula = shape_wrapper(flop_formula)
-        register_decomposition(targets, registry=flop_registry, unsafe=True)(
-            flop_formula
-        )
-        return flop_formula
-
-    return register_fun
-
-
-@register_flop_formula(aten.mm)
-def mm_flop(a_shape, b_shape, *args, out_shape=None, **kwargs) -> int:
-    """Count flops for matmul."""
-    # Inputs should be a list of length 2.
-    # Inputs contains the shapes of two matrices.
-    m, k = a_shape
-    k2, n = b_shape
-    assert k == k2
-    # NB(chilli): Should be 2 * k - 1 technically for FLOPs.
-    return m * n * 2 * k
-
-
-@register_flop_formula(aten.addmm)
-def addmm_flop(self_shape, a_shape, b_shape, out_shape=None, **kwargs) -> int:
-    """Count flops for addmm."""
-    return mm_flop(a_shape, b_shape)
-
-
-@register_flop_formula(aten.bmm)
-def bmm_flop(a_shape, b_shape, out_shape=None, **kwargs) -> int:
-    """Count flops for the bmm operation."""
-    # Inputs should be a list of length 2.
-    # Inputs contains the shapes of two tensor.
-    b, m, k = a_shape
-    b2, k2, n = b_shape
-    assert b == b2
-    assert k == k2
-    # NB(chilli): Should be 2 * k - 1 technically for FLOPs.
-    flop = b * m * n * 2 * k
-    return flop
-
-
-@register_flop_formula(aten.baddbmm)
-def baddbmm_flop(self_shape, a_shape, b_shape, out_shape=None, **kwargs) -> int:
-    """Count flops for the baddbmm operation."""
-    # Inputs should be a list of length 3.
-    # Inputs contains the shapes of three tensors.
-    return bmm_flop(a_shape, b_shape)
-
-
-def conv_flop_count(
-    x_shape: List[int],
-    w_shape: List[int],
-    out_shape: List[int],
-    transposed: bool = False,
-) -> int:
-    """Count flops for convolution.
-
-    Note only multiplication is
-    counted. Computation for bias are ignored.
-    Flops for a transposed convolution are calculated as
-    flops = (x_shape[2:] * prod(w_shape) * batch_size).
-    Args:
-        x_shape (list(int)): The input shape before convolution.
-        w_shape (list(int)): The filter shape.
-        out_shape (list(int)): The output shape after convolution.
-        transposed (bool): is the convolution transposed
-    Returns:
-        int: the number of flops
-    """
-
-    batch_size = x_shape[0]
-    conv_shape = (x_shape if transposed else out_shape)[2:]
-    c_out, c_in, *filter_size = w_shape
-
-    """
-    General idea here is that for a regular conv, for each point in the output
-    spatial dimension we convolve the filter with something (hence
-    `prod(conv_shape) * prod(filter_size)` ops). Then, this gets multiplied by
-    1. batch_size, 2. the cross product of input and weight channels.
-
-    For the transpose, it's not each point in the *output* spatial dimension but
-    each point in the *input* spatial dimension.
-    """
-    # NB(chilli): I don't think this properly accounts for padding :think:
-    # NB(chilli): Should be 2 * c_in - 1 technically for FLOPs.
-    flop = prod(conv_shape) * prod(filter_size) * batch_size * c_out * c_in * 2
-    return flop
-
-
-@register_flop_formula([aten.convolution, aten._convolution])
-def conv_flop(
-    x_shape,
-    w_shape,
-    _bias,
-    _stride,
-    _padding,
-    _dilation,
-    transposed,
-    *args,
-    out_shape=None,
-    **kwargs,
-) -> int:
-    """Count flops for convolution."""
-    return conv_flop_count(x_shape, w_shape, out_shape, transposed=transposed)
-
-
-@register_flop_formula(aten.convolution_backward)
-def conv_backward_flop(
-    grad_out_shape,
-    x_shape,
-    w_shape,
-    _bias,
-    _stride,
-    _padding,
-    _dilation,
-    transposed,
-    _output_padding,
-    _groups,
-    output_mask,
-    out_shape,
-) -> int:
-    def t(shape):
-        return [shape[1], shape[0]] + list(shape[2:])
-
-    flop_count = 0
-
-    """
-    Let's say we have a regular 1D conv
-    {A, B, C} [inp]
-    {i, j} [weight]
-    => (conv)
-    {Ai + Bj, Bi + Cj} [out]
-
-    And as a reminder, the transposed conv of the above is
-    => {Ai, Aj + Bi, Bj + Ci, Cj} [transposed conv out]
-
-    For the backwards of conv, we now have
-    {D, E} [grad_out]
-    {A, B, C} [inp]
-    {i, j} [weight]
-
-    # grad_inp as conv_transpose(grad_out, weight)
-    Let's first compute grad_inp. To do so, we can simply look at all the
-    multiplications that each element of inp is involved in. For example, A is
-    only involved in the first element of the output (and thus only depends upon
-    D in grad_out), and C is only involved in the last element of the output
-    (and thus only depends upon E in grad_out)
-
-    {Di, Dj + Ei, Ej} [grad_inp]
-
-    Note that this corresponds to the below conv_transpose. This gives us the
-    output_mask[0] branch, which is grad_inp.
-
-    {D, E} [inp (grad_out)]
-    {i, j} [weight]
-    => (conv_transpose)
-    {Di, Dj + Ei, Ej} [out (grad_inp)]
-
-    I leave the fact that grad_inp for a transposed conv is just conv(grad_out,
-    weight) as an exercise for the reader.
-
-    # grad_weight as conv(inp, grad_out)
-    To compute grad_weight, we again look at the terms in the output, which as
-    a reminder is:
-    => {Ai + Bj, Bi + Cj} [out]
-    => {D, E} [grad_out]
-    If we manually compute the gradient for the weights, we see it's
-    {AD + BE, BD + CE} [grad_weight]
-
-    This corresponds to the below conv
-    {A, B, C} [inp]
-    {D, E} [weight (grad_out)]
-    => (conv)
-    {AD + BE, BD + CE} [out (grad_weight)]
-
-    # grad_weight of transposed conv as conv(grad_out, inp)
-    As a reminder, the terms of the output of a transposed conv are:
-    => {Ai, Aj + Bi, Bj + Ci, Cj} [transposed conv out]
-    => {D, E, F, G} [grad_out]
-
-    Manually computing the gradient for the weights, we see it's
-    {AD + BE + CF, AE + BF + CG} [grad_weight]
-
-    This corresponds to the below conv
-    {D, E, F, G} [inp (grad_out)]
-    {A, B, C} [weight (inp)]
-    => (conv)
-    {AD + BE + CF, AE + BF + CG} [out (grad_weight)]
-
-    For the full backwards formula, there are also some details involving
-    transpose of the batch/channel dimensions and groups, but I skip those for
-    the sake of brevity (and they're pretty similar to matmul backwards)
-
-    Check [conv backwards decomposition as conv forwards]
-    """
-    # grad_inp as conv_transpose(grad_out, weight)
-    if output_mask[0]:
-        grad_input_shape = get_shape(out_shape[0])
-        flop_count += conv_flop_count(
-            grad_out_shape, w_shape, grad_input_shape, not transposed
-        )
-
-    if output_mask[1]:
-        grad_weight_shape = get_shape(out_shape[1])
-        if transposed:
-            # grad_weight of transposed conv as conv(grad_out, inp)
-            flop_count += conv_flop_count(
-                t(grad_out_shape), t(x_shape), t(grad_weight_shape), transposed=False
-            )
-        else:
-            # grad_weight as conv(inp, grad_out)
-            flop_count += conv_flop_count(
-                t(x_shape), t(grad_out_shape), t(grad_weight_shape), transposed=False
-            )
-
-    return flop_count
-
-
-def sdpa_flop_count(query_shape, key_shape, value_shape):
-    """
-    Count flops for self-attention.
-
-    NB: We can assume that value_shape == key_shape
-    """
-    b, h, s_q, d_q = query_shape
-    _b2, _h2, s_k, _d2 = key_shape
-    _b3, _h3, _s3, d_v = value_shape
-    assert (
-        b == _b2 == _b3 and h == _h2 == _h3 and d_q == _d2 and s_k == _s3 and d_q == _d2
-    )
-    total_flops = 0
-    # q: [b, h, s_q, d_q] @ k: [b, h, d_q, s_k] -> scores: [b, h, s_q, s_k]
-    total_flops += bmm_flop((b * h, s_q, d_q), (b * h, d_q, s_k))
-    # scores: [b, h, s_q, s_k] @ v: [b, h, s_k, d_v] -> out: [b, h, s_q, d_v]
-    total_flops += bmm_flop((b * h, s_q, s_k), (b * h, s_k, d_v))
-    return total_flops
-
-
-@register_flop_formula(
-    [
-        aten._scaled_dot_product_efficient_attention,
-        aten._scaled_dot_product_flash_attention,
-    ]
-)
-def sdpa_flop(
-    query_shape, key_shape, value_shape, *args, out_shape=None, **kwargs
-) -> int:
-    """Count flops for self-attention."""
-    # NB: We aren't accounting for causal attention here
-    return sdpa_flop_count(query_shape, key_shape, value_shape)
-
-
-def _unpack_flash_attention_nested_shapes(
-    *,
-    query,
-    key,
-    value,
-    grad_out=None,
-    cum_seq_q,
-    cum_seq_k,
-    max_q,
-    max_k,
-) -> Iterator[
-    Tuple[Tuple[int, ...], Tuple[int, ...], Tuple[int, ...], Optional[Tuple[int, ...]]]
-]:
-    """
-    Given inputs to a flash_attention_(forward|backward) kernel, this will handle behavior for
-    NestedTensor inputs by effectively unbinding the NestedTensor and yielding the shapes for
-    each batch element.
-
-    In the case that this isn't a NestedTensor kernel, then it just yields the original shapes.
-    """
-    if cum_seq_q is not None:
-        # This means we should be dealing with a Nested Jagged Tensor query.
-        # The inputs will have shape (sum(sequence len), heads, dimension)
-        # In comparison, non-Nested inputs have shape (batch, heads, sequence len, dimension)
-        # To deal with this, we convert to a shape of (batch, heads, max_seq_len, dimension)
-        # So the flops calculation in this case is an overestimate of the actual flops.
-        assert len(key.shape) == 3
-        assert len(value.shape) == 3
-        assert grad_out is None or grad_out.shape == query.shape
-        _, h_q, d_q = query.shape
-        _, h_k, d_k = key.shape
-        _, h_v, d_v = value.shape
-        assert cum_seq_q is not None
-        assert cum_seq_k is not None
-        assert cum_seq_q.shape == cum_seq_k.shape
-        seq_q_lengths = (cum_seq_q[1:] - cum_seq_q[:-1]).tolist()
-        seq_k_lengths = (cum_seq_k[1:] - cum_seq_k[:-1]).tolist()
-        for seq_q_len, seq_k_len in zip(seq_q_lengths, seq_k_lengths):
-            new_query_shape = (1, h_q, seq_q_len, d_q)
-            new_key_shape = (1, h_k, seq_k_len, d_k)
-            new_value_shape = (1, h_v, seq_k_len, d_v)
-            new_grad_out_shape = new_query_shape if grad_out is not None else None
-            yield new_query_shape, new_key_shape, new_value_shape, new_grad_out_shape
-        return
-
-    yield (
-        query.shape,
-        key.shape,
-        value.shape,
-        grad_out.shape if grad_out is not None else None,
-    )
-
-
-def _unpack_efficient_attention_nested_shapes(
-    *,
-    query,
-    key,
-    value,
-    grad_out=None,
-    cu_seqlens_q,
-    cu_seqlens_k,
-    max_seqlen_q,
-    max_seqlen_k,
-) -> Iterator[
-    Tuple[Tuple[int, ...], Tuple[int, ...], Tuple[int, ...], Optional[Tuple[int, ...]]]
-]:
-    """
-    Given inputs to a efficient_attention_(forward|backward) kernel, this will handle behavior for
-    NestedTensor inputs by effectively unbinding the NestedTensor and yielding the shapes for
-    each batch element.
-
-    In the case that this isn't a NestedTensor kernel, then it just yields the original shapes.
-    """
-    if cu_seqlens_q is not None:
-        # Unlike flash_attention_forward, we get a 4D tensor instead of a 3D tensor for efficient attention.
-        #
-        # This means we should be dealing with a Nested Jagged Tensor query.
-        # The inputs will have shape (sum(sequence len), heads, dimension)
-        # In comparison, non-Nested inputs have shape (batch, heads, sequence len, dimension)
-        # To deal with this, we convert to a shape of (batch, heads, max_seq_len, dimension)
-        # So the flops calculation in this case is an overestimate of the actual flops.
-        assert len(key.shape) == 4
-        assert len(value.shape) == 4
-        assert grad_out is None or grad_out.shape == query.shape
-        _, _, h_q, d_q = query.shape
-        _, _, h_k, d_k = key.shape
-        _, _, h_v, d_v = value.shape
-        assert cu_seqlens_q is not None
-        assert cu_seqlens_k is not None
-        assert cu_seqlens_q.shape == cu_seqlens_k.shape
-        seqlens_q = (cu_seqlens_q[1:] - cu_seqlens_q[:-1]).tolist()
-        seqlens_k = (cu_seqlens_k[1:] - cu_seqlens_k[:-1]).tolist()
-        for len_q, len_k in zip(seqlens_q, seqlens_k):
-            new_query_shape = (1, h_q, len_q, d_q)
-            new_key_shape = (1, h_k, len_k, d_k)
-            new_value_shape = (1, h_v, len_k, d_v)
-            new_grad_out_shape = new_query_shape if grad_out is not None else None
-            yield new_query_shape, new_key_shape, new_value_shape, new_grad_out_shape
-        return
-
-    yield (
-        query.shape,
-        key.shape,
-        value.shape,
-        grad_out.shape if grad_out is not None else None,
-    )
-
-
-@register_flop_formula(aten._flash_attention_forward, get_raw=True)
-def _flash_attention_forward_flop(
-    query,
-    key,
-    value,
-    cum_seq_q,
-    cum_seq_k,
-    max_q,
-    max_k,
-    *args,
-    out_shape=None,
-    **kwargs,
-) -> int:
-    """Count flops for self-attention."""
-    # NB: We aren't accounting for causal attention here
-    # in case this is a nested tensor, we unpack the individual batch elements
-    # and then sum the flops per batch element
-    sizes = _unpack_flash_attention_nested_shapes(
-        query=query,
-        key=key,
-        value=value,
-        cum_seq_q=cum_seq_q,
-        cum_seq_k=cum_seq_k,
-        max_q=max_q,
-        max_k=max_k,
-    )
-    return sum(
-        sdpa_flop_count(query_shape, key_shape, value_shape)
-        for query_shape, key_shape, value_shape, _ in sizes
-    )
-
-
-@register_flop_formula(aten._efficient_attention_forward, get_raw=True)
-def _efficient_attention_forward_flop(
-    query,
-    key,
-    value,
-    bias,
-    cu_seqlens_q,
-    cu_seqlens_k,
-    max_seqlen_q,
-    max_seqlen_k,
-    *args,
-    **kwargs,
-) -> int:
-    """Count flops for self-attention."""
-    # NB: We aren't accounting for causal attention here
-    # in case this is a nested tensor, we unpack the individual batch elements
-    # and then sum the flops per batch element
-    sizes = _unpack_efficient_attention_nested_shapes(
-        query=query,
-        key=key,
-        value=value,
-        cu_seqlens_q=cu_seqlens_q,
-        cu_seqlens_k=cu_seqlens_k,
-        max_seqlen_q=max_seqlen_q,
-        max_seqlen_k=max_seqlen_k,
-    )
-    return sum(
-        sdpa_flop_count(query_shape, key_shape, value_shape)
-        for query_shape, key_shape, value_shape, _ in sizes
-    )
-
-
-def sdpa_backward_flop_count(grad_out_shape, query_shape, key_shape, value_shape):
-    total_flops = 0
-    b, h, s_q, d_q = query_shape
-    _b2, _h2, s_k, _d2 = key_shape
-    _b3, _h3, _s3, d_v = value_shape
-    _b4, _h4, _s4, _d4 = grad_out_shape
-    assert b == _b2 == _b3 == _b4 and h == _h2 == _h3 == _h4 and d_q == _d2
-    assert d_v == _d4 and s_k == _s3 and s_q == _s4
-    total_flops = 0
-    # Step 1: We recompute the scores matrix.
-    # q: [b, h, s_q, d_q] @ k: [b, h, d_q, s_k] -> scores: [b, h, s_q, s_k]
-    total_flops += bmm_flop((b * h, s_q, d_q), (b * h, d_q, s_k))
-
-    # Step 2: We propagate the gradients through the score @ v operation.
-    # gradOut: [b, h, s_q, d_v] @ v: [b, h, d_v, s_k] -> gradScores: [b, h, s_q, s_k]
-    total_flops += bmm_flop((b * h, s_q, d_v), (b * h, d_v, s_k))
-    # scores: [b, h, s_k, s_q] @ gradOut: [b, h, s_q, d_v] -> gradV: [b, h, s_k, d_v]
-    total_flops += bmm_flop((b * h, s_k, s_q), (b * h, s_q, d_v))
-
-    # Step 3: We propagate th gradients through the k @ v operation
-    # gradScores: [b, h, s_q, s_k] @ k: [b, h, s_k, d_q] -> gradQ: [b, h, s_q, d_q]
-    total_flops += bmm_flop((b * h, s_q, s_k), (b * h, s_k, d_q))
-    # q: [b, h, d_q, s_q] @ gradScores: [b, h, s_q, s_k] -> gradK: [b, h, d_q, s_k]
-    total_flops += bmm_flop((b * h, d_q, s_q), (b * h, s_q, s_k))
-    return total_flops
-
-
-@register_flop_formula(
-    [
-        aten._scaled_dot_product_efficient_attention_backward,
-        aten._scaled_dot_product_flash_attention_backward,
-    ]
-)
-def sdpa_backward_flop(
-    grad_out_shape, query_shape, key_shape, value_shape, *args, out_shape=None, **kwargs
-) -> int:
-    """Count flops for self-attention backward."""
-    return sdpa_backward_flop_count(grad_out_shape, query_shape, key_shape, value_shape)
-
-
-@register_flop_formula(aten._flash_attention_backward, get_raw=True)
-def _flash_attention_backward_flop(
-    grad_out,
-    query,
-    key,
-    value,
-    out,  # named _out_shape to avoid kwarg collision with out_shape created in wrapper
-    logsumexp,
-    cum_seq_q,
-    cum_seq_k,
-    max_q,
-    max_k,
-    *args,
-    **kwargs,
-) -> int:
-    # in case this is a nested tensor, we unpack the individual batch elements
-    # and then sum the flops per batch element
-    shapes = _unpack_flash_attention_nested_shapes(
-        query=query,
-        key=key,
-        value=value,
-        grad_out=grad_out,
-        cum_seq_q=cum_seq_q,
-        cum_seq_k=cum_seq_k,
-        max_q=max_q,
-        max_k=max_k,
-    )
-    return sum(
-        sdpa_backward_flop_count(grad_out_shape, query_shape, key_shape, value_shape)
-        for query_shape, key_shape, value_shape, grad_out_shape in shapes
-    )
-
-
-@register_flop_formula(aten._efficient_attention_backward, get_raw=True)
-def _efficient_attention_backward_flop(
-    grad_out,
-    query,
-    key,
-    value,
-    bias,
-    out,  # named _out to avoid kwarg collision with out created in wrapper
-    cu_seqlens_q,
-    cu_seqlens_k,
-    max_seqlen_q,
-    max_seqlen_k,
-    *args,
-    **kwargs,
-) -> int:
-    # in case this is a nested tensor, we unpack the individual batch elements
-    # and then sum the flops per batch element
-    shapes = _unpack_efficient_attention_nested_shapes(
-        query=query,
-        key=key,
-        value=value,
-        grad_out=grad_out,
-        cu_seqlens_q=cu_seqlens_q,
-        cu_seqlens_k=cu_seqlens_k,
-        max_seqlen_q=max_seqlen_q,
-        max_seqlen_k=max_seqlen_k,
-    )
-    return sum(
-        sdpa_backward_flop_count(grad_out_shape, query_shape, key_shape, value_shape)
-        for query_shape, key_shape, value_shape, grad_out_shape in shapes
-    )
-
-
-flop_registry = {
-    aten.mm: mm_flop,
-    aten.addmm: addmm_flop,
-    aten.bmm: bmm_flop,
-    aten.baddbmm: baddbmm_flop,
-    aten.convolution: conv_flop,
-    aten._convolution: conv_flop,
-    aten.convolution_backward: conv_backward_flop,
-    aten._scaled_dot_product_efficient_attention: sdpa_flop,
-    aten._scaled_dot_product_flash_attention: sdpa_flop,
-    aten._scaled_dot_product_efficient_attention_backward: sdpa_backward_flop,
-    aten._scaled_dot_product_flash_attention_backward: sdpa_backward_flop,
-    aten._flash_attention_forward: _flash_attention_forward_flop,
-    aten._efficient_attention_forward: _efficient_attention_forward_flop,
-    aten._flash_attention_backward: _flash_attention_backward_flop,
-    aten._efficient_attention_backward: _efficient_attention_backward_flop,
-}
-
-
-def normalize_tuple(x):
-    if not isinstance(x, tuple):
-        return (x,)
-    return x
-
-
-# Define the suffixes for different orders of magnitude
-suffixes = ["", "K", "M", "B", "T"]
-
-
-# Thanks BingChat!
-def get_suffix_str(number):
-    # Find the index of the appropriate suffix based on the number of digits
-    # with some additional overflow.
-    # i.e. 1.01B should be displayed as 1001M, not 1.001B
-    index = max(0, min(len(suffixes) - 1, (len(str(number)) - 2) // 3))
-    return suffixes[index]
-
-
-def convert_num_with_suffix(number, suffix):
-    index = suffixes.index(suffix)
-    # Divide the number by 1000^index and format it to two decimal places
-    value = f"{number / 1000 ** index:.3f}"
-    # Return the value and the suffix as a string
-    return value + suffixes[index]
-
-
-def convert_to_percent_str(num, denom):
-    if denom == 0:
-        return "0%"
-    return f"{num / denom:.2%}"
-
-
-def _pytreeify_preserve_structure(f):
-    @wraps(f)
-    def nf(args):
-        flat_args, spec = tree_flatten(args)
-        out = f(*flat_args)
-        return tree_unflatten(out, spec)
-
-    return nf
-
-
-class FlopCounterMode(TorchDispatchMode):
-    """
-    ``FlopCounterMode`` is a context manager that counts the number of flops within its context.
-
-    It does this using a ``TorchDispatchMode``.
-
-    It also supports hierarchical output by passing a module (or list of
-    modules) to FlopCounterMode on construction. If you do not need hierarchical
-    output, you do not need to use it with a module.
-
-    Example usage
-
-    .. code-block:: python
-
-        mod = ...
-        with FlopCounterMode(mod) as flop_counter:
-            mod.sum().backward()
-
-    """
-
-    def __init__(
-        self,
-        mods: Optional[Union[torch.nn.Module, List[torch.nn.Module]]] = None,
-        depth: int = 2,
-        display: bool = True,
-        custom_mapping: Optional[Dict[Any, Any]] = None,
-    ):
-        self.flop_counts: Dict[str, Dict[Any, int]] = defaultdict(
-            lambda: defaultdict(int)
-        )
-        self.depth = depth
-        self.display = display
-        if custom_mapping is None:
-            custom_mapping = {}
-        if mods is not None:
-            warnings.warn(
-                "mods argument is not needed anymore, you can stop passing it",
-                stacklevel=2,
-            )
-        self.flop_registry = {
-            **flop_registry,
-            **{
-                k: v if getattr(v, "_get_raw", False) else shape_wrapper(v)
-                for k, v in custom_mapping.items()
-            },
-        }
-        self.mod_tracker = ModuleTracker()
-
-    def get_total_flops(self) -> int:
-        return sum(self.flop_counts["Global"].values())
-
-    def get_flop_counts(self) -> Dict[str, Dict[Any, int]]:
-        """Return the flop counts as a dictionary of dictionaries.
-
-        The outer
-        dictionary is keyed by module name, and the inner dictionary is keyed by
-        operation name.
-
-        Returns:
-            Dict[str, Dict[Any, int]]: The flop counts as a dictionary.
-        """
-        return {k: dict(v) for k, v in self.flop_counts.items()}
-
-    def get_table(self, depth=None):
-        if depth is None:
-            depth = self.depth
-        if depth is None:
-            depth = 999999
-
-        import tabulate
-
-        tabulate.PRESERVE_WHITESPACE = True
-        header = ["Module", "FLOP", "% Total"]
-        values = []
-        global_flops = self.get_total_flops()
-        global_suffix = get_suffix_str(global_flops)
-        is_global_subsumed = False
-
-        def process_mod(mod_name, depth):
-            nonlocal is_global_subsumed
-
-            total_flops = sum(self.flop_counts[mod_name].values())
-
-            is_global_subsumed |= total_flops >= global_flops
-
-            padding = " " * depth
-            values = []
-            values.append(
-                [
-                    padding + mod_name,
-                    convert_num_with_suffix(total_flops, global_suffix),
-                    convert_to_percent_str(total_flops, global_flops),
-                ]
-            )
-            for k, v in self.flop_counts[mod_name].items():
-                values.append(
-                    [
-                        padding + " - " + str(k),
-                        convert_num_with_suffix(v, global_suffix),
-                        convert_to_percent_str(v, global_flops),
-                    ]
-                )
-            return values
-
-        for mod in sorted(self.flop_counts.keys()):
-            if mod == "Global":
-                continue
-            mod_depth = mod.count(".") + 1
-            if mod_depth > depth:
-                continue
-
-            cur_values = process_mod(mod, mod_depth - 1)
-            values.extend(cur_values)
-
-        # We do a bit of messing around here to only output the "Global" value
-        # if there are any FLOPs in there that aren't already fully contained by
-        # a module.
-        if "Global" in self.flop_counts and not is_global_subsumed:
-            for idx, value in enumerate(values):
-                values[idx][0] = " " + values[idx][0]
-
-            values = process_mod("Global", 0) + values
-
-        if len(values) == 0:
-            values = [["Global", "0", "0%"]]
-
-        return tabulate.tabulate(
-            values, headers=header, colalign=("left", "right", "right")
-        )
-
-    def __enter__(self):
-        self.flop_counts.clear()
-        self.mod_tracker.__enter__()
-        super().__enter__()
-        return self
-
-    def __exit__(self, *args):
-        super().__exit__(*args)
-        self.mod_tracker.__exit__()
-        if self.display:
-            print(self.get_table(self.depth))
-
-    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
-        kwargs = kwargs if kwargs else {}
-        out = func(*args, **kwargs)
-        return self._count_flops(func._overloadpacket, out, args, kwargs)
-
-    def _count_flops(self, func_packet, out, args, kwargs):
-        if func_packet in self.flop_registry:
-            flop_count_func = self.flop_registry[func_packet]
-            flop_count = flop_count_func(*args, **kwargs, out_val=out)  # type: ignore[operator]
-            for par in set(self.mod_tracker.parents):
-                self.flop_counts[par][func_packet] += flop_count
-
-        return out
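The removed file is essentially a vendored copy of PyTorch's FLOP counter: a registry mapping aten ops to closed-form counts over input shapes, plus a TorchDispatchMode that attributes those counts to the module hierarchy. The core arithmetic is easy to check by hand; for example, mm_flop counts an (m, k) @ (k, n) matmul as 2 * m * k * n, and sdpa_flop_count is just two batched matmuls of that form::

    # mm_flop: (m, k) @ (k, n) costs 2 * m * k * n (one multiply and one add per term)
    m, k, n = 128, 256, 64
    assert 2 * m * k * n == 4_194_304

    # sdpa_flop_count: (q @ k^T) then (scores @ v) for shapes [b, h, s, d]
    b, h, s, d = 8, 12, 512, 64
    sdpa = 2 * (b * h) * s * d * s + 2 * (b * h) * s * s * d
    assert sdpa == 4 * b * h * s * s * d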
nshtrainer/_experimental/flops/module_tracker.py

@@ -1,140 +0,0 @@
-import weakref
-from typing import Set
-
-import torch
-from torch.autograd.graph import register_multi_grad_hook
-from torch.nn.modules.module import (
-    register_module_forward_hook,
-    register_module_forward_pre_hook,
-)
-from torch.utils._pytree import tree_flatten
-
-__all__ = ["ModuleTracker"]
-
-
-class ModuleTracker:
-    """
-    ``ModuleTracker`` is a context manager that tracks the nn.Module hierarchy during execution
-    so that other system can query which Module is currently being executed (or its backward is being
-    executed).
-
-    You can access the ``parents`` attribute on this context manager to get the set of all the
-    Modules currently being executed via their fqn (fully qualified name, also used as the key within
-    the state_dict).
-    You can access the ``is_bw`` attribute to know if you are currently running in backward or not.
-
-    Note that ``parents`` is never empty and always contains the "Global" key. The ``is_bw`` flag
-    will remain ``True`` after the forward until another Module is executed. If you need it to be
-    more accurate, please submit an issue requesting this. Adding a map from fqn to the module instance
-    is possible but not done yet, please submit an issue requesting this if you need it.
-
-    Example usage
-
-    .. code-block:: python
-
-        mod = torch.nn.Linear(2, 2)
-
-        with ModuleTracker() as tracker:
-            # Access anything during the forward pass
-            def my_linear(m1, m2, bias):
-                print(f"Current modules: {tracker.parents}")
-                return torch.mm(m1, m2.t()) + bias
-            torch.nn.functional.linear = my_linear
-
-            mod(torch.rand(2, 2))
-
-    """
-
-    parents: Set[str]
-    """
-    A Set containing the fqn for each module currently running their forward
-    """
-
-    def __init__(self):
-        self.parents = {"Global"}
-        self._known_modules: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
-        self._seen_modules: weakref.WeakSet = weakref.WeakSet()
-        self._has_callback = False
-
-    def _maybe_set_engine_callback(self):
-        # This assumes no concurrent calls to backward
-        if self._has_callback:
-            return
-
-        def callback():
-            self.parents = {"Global"}
-            self._has_callback = False
-
-        torch.autograd.Variable._execution_engine.queue_callback(callback)
-        self._has_callback = True
-
-    @property
-    def is_bw(self):
-        """
-        A boolean marking if this is currently running during the backward pass or not
-        """
-        return torch._C._current_graph_task_id() != -1
-
-    def _get_mod_name(self, mod):
-        if mod not in self._known_modules:
-            self._known_modules[mod] = type(mod).__name__
-        mod_name = self._known_modules[mod]
-        if mod not in self._seen_modules:
-            for name, submod in mod.named_children():
-                self._known_modules[submod] = f"{mod_name}.{name}"
-                self._get_mod_name(submod)
-            self._seen_modules.add(mod)
-        return mod_name
-
-    def _get_append_fn(self, name, is_bw):
-        def fn(*args):
-            if is_bw:
-                self._maybe_set_engine_callback()
-            if name in self.parents:
-                print(
-                    "The module hierarchy tracking seems to be messed up."
-                    "Please file a bug to PyTorch."
-                )
-            self.parents.add(name)
-
-        return fn
-
-    def _get_pop_fn(self, name, is_bw):
-        def fn(*args):
-            if name in self.parents:
-                self.parents.remove(name)
-            elif not is_bw:
-                # Due to some input/output not requiring gradients, we cannot enforce
-                # proper nesting in backward
-                raise RuntimeError(
-                    "The Module hierarchy tracking is wrong. Report a bug to PyTorch"
-                )
-
-        return fn
-
-    def _fw_pre_hook(self, mod, input):
-        name = self._get_mod_name(mod)
-        self._get_append_fn(name, False)()
-
-        args, _ = tree_flatten(input)
-        tensors = [a for a in args if isinstance(a, torch.Tensor) and a.requires_grad]
-        if tensors:
-            register_multi_grad_hook(tensors, self._get_pop_fn(name, True))
-
-    def _fw_post_hook(self, mod, input, output):
-        name = self._get_mod_name(mod)
-        self._get_pop_fn(name, False)()
-
-        args, _ = tree_flatten(output)
-        tensors = [a for a in args if isinstance(a, torch.Tensor) and a.requires_grad]
-        if tensors:
-            register_multi_grad_hook(tensors, self._get_append_fn(name, True))
-
-    def __enter__(self):
-        self._fw_pre_handle = register_module_forward_pre_hook(self._fw_pre_hook)
-        self._fw_post_handle = register_module_forward_hook(self._fw_post_hook)
-        return self
-
-    def __exit__(self, *args):
-        self._fw_pre_handle.remove()
-        self._fw_post_handle.remove()
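ModuleTracker is what lets the FLOP counter attribute counts to individual modules: forward pre/post hooks push and pop fully qualified module names, and multi-grad hooks do the same during backward. Recent PyTorch releases ship what appears to be the upstream version of this file as torch.utils.module_tracker; a minimal usage sketch, assuming that module is available (otherwise there is no direct replacement in nshtrainer after this release)::

    import torch
    from torch.utils.module_tracker import ModuleTracker  # assumed available in recent PyTorch

    mod = torch.nn.Linear(2, 2)

    with ModuleTracker() as tracker:
        # tracker.parents reports the fqns of modules currently executing
        # (it always contains "Global"); query it from code that runs inside
        # a forward, e.g. a forward hook.
        handle = mod.register_forward_hook(lambda m, inp, out: print(tracker.parents))
        mod(torch.rand(2, 2))
        handle.remove()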
{nshtrainer-0.14.2.dist-info → nshtrainer-0.14.4.dist-info}/WHEEL

File without changes.