monai-weekly 1.4.dev2435__py3-none-any.whl → 1.4.dev2436__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,569 @@
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import inspect
15
+ import os
16
+ import tempfile
17
+ import threading
18
+ from collections import OrderedDict
19
+ from pathlib import Path
20
+ from types import MethodType
21
+ from typing import Any, Dict, List, Union
22
+
23
+ import torch
24
+
25
+ from monai.apps.utils import get_logger
26
+ from monai.networks.utils import add_casts_around_norms, convert_to_onnx, convert_to_torchscript, get_profile_shapes
27
+ from monai.utils.module import optional_import
28
+
29
+ polygraphy, polygraphy_imported = optional_import("polygraphy")
30
+ if polygraphy_imported:
31
+ from polygraphy.backend.common import bytes_from_path
32
+ from polygraphy.backend.trt import (
33
+ CreateConfig,
34
+ Profile,
35
+ engine_bytes_from_network,
36
+ engine_from_bytes,
37
+ network_from_onnx_path,
38
+ )
39
+
40
+ trt, trt_imported = optional_import("tensorrt")
41
+ torch_tensorrt, _ = optional_import("torch_tensorrt", "1.4.0")
42
+ cudart, _ = optional_import("cuda.cudart")
43
+
44
+
45
+ lock_sm = threading.Lock()
46
+
47
+
48
+ # Map of TRT dtype -> Torch dtype
49
+ def trt_to_torch_dtype_dict():
50
+ return {
51
+ trt.int32: torch.int32,
52
+ trt.float32: torch.float32,
53
+ trt.float16: torch.float16,
54
+ trt.bfloat16: torch.float16,
55
+ trt.int64: torch.int64,
56
+ trt.int8: torch.int8,
57
+ trt.bool: torch.bool,
58
+ }
59
+
60
+
61
+ def get_dynamic_axes(profiles):
62
+ """
63
+ This method calculates dynamic_axes to use in onnx.export().
64
+ Args:
65
+ profiles: list of profile maps, each of the form {"input name": [min_shape, opt_shape, max_shape], ...}
66
+ """
67
+ dynamic_axes: dict[str, list[int]] = {}
68
+ if not profiles:
69
+ return dynamic_axes
70
+ for profile in profiles:
71
+ for key in profile:
72
+ axes = []
73
+ vals = profile[key]
74
+ for i in range(len(vals[0])):
75
+ if vals[0][i] != vals[2][i]:
76
+ axes.append(i)
77
+ if len(axes) > 0:
78
+ dynamic_axes[key] = axes
79
+ return dynamic_axes
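To illustrate the profile format this helper consumes, a minimal sketch (the input name "x" and the shapes are made up): each value is a [min_shape, opt_shape, max_shape] triple, and only the dimensions that differ between min and max become dynamic axes.

    # hypothetical single-input profile with a dynamic batch dimension
    example_profiles = [{"x": [[1, 3, 224, 224], [4, 3, 224, 224], [8, 3, 224, 224]]}]
    print(get_dynamic_axes(example_profiles))  # -> {'x': [0]}; only dim 0 differs between min and max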
80
+
81
+
82
+ def cuassert(cuda_ret):
83
+ """
84
+ Error reporting method for CUDA calls.
85
+ Args:
86
+ cuda_ret: tuple returned by a cuda-python (cudart) call; element 0 is the error code, element 1 (if present) is the result.
87
+ """
88
+ err = cuda_ret[0]
89
+ if err != 0:
90
+ raise RuntimeError(f"CUDA ERROR: {err}")
91
+ if len(cuda_ret) > 1:
92
+ return cuda_ret[1]
93
+ return None
94
+
95
+
96
+ class ShapeError(Exception):
97
+ """
98
+ Exception class to report errors from setting TRT plan input shapes
99
+ """
100
+
101
+ pass
102
+
103
+
104
+ class TRTEngine:
105
+ """
106
+ An auxiliary class that implements running of TRT-optimized engines
107
+
108
+ """
109
+
110
+ def __init__(self, plan_path, logger=None):
111
+ """
112
+ Loads serialized engine, creates execution context and activates it
113
+ Args:
114
+ plan_path: path to serialized TRT engine.
115
+ logger: optional logger object
116
+ """
117
+ self.plan_path = plan_path
118
+ self.logger = logger or get_logger("trt_compile")
119
+ self.logger.info(f"Loading TensorRT engine: {self.plan_path}")
120
+ self.engine = engine_from_bytes(bytes_from_path(self.plan_path))
121
+ self.tensors = OrderedDict()
122
+ self.cuda_graph_instance = None # cuda graph
123
+ self.context = self.engine.create_execution_context()
124
+ self.input_names = []
125
+ self.output_names = []
126
+ self.dtypes = []
127
+ self.cur_profile = 0
128
+ dtype_dict = trt_to_torch_dtype_dict()
129
+ for idx in range(self.engine.num_io_tensors):
130
+ binding = self.engine[idx]
131
+ if self.engine.get_tensor_mode(binding) == trt.TensorIOMode.INPUT:
132
+ self.input_names.append(binding)
133
+ elif self.engine.get_tensor_mode(binding) == trt.TensorIOMode.OUTPUT:
134
+ self.output_names.append(binding)
135
+ dtype = dtype_dict[self.engine.get_tensor_dtype(binding)]
136
+ self.dtypes.append(dtype)
137
+
138
+ def allocate_buffers(self, device):
139
+ """
140
+ Allocates outputs to run TRT engine
141
+ Args:
142
+ device: GPU device to allocate memory on
143
+ """
144
+ ctx = self.context
145
+
146
+ for i, binding in enumerate(self.output_names):
147
+ shape = list(ctx.get_tensor_shape(binding))
148
+ if binding not in self.tensors or list(self.tensors[binding].shape) != shape:
149
+ t = torch.empty(shape, dtype=self.dtypes[i], device=device).contiguous()
150
+ self.tensors[binding] = t
151
+ ctx.set_tensor_address(binding, t.data_ptr())
152
+
153
+ def set_inputs(self, feed_dict, stream):
154
+ """
155
+ Sets input bindings for TRT engine according to feed_dict
156
+ Args:
157
+ feed_dict: a dictionary [str->Tensor]
158
+ stream: CUDA stream to use
159
+ """
160
+ e = self.engine
161
+ ctx = self.context
162
+
163
+ last_profile = self.cur_profile
164
+
165
+ def try_set_inputs():
166
+ for binding, t in feed_dict.items():
167
+ if t is not None:
168
+ t = t.contiguous()
169
+ shape = t.shape
170
+ ctx.set_input_shape(binding, shape)
171
+ ctx.set_tensor_address(binding, t.data_ptr())
172
+
173
+ while True:
174
+ try:
175
+ try_set_inputs()
176
+ break
177
+ except ShapeError:
178
+ next_profile = (self.cur_profile + 1) % e.num_optimization_profiles
179
+ if next_profile == last_profile:
180
+ raise
181
+ self.cur_profile = next_profile
182
+ ctx.set_optimization_profile_async(self.cur_profile, stream)
183
+
184
+ left = ctx.infer_shapes()
185
+ assert len(left) == 0
186
+
187
+ def infer(self, stream, use_cuda_graph=False):
188
+ """
189
+ Runs TRT engine.
190
+ Args:
191
+ stream: CUDA stream to run on
192
+ use_cuda_graph: use CUDA graph. Note: requires all inputs to stay at the same GPU memory addresses between calls.
193
+ """
194
+ if use_cuda_graph:
195
+ if self.cuda_graph_instance is not None:
196
+ cuassert(cudart.cudaGraphLaunch(self.cuda_graph_instance, stream))
197
+ cuassert(cudart.cudaStreamSynchronize(stream))
198
+ else:
199
+ # do inference before CUDA graph capture
200
+ noerror = self.context.execute_async_v3(stream)
201
+ if not noerror:
202
+ raise ValueError("ERROR: inference failed.")
203
+ # capture cuda graph
204
+ cuassert(
205
+ cudart.cudaStreamBeginCapture(stream, cudart.cudaStreamCaptureMode.cudaStreamCaptureModeThreadLocal)
206
+ )
207
+ self.context.execute_async_v3(stream)
208
+ graph = cuassert(cudart.cudaStreamEndCapture(stream))
209
+ self.cuda_graph_instance = cuassert(cudart.cudaGraphInstantiate(graph, 0))
210
+ self.logger.info("CUDA Graph captured!")
211
+ else:
212
+ noerror = self.context.execute_async_v3(stream)
213
+ cuassert(cudart.cudaStreamSynchronize(stream))
214
+ if not noerror:
215
+ raise ValueError("ERROR: inference failed.")
216
+
217
+ return self.tensors
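For orientation, a hedged sketch of how TRTEngine can be driven standalone, mirroring the calls made by TrtCompiler.forward() further below; "model.plan" and the input name "x" are placeholders, and TensorRT, Polygraphy and cuda-python must be installed:

    import torch

    engine = TRTEngine("model.plan")                       # load and activate a serialized plan
    stream = torch.cuda.Stream()
    engine.set_inputs({"x": torch.randn(1, 3, 224, 224, device="cuda")}, stream.cuda_stream)
    engine.allocate_buffers(device="cuda")                 # (re)allocate output tensors for the current shapes
    stream.wait_stream(torch.cuda.current_stream())        # synchronize with the default Torch stream
    outputs = engine.infer(stream.cuda_stream)             # OrderedDict: output name -> torch.Tensor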
218
+
219
+
220
+ class TrtCompiler:
221
+ """
222
+ This class implements:
223
+ - TRT lazy persistent export
224
+ - Running TRT with optional fallback to Torch
225
+ (for TRT engines with limited profiles)
226
+ """
227
+
228
+ def __init__(
229
+ self,
230
+ model,
231
+ plan_path,
232
+ precision="fp16",
233
+ method="onnx",
234
+ input_names=None,
235
+ output_names=None,
236
+ export_args=None,
237
+ build_args=None,
238
+ input_profiles=None,
239
+ dynamic_batchsize=None,
240
+ use_cuda_graph=False,
241
+ timestamp=None,
242
+ fallback=False,
243
+ logger=None,
244
+ ):
245
+ """
246
+ Initialization method:
247
+ Tries to load persistent serialized TRT engine
248
+ Saves its arguments for lazy TRT build on first forward() call
249
+ Args:
250
+ model: Model to "wrap".
251
+ plan_path: Path where to save the persistent serialized TRT engine.
252
+ precision: TRT builder precision for the engine model. Should be 'fp32'|'tf32'|'fp16'|'bf16'.
253
+ method: One of 'onnx'|'torch_trt'.
254
+ Default is 'onnx' (torch.onnx.export()->TRT). This is the most stable and efficient option.
255
+ 'torch_trt' may not work for some nets. Also AMP must be turned off for it to work.
256
+ input_names: Optional list of input names. If None, will be read from the function signature.
257
+ output_names: Optional list of output names. Note: If not None, patched forward() will return a dictionary.
258
+ export_args: Optional args to pass to export method. See onnx.export() and Torch-TensorRT docs for details.
259
+ build_args: Optional args to pass to the TRT builder. See Polygraphy's CreateConfig for details.
260
+ input_profiles: Optional list of profiles for TRT builder and ONNX export.
261
+ Each profile is a map of the form: {"input id": [min_shape, opt_shape, max_shape], ...}.
262
+ dynamic_batchsize: A sequence with three elements to define the batch size range of the input for the model to be
263
+ converted. Should be a sequence like [MIN_BATCH, OPT_BATCH, MAX_BATCH].
264
+ [note]: If neither input_profiles nor dynamic_batchsize is specified, static shapes will be used to build the TRT engine.
265
+ use_cuda_graph: Use CUDA Graph for inference. Note: all inputs have to stay at the same GPU memory addresses between calls!
266
+ timestamp: Optional timestamp to rebuild TRT engine (e.g. if config file changes).
267
+ fallback: Allow falling back to PyTorch when TRT inference fails (e.g., shapes exceed the max profile).
268
+ """
269
+
270
+ method_vals = ["onnx", "torch_trt"]
271
+ if method not in method_vals:
272
+ raise ValueError(f"trt_compile(): 'method' should be one of {method_vals}, got: {method}.")
273
+ precision_vals = ["fp32", "tf32", "fp16", "bf16"]
274
+ if precision not in precision_vals:
275
+ raise ValueError(f"trt_compile(): 'precision' should be one of {precision_vals}, got: {precision}.")
276
+
277
+ self.plan_path = plan_path
278
+ self.precision = precision
279
+ self.method = method
280
+ self.return_dict = output_names is not None
281
+ self.output_names = output_names or []
282
+ self.profiles = input_profiles or []
283
+ self.dynamic_batchsize = dynamic_batchsize
284
+ self.export_args = export_args or {}
285
+ self.build_args = build_args or {}
286
+ self.engine: TRTEngine | None = None
287
+ self.use_cuda_graph = use_cuda_graph
288
+ self.fallback = fallback
289
+ self.disabled = False
290
+
291
+ self.logger = logger or get_logger("trt_compile")
292
+
293
+ # Normally we read input_names from the forward() signature, but they can be overridden
294
+ if input_names is None:
295
+ argspec = inspect.getfullargspec(model.forward)
296
+ input_names = argspec.args[1:]
297
+ self.input_names = input_names
298
+ self.old_forward = model.forward
299
+
300
+ # Force engine rebuild if older than the timestamp
301
+ if timestamp is not None and os.path.exists(self.plan_path) and os.path.getmtime(self.plan_path) < timestamp:
302
+ os.remove(self.plan_path)
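As a sketch of the two (mutually exclusive) ways dynamic shapes can be described when constructing TrtCompiler, using a made-up input named "x" with shape (batch, 3, 224, 224):

    # Option 1: explicit profiles, one [min_shape, opt_shape, max_shape] triple per input
    input_profiles = [{"x": [[1, 3, 224, 224], [4, 3, 224, 224], [8, 3, 224, 224]]}]

    # Option 2: vary only the batch dimension; the other dims are taken from the example input
    dynamic_batchsize = [1, 4, 8]   # [MIN_BATCH, OPT_BATCH, MAX_BATCH]

Passing both raises a ValueError in _build_and_save(); with neither, the engine is built with static shapes.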
303
+
304
+ def _inputs_to_dict(self, input_example):
305
+ trt_inputs = {}
306
+ for i, inp in enumerate(input_example):
307
+ input_name = self.input_names[i]
308
+ trt_inputs[input_name] = inp
309
+ return trt_inputs
310
+
311
+ def _load_engine(self):
312
+ """
313
+ Loads TRT plan from disk and activates its execution context.
314
+ """
315
+ try:
316
+ self.engine = TRTEngine(self.plan_path, self.logger)
317
+ self.input_names = self.engine.input_names
318
+ except Exception as e:
319
+ self.logger.debug(f"Exception while loading the engine:\n{e}")
320
+
321
+ def forward(self, model, argv, kwargs):
322
+ """
323
+ Main forward method:
324
+ Builds TRT engine if not available yet.
325
+ Tries to run TRT engine
326
+ If an exception is thrown and self.fallback==True: falls back to the original PyTorch forward()
327
+
328
+ Args: passes through whatever args the wrapped module's forward() has
329
+ Returns: passes through the wrapped module's forward() return value(s)
330
+
331
+ """
332
+ if self.engine is None and not self.disabled:
333
+ # Restore original forward for export
334
+ new_forward = model.forward
335
+ model.forward = self.old_forward
336
+ try:
337
+ self._load_engine()
338
+ if self.engine is None:
339
+ build_args = kwargs.copy()
340
+ if len(argv) > 0:
341
+ build_args.update(self._inputs_to_dict(argv))
342
+ self._build_and_save(model, build_args)
343
+ # This will reassign input_names from the engine
344
+ self._load_engine()
345
+ assert self.engine is not None
346
+ except Exception as e:
347
+ if self.fallback:
348
+ self.logger.info(f"Failed to build engine: {e}")
349
+ self.disabled = True
350
+ else:
351
+ raise e
352
+ if not self.disabled and not self.fallback:
353
+ # Delete all parameters
354
+ for param in model.parameters():
355
+ del param
356
+ # Call empty_cache to release GPU memory
357
+ torch.cuda.empty_cache()
358
+ model.forward = new_forward
359
+ # Run the engine
360
+ try:
361
+ if len(argv) > 0:
362
+ kwargs.update(self._inputs_to_dict(argv))
363
+ argv = ()
364
+
365
+ if self.engine is not None:
366
+ # forward_trt is not thread safe as we do not use per-thread execution contexts
367
+ with lock_sm:
368
+ device = torch.cuda.current_device()
369
+ stream = torch.cuda.Stream(device=device)
370
+ self.engine.set_inputs(kwargs, stream.cuda_stream)
371
+ self.engine.allocate_buffers(device=device)
372
+ # Need this to synchronize with Torch stream
373
+ stream.wait_stream(torch.cuda.current_stream())
374
+ ret = self.engine.infer(stream.cuda_stream, use_cuda_graph=self.use_cuda_graph)
375
+ # if output_names is not None, return dictionary
376
+ if not self.return_dict:
377
+ ret = list(ret.values())
378
+ if len(ret) == 1:
379
+ ret = ret[0]
380
+ return ret
381
+ except Exception as e:
382
+ if model is not None:
383
+ self.logger.info(f"Exception: {e}\nFalling back to Pytorch ...")
384
+ else:
385
+ raise e
386
+ return self.old_forward(*argv, **kwargs)
387
+
388
+ def _onnx_to_trt(self, onnx_path):
389
+ """
390
+ Builds a TRT engine from the ONNX file at onnx_path and returns the serialized engine bytes
391
+ """
392
+
393
+ profiles = []
394
+ if self.profiles:
395
+ for input_profile in self.profiles:
396
+ if isinstance(input_profile, Profile):
397
+ profiles.append(input_profile)
398
+ else:
399
+ p = Profile()
400
+ for name, dims in input_profile.items():
401
+ assert len(dims) == 3
402
+ p.add(name, min=dims[0], opt=dims[1], max=dims[2])
403
+ profiles.append(p)
404
+
405
+ build_args = self.build_args.copy()
406
+ build_args["tf32"] = self.precision != "fp32"
407
+ if self.precision == "fp16":
408
+ build_args["fp16"] = True
409
+ elif self.precision == "bf16":
410
+ build_args["bf16"] = True
411
+
412
+ self.logger.info(f"Building TensorRT engine for {onnx_path}: {self.plan_path}")
413
+ network = network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM])
414
+ return engine_bytes_from_network(network, config=CreateConfig(profiles=profiles, **build_args))
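For reference, build_args are forwarded as keyword arguments to Polygraphy's CreateConfig; a rough sketch of what the call above amounts to with fp16 precision and the default build_args applied by trt_compile() further below (illustrative only):

    config = CreateConfig(
        profiles=profiles,               # optimization profiles assembled from self.profiles
        tf32=True,                       # enabled for any precision other than "fp32"
        fp16=True,                       # set when precision == "fp16"
        builder_optimization_level=5,    # default build_args from trt_compile()
        precision_constraints="obey",
    )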
415
+
416
+ def _build_and_save(self, model, input_example):
417
+ """
418
+ If TRT engine is not ready, exports model to ONNX,
419
+ builds the TRT engine and saves the serialized TRT engine to disk.
420
+ Args:
421
+ input_example: passed to onnx.export()
422
+ """
423
+
424
+ if self.engine is not None:
425
+ return
426
+
427
+ export_args = self.export_args
428
+
429
+ add_casts_around_norms(model)
430
+
431
+ if self.method == "torch_trt":
432
+ enabled_precisions = [torch.float32]
433
+ if self.precision == "fp16":
434
+ enabled_precisions.append(torch.float16)
435
+ elif self.precision == "bf16":
436
+ enabled_precisions.append(torch.bfloat16)
437
+ inputs = list(input_example.values())
438
+ ir_model = convert_to_torchscript(model, inputs=inputs, use_trace=True)
439
+
440
+ def get_torch_trt_input(input_shape, dynamic_batchsize):
441
+ min_input_shape, opt_input_shape, max_input_shape = get_profile_shapes(input_shape, dynamic_batchsize)
442
+ return torch_tensorrt.Input(
443
+ min_shape=min_input_shape, opt_shape=opt_input_shape, max_shape=max_input_shape
444
+ )
445
+
446
+ tt_inputs = [get_torch_trt_input(i.shape, self.dynamic_batchsize) for i in inputs]
447
+ engine_bytes = torch_tensorrt.convert_method_to_trt_engine(
448
+ ir_model,
449
+ "forward",
450
+ inputs=tt_inputs,
451
+ ir="torchscript",
452
+ enabled_precisions=enabled_precisions,
453
+ **export_args,
454
+ )
455
+ else:
456
+ dbs = self.dynamic_batchsize
457
+ if dbs:
458
+ if len(self.profiles) > 0:
459
+ raise ValueError("ERROR: Both dynamic_batchsize and input_profiles set for TrtCompiler!")
460
+ if len(dbs) != 3:
461
+ raise ValueError("dynamic_batchsize has to have len ==3 ")
462
+ profiles = {}
463
+ for id, val in input_example.items():
464
+ sh = val.shape[1:]
465
+ profiles[id] = [[dbs[0], *sh], [dbs[1], *sh], [dbs[2], *sh]]
466
+ self.profiles = [profiles]
467
+
468
+ if len(self.profiles) > 0:
469
+ export_args.update({"dynamic_axes": get_dynamic_axes(self.profiles)})
470
+
471
+ # Use temporary directory for easy cleanup in case of external weights
472
+ with tempfile.TemporaryDirectory() as tmpdir:
473
+ onnx_path = Path(tmpdir) / "model.onnx"
474
+ self.logger.info(
475
+ f"Exporting to {onnx_path}:\n\toutput_names={self.output_names}\n\texport args: {export_args}"
476
+ )
477
+ convert_to_onnx(
478
+ model,
479
+ input_example,
480
+ filename=str(onnx_path),
481
+ input_names=self.input_names,
482
+ output_names=self.output_names,
483
+ **export_args,
484
+ )
485
+ self.logger.info("Export to ONNX successful.")
486
+ engine_bytes = self._onnx_to_trt(str(onnx_path))
487
+
488
+ open(self.plan_path, "wb").write(engine_bytes)
489
+
490
+
491
+ def trt_forward(self, *argv, **kwargs):
492
+ """
493
+ Patch function to replace original model's forward() with.
494
+ Redirects to TrtCompiler.forward()
495
+ """
496
+ return self._trt_compiler.forward(self, argv, kwargs)
497
+
498
+
499
+ def trt_compile(
500
+ model: torch.nn.Module,
501
+ base_path: str,
502
+ args: Dict[str, Any] | None = None,
503
+ submodule: Union[str, List[str]] | None = None,
504
+ logger: Any | None = None,
505
+ ) -> torch.nn.Module:
506
+ """
507
+ Instruments model or submodule(s) with TrtCompiler and replaces its forward() with TRT hook.
508
+ Note: TRT 10.3 is recommended for best performance. Some nets may even fail to work with TRT 8.x
509
+ Args:
510
+ model: module to patch with TrtCompiler object.
511
+ base_path: TRT plan(s) saved to f"{base_path}[.{submodule}].plan" path.
512
+ dirname(base_path) must exist, but base_path itself does not have to.
513
+ If base_path points to an existing file (e.g. an associated checkpoint),
514
+ that file becomes a dependency - its mtime is added to args["timestamp"].
515
+ args: Optional dict, unpacked and passed to TrtCompiler() - see TrtCompiler above for details.
516
+ submodule: Optional hierarchical id(s) of submodule to patch, e.g. ['image_decoder.decoder']
517
+ If None, TrtCompiler patch is applied to the whole model.
518
+ Otherwise, the submodule (or each submodule in the list) is patched.
519
+ logger: Optional logger for diagnostics.
520
+ Returns:
521
+ Always returns the same model passed in as an argument. This is for ease of use in configs.
522
+ """
523
+
524
+ default_args: Dict[str, Any] = {
525
+ "method": "onnx",
526
+ "precision": "fp16",
527
+ "build_args": {"builder_optimization_level": 5, "precision_constraints": "obey"},
528
+ }
529
+
530
+ default_args.update(args or {})
531
+ args = default_args
532
+
533
+ if trt_imported and polygraphy_imported and torch.cuda.is_available():
534
+ # if "path" filename point to existing file (e.g. checkpoint)
535
+ # it is also treated as a dependency
536
+ if os.path.exists(base_path):
537
+ timestamp = int(os.path.getmtime(base_path))
538
+ if "timestamp" in args:
539
+ timestamp = max(int(args["timestamp"]), timestamp)
540
+ args["timestamp"] = timestamp
541
+
542
+ def wrap(model, path):
543
+ wrapper = TrtCompiler(model, path + ".plan", logger=logger, **args)
544
+ model._trt_compiler = wrapper
545
+ model.forward = MethodType(trt_forward, model)
546
+
547
+ def find_sub(parent, submodule):
548
+ idx = submodule.find(".")
549
+ # if there is "." in name, call recursively
550
+ if idx != -1:
551
+ parent_name = submodule[:idx]
552
+ parent = getattr(parent, parent_name)
553
+ submodule = submodule[idx + 1 :]
554
+ return find_sub(parent, submodule)
555
+ return parent, submodule
556
+
557
+ if submodule is not None:
558
+ if isinstance(submodule, str):
559
+ submodule = [submodule]
560
+ for s in submodule:
561
+ parent, sub = find_sub(model, s)
562
+ wrap(getattr(parent, sub), base_path + "." + s)
563
+ else:
564
+ wrap(model, base_path)
565
+ else:
566
+ logger = logger or get_logger("trt_compile")
567
+ logger.warning("TensorRT and/or polygraphy packages are not available! trt_compile() has no effect.")
568
+
569
+ return model
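A hedged end-to-end sketch of the public entry point (the import path assumes this file ships as monai.networks.trt_compiler; the network, base path and shapes are examples only, and CUDA, TensorRT and Polygraphy must be available, otherwise the call is a no-op):

    import torch
    from monai.networks.nets import UNet
    from monai.networks.trt_compiler import trt_compile

    model = UNet(spatial_dims=2, in_channels=1, out_channels=2, channels=(8, 16), strides=(2,)).cuda()
    model = trt_compile(model, "/models/unet_trt", args={"precision": "fp16", "dynamic_batchsize": [1, 4, 8]})

    with torch.no_grad():
        y = model(torch.randn(1, 1, 96, 96, device="cuda"))  # first call exports to ONNX and builds the .plan lazily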