inference-models 0.18.3__py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
Files changed (195)
  1. inference_models/__init__.py +36 -0
  2. inference_models/configuration.py +72 -0
  3. inference_models/constants.py +2 -0
  4. inference_models/entities.py +5 -0
  5. inference_models/errors.py +137 -0
  6. inference_models/logger.py +52 -0
  7. inference_models/model_pipelines/__init__.py +0 -0
  8. inference_models/model_pipelines/auto_loaders/__init__.py +0 -0
  9. inference_models/model_pipelines/auto_loaders/core.py +120 -0
  10. inference_models/model_pipelines/auto_loaders/pipelines_registry.py +36 -0
  11. inference_models/model_pipelines/face_and_gaze_detection/__init__.py +0 -0
  12. inference_models/model_pipelines/face_and_gaze_detection/mediapipe_l2cs.py +200 -0
  13. inference_models/models/__init__.py +0 -0
  14. inference_models/models/auto_loaders/__init__.py +0 -0
  15. inference_models/models/auto_loaders/access_manager.py +168 -0
  16. inference_models/models/auto_loaders/auto_negotiation.py +1329 -0
  17. inference_models/models/auto_loaders/auto_resolution_cache.py +129 -0
  18. inference_models/models/auto_loaders/constants.py +7 -0
  19. inference_models/models/auto_loaders/core.py +1341 -0
  20. inference_models/models/auto_loaders/dependency_models.py +52 -0
  21. inference_models/models/auto_loaders/entities.py +57 -0
  22. inference_models/models/auto_loaders/models_registry.py +497 -0
  23. inference_models/models/auto_loaders/presentation_utils.py +333 -0
  24. inference_models/models/auto_loaders/ranking.py +413 -0
  25. inference_models/models/auto_loaders/utils.py +31 -0
  26. inference_models/models/base/__init__.py +0 -0
  27. inference_models/models/base/classification.py +123 -0
  28. inference_models/models/base/depth_estimation.py +62 -0
  29. inference_models/models/base/documents_parsing.py +111 -0
  30. inference_models/models/base/embeddings.py +66 -0
  31. inference_models/models/base/instance_segmentation.py +87 -0
  32. inference_models/models/base/keypoints_detection.py +93 -0
  33. inference_models/models/base/object_detection.py +143 -0
  34. inference_models/models/base/semantic_segmentation.py +74 -0
  35. inference_models/models/base/types.py +5 -0
  36. inference_models/models/clip/__init__.py +0 -0
  37. inference_models/models/clip/clip_onnx.py +148 -0
  38. inference_models/models/clip/clip_pytorch.py +104 -0
  39. inference_models/models/clip/preprocessing.py +162 -0
  40. inference_models/models/common/__init__.py +0 -0
  41. inference_models/models/common/cuda.py +30 -0
  42. inference_models/models/common/model_packages.py +25 -0
  43. inference_models/models/common/onnx.py +379 -0
  44. inference_models/models/common/roboflow/__init__.py +0 -0
  45. inference_models/models/common/roboflow/model_packages.py +361 -0
  46. inference_models/models/common/roboflow/post_processing.py +436 -0
  47. inference_models/models/common/roboflow/pre_processing.py +1332 -0
  48. inference_models/models/common/torch.py +20 -0
  49. inference_models/models/common/trt.py +266 -0
  50. inference_models/models/deep_lab_v3_plus/__init__.py +0 -0
  51. inference_models/models/deep_lab_v3_plus/deep_lab_v3_plus_segmentation_onnx.py +282 -0
  52. inference_models/models/deep_lab_v3_plus/deep_lab_v3_plus_segmentation_torch.py +264 -0
  53. inference_models/models/deep_lab_v3_plus/deep_lab_v3_plus_segmentation_trt.py +313 -0
  54. inference_models/models/depth_anything_v2/__init__.py +0 -0
  55. inference_models/models/depth_anything_v2/depth_anything_v2_hf.py +77 -0
  56. inference_models/models/dinov3/__init__.py +0 -0
  57. inference_models/models/dinov3/dinov3_classification_onnx.py +348 -0
  58. inference_models/models/dinov3/dinov3_classification_torch.py +323 -0
  59. inference_models/models/doctr/__init__.py +0 -0
  60. inference_models/models/doctr/doctr_torch.py +304 -0
  61. inference_models/models/easy_ocr/__init__.py +0 -0
  62. inference_models/models/easy_ocr/easy_ocr_torch.py +222 -0
  63. inference_models/models/florence2/__init__.py +0 -0
  64. inference_models/models/florence2/florence2_hf.py +897 -0
  65. inference_models/models/grounding_dino/__init__.py +0 -0
  66. inference_models/models/grounding_dino/grounding_dino_torch.py +227 -0
  67. inference_models/models/l2cs/__init__.py +0 -0
  68. inference_models/models/l2cs/l2cs_onnx.py +216 -0
  69. inference_models/models/mediapipe_face_detection/__init__.py +0 -0
  70. inference_models/models/mediapipe_face_detection/face_detection.py +203 -0
  71. inference_models/models/moondream2/__init__.py +0 -0
  72. inference_models/models/moondream2/moondream2_hf.py +281 -0
  73. inference_models/models/owlv2/__init__.py +0 -0
  74. inference_models/models/owlv2/cache.py +182 -0
  75. inference_models/models/owlv2/entities.py +112 -0
  76. inference_models/models/owlv2/owlv2_hf.py +695 -0
  77. inference_models/models/owlv2/reference_dataset.py +291 -0
  78. inference_models/models/paligemma/__init__.py +0 -0
  79. inference_models/models/paligemma/paligemma_hf.py +209 -0
  80. inference_models/models/perception_encoder/__init__.py +0 -0
  81. inference_models/models/perception_encoder/perception_encoder_pytorch.py +197 -0
  82. inference_models/models/perception_encoder/vision_encoder/__init__.py +0 -0
  83. inference_models/models/perception_encoder/vision_encoder/config.py +160 -0
  84. inference_models/models/perception_encoder/vision_encoder/pe.py +742 -0
  85. inference_models/models/perception_encoder/vision_encoder/rope.py +344 -0
  86. inference_models/models/perception_encoder/vision_encoder/tokenizer.py +342 -0
  87. inference_models/models/perception_encoder/vision_encoder/transforms.py +33 -0
  88. inference_models/models/qwen25vl/__init__.py +1 -0
  89. inference_models/models/qwen25vl/qwen25vl_hf.py +285 -0
  90. inference_models/models/resnet/__init__.py +0 -0
  91. inference_models/models/resnet/resnet_classification_onnx.py +330 -0
  92. inference_models/models/resnet/resnet_classification_torch.py +305 -0
  93. inference_models/models/resnet/resnet_classification_trt.py +369 -0
  94. inference_models/models/rfdetr/__init__.py +0 -0
  95. inference_models/models/rfdetr/backbone_builder.py +101 -0
  96. inference_models/models/rfdetr/class_remapping.py +41 -0
  97. inference_models/models/rfdetr/common.py +115 -0
  98. inference_models/models/rfdetr/default_labels.py +108 -0
  99. inference_models/models/rfdetr/dinov2_with_windowed_attn.py +1330 -0
  100. inference_models/models/rfdetr/misc.py +26 -0
  101. inference_models/models/rfdetr/ms_deform_attn.py +180 -0
  102. inference_models/models/rfdetr/ms_deform_attn_func.py +60 -0
  103. inference_models/models/rfdetr/position_encoding.py +166 -0
  104. inference_models/models/rfdetr/post_processor.py +83 -0
  105. inference_models/models/rfdetr/projector.py +373 -0
  106. inference_models/models/rfdetr/rfdetr_backbone_pytorch.py +394 -0
  107. inference_models/models/rfdetr/rfdetr_base_pytorch.py +807 -0
  108. inference_models/models/rfdetr/rfdetr_instance_segmentation_onnx.py +206 -0
  109. inference_models/models/rfdetr/rfdetr_instance_segmentation_pytorch.py +373 -0
  110. inference_models/models/rfdetr/rfdetr_instance_segmentation_trt.py +227 -0
  111. inference_models/models/rfdetr/rfdetr_object_detection_onnx.py +244 -0
  112. inference_models/models/rfdetr/rfdetr_object_detection_pytorch.py +470 -0
  113. inference_models/models/rfdetr/rfdetr_object_detection_trt.py +270 -0
  114. inference_models/models/rfdetr/segmentation_head.py +273 -0
  115. inference_models/models/rfdetr/transformer.py +767 -0
  116. inference_models/models/roboflow_instant/__init__.py +0 -0
  117. inference_models/models/roboflow_instant/roboflow_instant_hf.py +141 -0
  118. inference_models/models/sam/__init__.py +0 -0
  119. inference_models/models/sam/cache.py +147 -0
  120. inference_models/models/sam/entities.py +25 -0
  121. inference_models/models/sam/sam_torch.py +675 -0
  122. inference_models/models/sam2/__init__.py +0 -0
  123. inference_models/models/sam2/cache.py +162 -0
  124. inference_models/models/sam2/entities.py +43 -0
  125. inference_models/models/sam2/sam2_torch.py +905 -0
  126. inference_models/models/sam2_rt/__init__.py +0 -0
  127. inference_models/models/sam2_rt/sam2_pytorch.py +119 -0
  128. inference_models/models/smolvlm/__init__.py +0 -0
  129. inference_models/models/smolvlm/smolvlm_hf.py +245 -0
  130. inference_models/models/trocr/__init__.py +0 -0
  131. inference_models/models/trocr/trocr_hf.py +53 -0
  132. inference_models/models/vit/__init__.py +0 -0
  133. inference_models/models/vit/vit_classification_huggingface.py +319 -0
  134. inference_models/models/vit/vit_classification_onnx.py +326 -0
  135. inference_models/models/vit/vit_classification_trt.py +365 -0
  136. inference_models/models/yolact/__init__.py +1 -0
  137. inference_models/models/yolact/yolact_instance_segmentation_onnx.py +336 -0
  138. inference_models/models/yolact/yolact_instance_segmentation_trt.py +361 -0
  139. inference_models/models/yolo_world/__init__.py +1 -0
  140. inference_models/models/yolonas/__init__.py +0 -0
  141. inference_models/models/yolonas/nms.py +44 -0
  142. inference_models/models/yolonas/yolonas_object_detection_onnx.py +204 -0
  143. inference_models/models/yolonas/yolonas_object_detection_trt.py +230 -0
  144. inference_models/models/yolov10/__init__.py +0 -0
  145. inference_models/models/yolov10/yolov10_object_detection_onnx.py +187 -0
  146. inference_models/models/yolov10/yolov10_object_detection_trt.py +215 -0
  147. inference_models/models/yolov11/__init__.py +0 -0
  148. inference_models/models/yolov11/yolov11_onnx.py +28 -0
  149. inference_models/models/yolov11/yolov11_torch_script.py +25 -0
  150. inference_models/models/yolov11/yolov11_trt.py +21 -0
  151. inference_models/models/yolov12/__init__.py +0 -0
  152. inference_models/models/yolov12/yolov12_onnx.py +7 -0
  153. inference_models/models/yolov12/yolov12_torch_script.py +7 -0
  154. inference_models/models/yolov12/yolov12_trt.py +7 -0
  155. inference_models/models/yolov5/__init__.py +0 -0
  156. inference_models/models/yolov5/nms.py +99 -0
  157. inference_models/models/yolov5/yolov5_instance_segmentation_onnx.py +225 -0
  158. inference_models/models/yolov5/yolov5_instance_segmentation_trt.py +255 -0
  159. inference_models/models/yolov5/yolov5_object_detection_onnx.py +192 -0
  160. inference_models/models/yolov5/yolov5_object_detection_trt.py +218 -0
  161. inference_models/models/yolov7/__init__.py +0 -0
  162. inference_models/models/yolov7/yolov7_instance_segmentation_onnx.py +226 -0
  163. inference_models/models/yolov7/yolov7_instance_segmentation_trt.py +253 -0
  164. inference_models/models/yolov8/__init__.py +0 -0
  165. inference_models/models/yolov8/yolov8_classification_onnx.py +181 -0
  166. inference_models/models/yolov8/yolov8_instance_segmentation_onnx.py +239 -0
  167. inference_models/models/yolov8/yolov8_instance_segmentation_torch_script.py +201 -0
  168. inference_models/models/yolov8/yolov8_instance_segmentation_trt.py +268 -0
  169. inference_models/models/yolov8/yolov8_key_points_detection_onnx.py +263 -0
  170. inference_models/models/yolov8/yolov8_key_points_detection_torch_script.py +218 -0
  171. inference_models/models/yolov8/yolov8_key_points_detection_trt.py +287 -0
  172. inference_models/models/yolov8/yolov8_object_detection_onnx.py +213 -0
  173. inference_models/models/yolov8/yolov8_object_detection_torch_script.py +166 -0
  174. inference_models/models/yolov8/yolov8_object_detection_trt.py +231 -0
  175. inference_models/models/yolov9/__init__.py +0 -0
  176. inference_models/models/yolov9/yolov9_onnx.py +7 -0
  177. inference_models/models/yolov9/yolov9_torch_script.py +7 -0
  178. inference_models/models/yolov9/yolov9_trt.py +7 -0
  179. inference_models/runtime_introspection/__init__.py +0 -0
  180. inference_models/runtime_introspection/core.py +410 -0
  181. inference_models/utils/__init__.py +0 -0
  182. inference_models/utils/download.py +608 -0
  183. inference_models/utils/environment.py +28 -0
  184. inference_models/utils/file_system.py +51 -0
  185. inference_models/utils/hashing.py +7 -0
  186. inference_models/utils/imports.py +48 -0
  187. inference_models/utils/onnx_introspection.py +17 -0
  188. inference_models/weights_providers/__init__.py +0 -0
  189. inference_models/weights_providers/core.py +20 -0
  190. inference_models/weights_providers/entities.py +159 -0
  191. inference_models/weights_providers/roboflow.py +601 -0
  192. inference_models-0.18.3.dist-info/METADATA +466 -0
  193. inference_models-0.18.3.dist-info/RECORD +195 -0
  194. inference_models-0.18.3.dist-info/WHEEL +5 -0
  195. inference_models-0.18.3.dist-info/top_level.txt +1 -0
inference_models/models/auto_loaders/presentation_utils.py
@@ -0,0 +1,333 @@
+ import json
+ from concurrent.futures import ThreadPoolExecutor
+ from typing import List, Optional, Tuple, Union
+
+ from rich.table import Table
+
+ from inference_models.runtime_introspection.core import RuntimeXRayResult
+ from inference_models.utils.download import get_content_length
+ from inference_models.weights_providers.entities import (
+     FileDownloadSpecs,
+     JetsonEnvironmentRequirements,
+     ModelDependency,
+     ModelPackageMetadata,
+     Quantization,
+     ServerEnvironmentRequirements,
+ )
+
+ BYTES_IN_MB = 1024**2
+
+
+ def render_table_with_model_overview(
+     model_id: str,
+     requested_model_id: str,
+     model_architecture: str,
+     model_variant: Optional[str],
+     task_type: Optional[str],
+     weights_provider: str,
+     registered_packages: int,
+     model_dependencies: Optional[List[ModelDependency]],
+ ) -> Table:
+     table = Table(title="Model overview", show_header=False, box=None)
+     table.add_column(justify="left", no_wrap=True, style="bold green4")
+     table.add_column(justify="left")
+     model_id_str = model_id
+     if requested_model_id != model_id:
+         model_id_str = f"{model_id_str} (alias: {requested_model_id})"
+     table.add_row("Model ID:", model_id_str)
+     table.add_row("Architecture:", model_architecture)
+     table.add_row("Variant:", model_variant or "N/A")
+     table.add_row("Task:", task_type or "N/A")
+     table.add_row("Weights provider:", weights_provider)
+     table.add_row("Number of packages:", str(registered_packages))
+     if model_dependencies:
+         model_dependencies_str = ", ".join(
+             [
+                 f"{dependency.name}: {dependency.model_id} (package: {dependency.model_package_id or 'N/A'})"
+                 for dependency in model_dependencies
+             ]
+         )
+         table.add_row("Model dependencies", model_dependencies_str)
+     return table
+
+
+ def calculate_size_of_all_model_packages_artefacts(
+     model_packages: List[ModelPackageMetadata],
+     max_workers: int = 16,
+ ) -> List[Tuple[int, bool]]:
+     all_artefacts = [package.package_artefacts for package in model_packages]
+     with ThreadPoolExecutor(max_workers=max_workers) as pool:
+         return list(pool.map(calculate_artefacts_size, all_artefacts))
+
+
+ def calculate_artefacts_size(
+     package_artefacts: List[FileDownloadSpecs],
+ ) -> Tuple[int, bool]:
+     result = 0
+     success = True
+     for artefact in package_artefacts:
+         try:
+             result += get_content_length(url=artefact.download_url)
+         except Exception:
+             success = False
+     return result, success
+
+
+ def render_table_with_model_packages(
+     model_packages: List[ModelPackageMetadata],
+     model_packages_size: Optional[List[Tuple[int, bool]]],
+ ) -> Table:
+     table = Table(title="Model packages", show_lines=True)
+     table.add_column("ID", justify="center", no_wrap=True, style="bold")
+     table.add_column("backend", justify="center")
+     table.add_column("batch size", justify="center")
+     table.add_column("quantization", justify="center")
+     table.add_column("size", justify="center")
+     table.add_column("trusted", justify="center")
+     if model_packages_size is None:
+         model_packages_size = [None] * len(model_packages)
+     for model_package, package_size in zip(model_packages, model_packages_size):
+         if package_size is None:
+             size_str = "N/A"
+         else:
+             size, status = package_size
+             size_str = bytes_to_human_format(size=size)
+             if not status:
+                 size_str = f"{size_str} ⚠️"
+         batch_size = (
+             str(model_package.static_batch_size)
+             if model_package.static_batch_size
+             else "N"
+         )
+         if model_package.quantization is Quantization.UNKNOWN:
+             quantization_str = "N/A"
+         else:
+             quantization_str = model_package.quantization.value
+         table.add_row(
+             model_package.package_id,
+             model_package.backend.value,
+             batch_size,
+             quantization_str,
+             size_str,
+             str(model_package.trusted_source),
+         )
+     return table
+
+
+ def render_model_package_details_table(
+     model_id: str,
+     requested_model_id: str,
+     model_package: ModelPackageMetadata,
+     artefacts_size: Optional[Tuple[int, bool]],
+ ) -> Table:
+     model_id_str = model_id
+     if requested_model_id != model_id:
+         model_id_str = f"{model_id_str} (alias: {requested_model_id})"
+     if artefacts_size is None:
+         size_str = "N/A"
+     else:
+         size, status = artefacts_size
+         size_str = bytes_to_human_format(size=size)
+         if not status:
+             size_str = f"{size_str} ⚠️"
+     batch_size = (
+         str(model_package.static_batch_size) if model_package.static_batch_size else "N"
+     )
+     if model_package.quantization is Quantization.UNKNOWN:
+         quantization_str = "N/A"
+     else:
+         quantization_str = model_package.quantization.value
+     table = Table(title="Model package overview", show_header=False, box=None)
+     table.add_column(justify="left", no_wrap=True, style="bold green4")
+     table.add_column(justify="left")
+     table.add_row("Model ID:", model_id_str)
+     table.add_row("Package ID:", model_package.package_id)
+     table.add_row("Backend:", model_package.backend.value)
+     table.add_row("Batch size:", batch_size)
+     table.add_row("Quantization:", quantization_str)
+     table.add_row("Package files:", str(len(model_package.package_artefacts)))
+     table.add_row("Package size:", size_str)
+     table.add_row("Trusted source:", str(model_package.trusted_source))
+     if model_package.trt_package_details is not None:
+         if model_package.dynamic_batch_size_supported:
+             dynamic_batch_size_str = f"min: {model_package.trt_package_details.min_dynamic_batch_size}, opt: {model_package.trt_package_details.opt_dynamic_batch_size}, max: {model_package.trt_package_details.max_dynamic_batch_size}"
+             table.add_row("TRT dynamic batch size:", dynamic_batch_size_str)
+         table.add_row(
+             "TRT same CUDA CC compatibility:",
+             str(model_package.trt_package_details.same_cc_compatible),
+         )
+         table.add_row(
+             "TRT forward compatibility:",
+             str(model_package.trt_package_details.trt_forward_compatible),
+         )
+         table.add_row(
+             "TRT lean runtime excluded:",
+             str(model_package.trt_package_details.trt_lean_runtime_excluded),
+         )
+         table = render_compilation_device_details(
+             table=table,
+             environment_requirements=model_package.environment_requirements,
+         )
+     if model_package.onnx_package_details:
+         if model_package.onnx_package_details.incompatible_providers:
+             incompatible_providers_str = ", ".join(
+                 model_package.onnx_package_details.incompatible_providers
+             )
+         else:
+             incompatible_providers_str = "N/A"
+         table.add_row("ONNX opset:", str(model_package.onnx_package_details.opset))
+         table.add_row("Incompatible providers:", incompatible_providers_str)
+     if model_package.torch_script_package_details:
+         supported_device_types_str = ", ".join(
+             model_package.torch_script_package_details.supported_device_types
+         )
+         torch_version_str = str(
+             model_package.torch_script_package_details.torch_version
+         )
+         if model_package.torch_script_package_details.torch_vision_version:
+             torch_vision_version_str = str(
+                 model_package.torch_script_package_details.torch_vision_version
+             )
+         else:
+             torch_vision_version_str = "N/A"
+         table.add_row("Supported devices:", supported_device_types_str)
+         table.add_row("Required torch version:", torch_version_str)
+         table.add_row("Required torchvision version:", torch_vision_version_str)
+     if model_package.model_features:
+         table.add_row("Model features:", json.dumps(model_package.model_features))
+     return table
+
+
+ def render_compilation_device_details(
+     table: Table,
+     environment_requirements: Optional[
+         Union[ServerEnvironmentRequirements, JetsonEnvironmentRequirements]
+     ],
+ ) -> Table:
+     if environment_requirements is None:
+         return table
+     if isinstance(environment_requirements, ServerEnvironmentRequirements):
+         table.add_row(
+             "Compilation device name:", str(environment_requirements.cuda_device_name)
+         )
+         table.add_row(
+             "Compilation device CUDA CC:", str(environment_requirements.cuda_device_cc)
+         )
+         cuda_version_str = (
+             str(environment_requirements.cuda_version)
+             if environment_requirements.cuda_version
+             else "N/A"
+         )
+         driver_version_str = (
+             str(environment_requirements.driver_version)
+             if environment_requirements.driver_version
+             else "N/A"
+         )
+         trt_version_str = (
+             str(environment_requirements.trt_version)
+             if environment_requirements.trt_version
+             else "N/A"
+         )
+         table.add_row("Compilation device CUDA version:", cuda_version_str)
+         table.add_row("Compilation device driver:", driver_version_str)
+         table.add_row("Compilation device TRT Version:", trt_version_str)
+         return table
+     jetson_product_name_str = (
+         str(environment_requirements.jetson_product_name)
+         if environment_requirements.jetson_product_name
+         else "N/A"
+     )
+     table.add_row("Compilation Jetson type:", jetson_product_name_str)
+     table.add_row(
+         "Compilation device name:", str(environment_requirements.cuda_device_name)
+     )
+     table.add_row(
+         "Compilation device CUDA CC:", str(environment_requirements.cuda_device_cc)
+     )
+     cuda_version_str = (
+         str(environment_requirements.cuda_version)
+         if environment_requirements.cuda_version
+         else "N/A"
+     )
+     l4t_version_str = (
+         str(environment_requirements.l4t_version)
+         if environment_requirements.l4t_version
+         else "N/A"
+     )
+     trt_version_str = (
+         str(environment_requirements.trt_version)
+         if environment_requirements.trt_version
+         else "N/A"
+     )
+     table.add_row("Compilation device CUDA version:", cuda_version_str)
+     table.add_row("Compilation device L4T:", l4t_version_str)
+     table.add_row("Compilation device TRT Version:", trt_version_str)
+     return table
+
+
+ def render_runtime_x_ray(runtime_x_ray: RuntimeXRayResult) -> Table:
+     table = Table(title="Compute environment details", show_header=False, box=None)
+     table.add_column(justify="left", no_wrap=True, style="bold green4")
+     table.add_column(justify="left")
+     detected_gpus = (
+         ", ".join(runtime_x_ray.gpu_devices) if runtime_x_ray.gpu_devices else "N/A"
+     )
+     table.add_row("Detected GPUs:", detected_gpus)
+     detected_gpus_cc = (
+         ", ".join([str(cc) for cc in runtime_x_ray.gpu_devices_cc])
+         if runtime_x_ray.gpu_devices_cc
+         else "N/A"
+     )
+     table.add_row("Detected GPUs CUDA CC:", detected_gpus_cc)
+     nvidia_driver = (
+         str(runtime_x_ray.driver_version) if runtime_x_ray.driver_version else "N/A"
+     )
+     table.add_row("NVIDIA driver:", nvidia_driver)
+     cuda_version = (
+         str(runtime_x_ray.cuda_version) if runtime_x_ray.cuda_version else "N/A"
+     )
+     table.add_row("CUDA version:", cuda_version)
+     trt_version = str(runtime_x_ray.trt_version) if runtime_x_ray.trt_version else "N/A"
+     table.add_row("TRT version:", trt_version)
+     table.add_row(
+         "TRT Python package available:", str(runtime_x_ray.trt_python_package_available)
+     )
+     if runtime_x_ray.jetson_type is not None:
+         table.add_row("Jetson device type:", runtime_x_ray.jetson_type)
+     if runtime_x_ray.l4t_version is not None:
+         table.add_row("L4T version:", str(runtime_x_ray.l4t_version))
+     os_version = runtime_x_ray.os_version if runtime_x_ray.os_version else "N/A"
+     table.add_row("OS version:", os_version)
+     torch_version = (
+         str(runtime_x_ray.torch_version) if runtime_x_ray.torch_version else "N/A"
+     )
+     table.add_row("torch version:", torch_version)
+     torchvision_version = (
+         str(runtime_x_ray.torchvision_version)
+         if runtime_x_ray.torchvision_version
+         else "N/A"
+     )
+     table.add_row("torchvision version:", torchvision_version)
+     onnxruntime_version = (
+         str(runtime_x_ray.onnxruntime_version)
+         if runtime_x_ray.onnxruntime_version
+         else "N/A"
+     )
+     table.add_row("ONNX runtime version:", onnxruntime_version)
+     available_onnx_execution_providers = (
+         ", ".join(runtime_x_ray.available_onnx_execution_providers)
+         if runtime_x_ray.available_onnx_execution_providers
+         else "N/A"
+     )
+     table.add_row(
+         "Detected ONNX execution providers:", available_onnx_execution_providers
+     )
+     return table
+
+
+ def bytes_to_human_format(size: int) -> str:
+     mega_bytes = size / BYTES_IN_MB
+     if mega_bytes <= 512:
+         return f"{round(mega_bytes, 2)} MB"
+     giga_bytes = mega_bytes / 1024
+     return f"{round(giga_bytes, 2)} GB"
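The helpers in this hunk only build rich Table objects; displaying them is left to the caller. Below is a minimal usage sketch, not part of the package, assuming the hunk belongs to inference_models/models/auto_loaders/presentation_utils.py (the only +333-line file in the listing) and that output goes through rich's Console:

# Minimal usage sketch (assumption: the hunk above is
# inference_models/models/auto_loaders/presentation_utils.py, per the file listing).
from rich.console import Console

from inference_models.models.auto_loaders.presentation_utils import (
    bytes_to_human_format,
)

console = Console()
# Values up to 512 MB are reported in MB, larger ones in GB.
console.print(bytes_to_human_format(size=100 * 1024**2))  # "100.0 MB"
console.print(bytes_to_human_format(size=2 * 1024**3))    # "2.0 GB"

# The render_* helpers return rich Table objects and are displayed the same way, e.g.:
# console.print(render_runtime_x_ray(runtime_x_ray=x_ray_result))  # x_ray_result: RuntimeXRayResult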