supervisely 6.73.390__py3-none-any.whl → 6.73.391__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. supervisely/app/widgets/experiment_selector/experiment_selector.py +20 -3
  2. supervisely/app/widgets/experiment_selector/template.html +49 -70
  3. supervisely/app/widgets/report_thumbnail/report_thumbnail.py +19 -4
  4. supervisely/decorators/profile.py +20 -0
  5. supervisely/nn/benchmark/utils/detection/utlis.py +7 -0
  6. supervisely/nn/experiments.py +4 -0
  7. supervisely/nn/inference/gui/serving_gui_template.py +71 -11
  8. supervisely/nn/inference/inference.py +108 -6
  9. supervisely/nn/training/gui/classes_selector.py +246 -27
  10. supervisely/nn/training/gui/gui.py +318 -234
  11. supervisely/nn/training/gui/hyperparameters_selector.py +2 -2
  12. supervisely/nn/training/gui/model_selector.py +42 -1
  13. supervisely/nn/training/gui/tags_selector.py +1 -1
  14. supervisely/nn/training/gui/train_val_splits_selector.py +8 -7
  15. supervisely/nn/training/gui/training_artifacts.py +10 -1
  16. supervisely/nn/training/gui/training_process.py +17 -1
  17. supervisely/nn/training/train_app.py +227 -72
  18. supervisely/template/__init__.py +2 -0
  19. supervisely/template/base_generator.py +90 -0
  20. supervisely/template/experiment/__init__.py +0 -0
  21. supervisely/template/experiment/experiment.html.jinja +537 -0
  22. supervisely/template/experiment/experiment_generator.py +996 -0
  23. supervisely/template/experiment/header.html.jinja +154 -0
  24. supervisely/template/experiment/sidebar.html.jinja +240 -0
  25. supervisely/template/experiment/sly-style.css +397 -0
  26. supervisely/template/experiment/template.html.jinja +18 -0
  27. supervisely/template/extensions.py +172 -0
  28. supervisely/template/template_renderer.py +253 -0
  29. {supervisely-6.73.390.dist-info → supervisely-6.73.391.dist-info}/METADATA +3 -1
  30. {supervisely-6.73.390.dist-info → supervisely-6.73.391.dist-info}/RECORD +34 -23
  31. {supervisely-6.73.390.dist-info → supervisely-6.73.391.dist-info}/LICENSE +0 -0
  32. {supervisely-6.73.390.dist-info → supervisely-6.73.391.dist-info}/WHEEL +0 -0
  33. {supervisely-6.73.390.dist-info → supervisely-6.73.391.dist-info}/entry_points.txt +0 -0
  34. {supervisely-6.73.390.dist-info → supervisely-6.73.391.dist-info}/top_level.txt +0 -0
supervisely/template/base_generator.py
@@ -0,0 +1,90 @@
+ import inspect
+ import os
+ from datetime import datetime
+ from pathlib import Path
+ from typing import Any, Dict, Optional, Tuple
+
+ import supervisely.io.env as sly_env
+ import supervisely.io.fs as sly_fs
+ import supervisely.io.json as sly_json
+ from supervisely import logger
+ from supervisely.api.api import Api
+ from supervisely.template.template_renderer import TemplateRenderer
+
+
+ class BaseGenerator:
+     """
+     Base class for generating reports from Jinja2 templates and uploading them to Supervisely.
+     """
+
+     TEMPLATE = "template.html.jinja"
+     VUE_TEMPLATE_NAME = "template.vue"
+     LINK_FILE = "Open Report.lnk"
+
+     def __init__(self, api: Api, output_dir: str):
+         self.api = api
+         self.output_dir = output_dir
+         self.template_renderer = TemplateRenderer()
+         os.makedirs(self.output_dir, exist_ok=True)
+
+     @property
+     def template_path(self) -> str:
+         cls_dir = Path(inspect.getfile(self.__class__)).parent
+         return f"{cls_dir}/{self.TEMPLATE}"
+
+     def context(self) -> dict:
+         raise NotImplementedError("Subclasses must implement the context method.")
+
+     def state(self) -> dict:
+         return {}
+
+     def generate(self):
+         # Render
+         content = self._render()
+         # Save template.vue
+         template_path = f"{self.output_dir}/{self.VUE_TEMPLATE_NAME}"
+         with open(template_path, "w", encoding="utf-8") as f:
+             f.write(content)
+         # Save state.json
+         state = self.state()
+         state_path = f"{self.output_dir}/state.json"
+         sly_json.dump_json_file(state, state_path)
+
+     def upload(self, remote_dir: str, team_id: Optional[int] = None, **kwargs):
+         team_id = team_id or sly_env.team_id()
+         self.api.file.upload_directory_fast(
+             team_id=team_id,
+             local_dir=self.output_dir,
+             remote_dir=remote_dir,
+             **kwargs
+         )
+         logger.info(f"Template uploaded to {remote_dir}")
+         template_id = self.api.file.get_info_by_path(
+             team_id=team_id,
+             remote_path=f"{remote_dir}/{self.VUE_TEMPLATE_NAME}",
+         ).id
+         if self._report_url(self.api.server_address, template_id) is not None:
+             url = self._upload_link_file(template_id, remote_dir, team_id)
+             logger.info(f"Open URL: {url}")
+         else:
+             logger.warning("Subclasses must implement the `_report_url` method to upload a link file.")
+         return template_id
+
+     def _render(self) -> str:
+         context = self.context()
+         content = self.template_renderer.render(self.template_path, context)
+         return content
+
+     def _upload_link_file(self, template_id: int, remote_dir: str, team_id: int):
+         url = self._report_url(self.api.server_address, template_id)
+         link_path = os.path.join(self.output_dir, self.LINK_FILE)
+         with open(link_path, "w") as f:
+             f.write(url)
+         self.api.file.upload(team_id=team_id, src=link_path, dst=self._link_file_dst_path(remote_dir))
+         return url
+
+     def _link_file_dst_path(self, remote_dir: str) -> str:
+         return f"{remote_dir}/{self.LINK_FILE}"
+
+     def _report_url(self, server_address: str, template_id: int) -> str:
+         raise NotImplementedError("Subclasses must implement the `_report_url` method to upload a link file.")
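The new `BaseGenerator` leaves two hooks to subclasses: `context()` must return the variables for the Jinja template that sits next to the subclass's module, and `_report_url()` lets `upload()` write the `Open Report.lnk` link file. A minimal usage sketch (the class name, context keys, and URL scheme below are illustrative assumptions, not code from the package):

```python
import supervisely as sly
from supervisely.template.base_generator import BaseGenerator


class MyReportGenerator(BaseGenerator):
    # Hypothetical subclass; the real one in this release is
    # supervisely/template/experiment/experiment_generator.py.

    def context(self) -> dict:
        # Variables consumed by the template.html.jinja next to this class.
        return {"title": "My experiment report"}

    def _report_url(self, server_address: str, template_id: int) -> str:
        # Assumed URL scheme for opening the rendered report; adjust per instance.
        return f"{server_address}/template-preview/{template_id}"


api = sly.Api()  # expects API_TOKEN and SERVER_ADDRESS in the environment
gen = MyReportGenerator(api, output_dir="./report")
gen.generate()                        # writes template.vue and state.json to ./report
gen.upload("/reports/my-experiment")  # uploads the directory plus the .lnk link file
```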
supervisely/template/experiment/experiment.html.jinja
@@ -0,0 +1,537 @@
+ {% if experiment.training.evaluation.id and widgets.sample_pred_gallery %}
+
+ ## Predictions
+
+ Here are prediction samples made with the **{{ experiment.training.checkpoints.pytorch.name }}** checkpoint.
+
+ <div class="prediction-gallery">
+ {{ widgets.sample_pred_gallery | safe }}
+ </div>
+
+ {% endif %}
+
+ {% if experiment.training.evaluation.id %}
+
+ ## Evaluation
+
+ The **{{ experiment.training.checkpoints.pytorch.name }}** checkpoint was evaluated on the validation set containing **{{ experiment.project.splits.val }}
+ images**.
+
+ See the full [📊 Evaluation Report]({{ experiment.training.evaluation.url }}) for details and visualizations.
+ {{ widgets.tables.metrics | safe }}
+
+ {% else %}
+
+ ## Evaluation
+ No evaluation metrics are available for this experiment. The model training completed successfully, but no evaluation
+ was performed.
+
+ {% endif %}
+
+ ## Artifacts
+
+ [📂 Open in Team Files]({{ experiment.paths.artifacts_dir.url }}){:target="_blank"}
+
+ The artifacts of this experiment are stored in **Team Files**. You can download them and use them in your code.
+ Here are the essential checkpoints and exports of the model:
+
+ - 🔥 Best checkpoint: **{{ experiment.training.checkpoints.pytorch.name }}** ([download]({{ experiment.training.checkpoints.pytorch.url
+ }}){:download="{{ experiment.training.checkpoints.pytorch.name }}"})
+ {% if experiment.training.checkpoints.onnx.name %}
+ - 📦 ONNX export: **{{ experiment.training.checkpoints.onnx.name }}** ([download]({{ experiment.training.checkpoints.onnx.url
+ }}){:download="{{ experiment.training.checkpoints.onnx.name }}"})
+ {% endif %}
+ {% if experiment.training.checkpoints.tensorrt.name %}
+ - ⚡ TensorRT export: **{{ experiment.training.checkpoints.tensorrt.name }}** ([download]({{ experiment.training.checkpoints.tensorrt.url
+ }}){:download="{{ experiment.training.checkpoints.tensorrt.name }}"})
+ {% endif %}
+
+ {% if widgets.tables.checkpoints %}
+ <details>
+ <summary>📋 All Checkpoints</summary>
+ {{ widgets.tables.checkpoints }}
+ </details>
+ {% endif %}
+
+ ## Classes
+
+ The model can predict {{ experiment.project.classes.count }} classes. Here is the full list of classes:
+
+ <details>
+ <summary>Classes</summary>
+ {{ widgets.tables.classes }}
+
+ </details>
+
+ {% if experiment.training.hyperparameters %}
+
+ ## Hyperparameters
+
+ The training process was configured with the following hyperparameters. You can use them to reproduce the training.
+
+ <details>
+ <summary>Hyperparameters</summary>
+
+ ```yaml
+ {% for hyperparameter in experiment.training.hyperparameters %}
+ {{ hyperparameter }}
+ {% endfor %}
+ ```
+
+ </details>
+ {% endif %}
+
+ ## Supervisely Apps
+
+ The quick actions on this page, such as **Deploy**, **Predict**, or **Fine-tune**, help you work with your model quickly. You can also run the apps manually from the Supervisely Platform. Here are the apps related to this experiment:
+
+ - [Serve {{ experiment.model.framework }}]({{ env.server_address }}/ecosystem/apps/{{ resources.apps.serve.slug
+ }}){:target="_blank"} - deploy your model in the Supervisely Platform.
+ - [Train {{ experiment.model.framework }}]({{ env.server_address }}/ecosystem/apps/{{ resources.apps.train.slug
+ }}){:target="_blank"} - train a model in the Supervisely Platform.
+ - [Apply NN to Images]({{ env.server_address }}/ecosystem/apps/{{ resources.apps.apply_nn_to_images.slug }}){:target="_blank"} -
+ connect to your model and make predictions on an image project or dataset.
+ - [Apply NN to Videos]({{ env.server_address }}/ecosystem/apps/{{ resources.apps.apply_nn_to_videos.slug }}){:target="_blank"} -
+ make predictions on a video project or dataset.
+
+ ## API Integration & Deployment
+
+ In this section, you'll find quickstart guides for integrating your model into your applications using the Supervisely API, deploying your model outside of the Supervisely Platform, and running it in a Docker container.
+
+ ### Table of contents:
+
+ - [Supervisely API](#supervisely-api)
+ - [Deploy in Docker](#deploy-in-docker)
+ - [Deploy locally with Supervisely SDK](#deploy-locally-with-supervisely-sdk)
+ - [Using Original Model Codebase](#using-original-model-codebase)
+
+ ## Supervisely API
+
+ Here is a **quickstart** guide on how to use the Supervisely API.
+
+ 1. Install Supervisely:
+
+ ```bash
+ pip install supervisely
+ ```
+
+ 2. Authentication. Provide your **API token** and the **Server address** as environment variables. For example, you
+ can export them in the terminal before running the script:
+
+ ```bash
+ export API_TOKEN="your_api_token"
+ export SERVER_ADDRESS="https://app.supervisely.com" # or your own server URL for Enterprise Edition
+ ```
+
+ If you need help with authentication, check the [Basics of Authentication](https://developer.supervisely.com/getting-started/basics-of-authentication){:target="_blank"} tutorial.
+
+ 3. The following code will deploy a model and make predictions using the Supervisely API.
+
+ ```python
+ import supervisely as sly
+
+ # 1. Authenticate with Supervisely API
+ api = sly.Api()  # Make sure you've set your credentials in environment variables.
+
+ # 2. Deploy the model
+ model = api.nn.deploy(
+     model="{{ experiment.paths.artifacts_dir.path }}/checkpoints/{{ experiment.training.checkpoints.pytorch.name }}",
+     device="cuda:0",  # or "cpu"
+ )
+
+ # 3. Predict
+ predictions = model.predict(
+     input=["image1.jpg", "image2.jpg"],  # can also be numpy arrays, PIL images, URLs or a directory
+ )
+ ```
+
+ ### Deploy via API
+
+ Deploy your model in a few lines of code. The model will be deployed in the Supervisely Platform; after that, you can
+ use it for predictions.
+
+ {% tabs %}
+
+ {% tab title="PyTorch" %}
+
+ ```python
+ import supervisely as sly
+
+ api = sly.Api()
+
+ # Deploy PyTorch checkpoint
+ model = api.nn.deploy(
+     model="{{ experiment.paths.artifacts_dir.path }}/checkpoints/{{ experiment.training.checkpoints.pytorch.name }}",
+     device="cuda:0",  # or "cpu"
+ )
+ ```
+
+ {% endtab %}
+
+ {% if experiment.training.checkpoints.onnx.name %}
+ {% tab title="ONNX" %}
+
+ ```python
+ import supervisely as sly
+
+ api = sly.Api()
+
+ # Deploy ONNX checkpoint
+ model = api.nn.deploy(
+     model="{{ experiment.paths.artifacts_dir.path }}/export/{{ experiment.training.checkpoints.onnx.name }}",
+     device="cuda:0",  # or "cpu"
+ )
+ ```
+
+ {% endtab %}
+ {% endif %}
+
+ {% if experiment.training.checkpoints.tensorrt.name %}
+ {% tab title="TensorRT" %}
+
+ ```python
+ import supervisely as sly
+
+ api = sly.Api()
+
+ # Deploy TensorRT checkpoint
+ model = api.nn.deploy(
+     model="{{ experiment.paths.artifacts_dir.path }}/export/{{ experiment.training.checkpoints.tensorrt.name }}",
+     device="cuda:0",  # or "cpu"
+ )
+ ```
+
+ {% endtab %}
+ {% endif %}
+
+ {% endtabs %}
+
+ > For more information, see the [Model
+ API](https://docs.supervisely.com/neural-networks/overview-1/model-api){:target="_blank"}
+ documentation.
+
+ ### Predict via API
+
+ Use the deployed model to make predictions on images, videos, or directories: connect to the deployed
+ model and run inference.
+
+ {% tabs %}
+
+ {% tab title="Local Images" %}
+
+ ```python
+ # Predict local images
+ predictions = model.predict(
+     input="image.jpg",  # Can also be a directory, np.array, PIL.Image, URL or a list of them
+ )
+ ```
+
+ {% endtab %}
+
+ {% tab title="Image IDs" %}
+
+ ```python
+ # Predict images in Supervisely
+ predictions = model.predict(image_ids=[123, 124])  # Image IDs in Supervisely
+ ```
+
+ {% endtab %}
+
+ {% tab title="Dataset" %}
+
+ ```python
+ # Predict dataset
+ predictions = model.predict(dataset_id=12)  # Dataset ID in Supervisely
+ ```
+
+ {% endtab %}
+
+ {% tab title="Project" %}
+
+ ```python
+ # Predict project
+ predictions = model.predict(project_id=21)  # Project ID in Supervisely
+ ```
+
+ {% endtab %}
+
+ {% tab title="Video" %}
+
+ ```python
+ # Predict video
+ predictions = model.predict(video_id=123)  # Video ID in Supervisely
+ ```
+
+ {% endtab %}
+
+ {% endtabs %}
+
+ > For more information, see the [Prediction
+ API](https://docs.supervisely.com/neural-networks/overview-1/prediction-api){:target="_blank"} documentation.
+
+ {% if experiment.model.task_type == "object detection" %}
+
+ ## Tracking Objects in Video
+
+ You can track objects in video using the `boxmot` library.
+ [BoxMot](https://github.com/mikel-brostrom/boxmot){:target="_blank"} is a
+ third-party library that implements lightweight neural networks for the tracking-by-detection task (tracking is
+ performed on the objects predicted by a separate detector). For `boxmot` models, even a CPU device is enough.
+
+ First, install [BoxMot](https://github.com/mikel-brostrom/boxmot){:target="_blank"}:
+
+ ```bash
+ pip install boxmot
+ ```
+
+ The Supervisely SDK has the `track()` method in `supervisely.nn.tracking`, which allows you to apply `boxmot` models
+ together with a detector in a single line of code. This method takes two arguments: a `boxmot` tracker and a
+ `PredictionSession` of a detector. It returns a `sly.VideoAnnotation` with the tracked objects.
+
+ ```python
+ import supervisely as sly
+ from supervisely.nn.tracking import track
+ import boxmot
+ from pathlib import Path
+
+ api = sly.Api()
+
+ # Deploy a detector
+ detector = api.nn.deploy(
+     model="{{ experiment.model.framework }}/{{ experiment.model.name }}",
+     device="cuda:0",  # Use GPU for detection
+ )
+
+ # Load BoxMot tracker
+ tracker = boxmot.BotSort(
+     reid_weights=Path('osnet_x0_25_msmt17.pt'),
+     device="cpu",  # Use CPU for tracking
+ )
+
+ # Track objects in a single line
+ video_ann: sly.VideoAnnotation = track(
+     video_id=42,
+     detector=detector,
+     tracker=tracker,
+ )
+ ```
+
+ > For more information, see the section [Tracking Objects in
+ Video](https://docs.supervisely.com/neural-networks/overview-1/prediction-api#tracking-objects-in-video){:target="_blank"}.
+
+ {% endif %}
+
+ ## Deploy in Docker
+
+ You can deploy the model in a 🐋 Docker Container with a single `docker run` command. Download a checkpoint, pull the
+ Docker image for the corresponding model's framework, and run the `docker run` command with additional arguments.
+
+ 1. Download the checkpoint from Supervisely ([Open in Team Files]({{ experiment.paths.checkpoints_dir.url }}){:target="_blank"})
+
+ 2. Pull the Docker image
+
+ ```bash
+ docker pull {{ code.docker.image }}
+ ```
+
+ 3. Run the Docker container
+
+ ```bash
+ # The -v flag mounts the experiment directory into the container
+ docker run \
+     --runtime=nvidia \
+     -v "./{{ experiment.paths.experiment_dir.path }}:/model" \
+     -p 8000:8000 \
+     {{ code.docker.image }} \
+     deploy \
+     --model "/model/checkpoints/{{ experiment.training.checkpoints.pytorch.name }}" \
+     --device "cuda:0"
+ ```
+
+ 4. Connect and run inference:
+
+ ```python
+ from supervisely.nn import ModelAPI
+
+ # No need to authenticate for local deployment
+ model = ModelAPI(
+     url="http://localhost:8000"  # URL of a running model's server in Docker container
+ )
+
+ # Predict
+ predictions = model.predict(
+     input=["image1.jpg", "image2.jpg"]  # Can also be numpy arrays, PIL images, URLs or a directory
+ )
+ ```
+
+ See the [Model API](https://docs.supervisely.com/neural-networks/overview-1/model-api){:target="_blank"} documentation for more details on how to use the `ModelAPI` class.
+
+ Alternatively, you can use `docker run` with the `predict` action to make predictions in a single command. This is a
+ quick way to start inference on your local images, videos, or directories without deploying the model. The container stops automatically after the predictions are made.
+
+ ```bash
+ # Put your image/video/directory path after the `predict` action
+ docker run \
+     --runtime=nvidia \
+     -v "./{{ experiment.paths.experiment_dir.path }}:/model" \
+     -p 8000:8000 \
+     {{ code.docker.image }} \
+     predict \
+     "./image.jpg" \
+     --model "/model/checkpoints/{{ experiment.training.checkpoints.pytorch.name }}" \
+     --device "cuda:0"
+ ```
+
+ > For more information, see the [Deploy in Docker
+ Container](https://docs.supervisely.com/neural-networks/overview-1/deploy_and_predict_with_supervisely_sdk#deploy-in-docker-container){:target="_blank"}
+ documentation.
+
+ ## Deploy locally with Supervisely SDK
+
+ If you develop your application outside of Supervisely, you can deploy your model on your own machine with the help of the Supervisely SDK and our prepared codebase for the specific model (we usually make a fork of the original model repository).
+ This approach helps you quickly set up the environment and run inference without having to write model-loading and prediction code yourself: all our model integrations are developed with the Supervisely SDK, so inference takes a few lines of code in a unified way.
+
+ 1. Download the checkpoint from Supervisely ([Open in Team Files]({{ experiment.paths.checkpoints_dir.url }}){:target="_blank"})
+
+ 2. Clone our repository
+
+ ```bash
+ git clone {{ code.local_prediction.repo.url }}
+ cd {{ code.local_prediction.repo.name }}
+ ```
+
+ 3. Install requirements
+
+ ```bash
+ pip install -r dev_requirements.txt
+ pip install supervisely
+ ```
+
+ 4. Run the inference code
+
+ ```python
+ # Be sure you are in the root of the {{ code.local_prediction.repo.name }} repository
+ from {{ code.local_prediction.serving_module }} import {{ code.local_prediction.serving_class }}
+
+ # Load model
+ model = {{ code.local_prediction.serving_class }}(
+     model="{{ experiment.paths.artifacts_dir.path }}/checkpoints/{{ experiment.training.checkpoints.pytorch.name }}",  # path to the checkpoint you've downloaded
+     device="cuda",  # or "cuda:1", "cpu"
+ )
+
+ # Predict
+ predictions = model.predict(
+     # 'input' can accept various formats: image paths, np.arrays, Supervisely IDs and others.
+     input=["path/to/image1.jpg", "path/to/image2.jpg"],
+     conf=0.5,  # confidence threshold
+     # ... additional parameters (see the docs)
+ )
+ ```
+
+ > For more information, see the [Local Deployment](https://docs.supervisely.com/neural-networks/overview-1/local-deployment.md){:target="_blank"} and [Prediction API](https://docs.supervisely.com/neural-networks/overview-1/prediction-api){:target="_blank"} documentation.
+
+ {% if experiment.training.checkpoints.onnx.name or experiment.training.checkpoints.tensorrt.name %}
+ ### Deploy ONNX/TensorRT
+
+ You can also use exported ONNX and TensorRT models. Specify the `model` parameter as a path to your ONNX or TensorRT
+ model,
+ {% if experiment.training.checkpoints.onnx.classes_url or experiment.training.checkpoints.tensorrt.classes_url %}
+ and download the [`classes.json`]({{ experiment.training.checkpoints.onnx.classes_url or experiment.training.checkpoints.tensorrt.classes_url
+ }}){:download="classes.json"} file from the export directory.
+ {% else %}
+ and provide class names in the additional `classes` parameter.
+ {% endif %}
+
+ ```python
+ # Be sure you are in the root of the {{ code.local_prediction.repo.name }} repository
+ from {{ code.local_prediction.serving_module }} import {{ code.local_prediction.serving_class }}
+ {% if experiment.training.checkpoints.onnx.classes_url or experiment.training.checkpoints.tensorrt.classes_url %}
+ from supervisely.io.json import load_json_file
+
+ classes_path = "./{{ experiment.paths.experiment_dir.path }}/export/classes.json"
+ classes = load_json_file(classes_path)
+ {% else %}
+
+ classes = {{ project.classes.names.short_list }}
+ {% endif %}
+
+ # Deploy ONNX or TensorRT
+ model = {{ code.local_prediction.serving_class }}(
+     # Path to the ONNX or TensorRT model
+     model="{{ experiment.paths.artifacts_dir.path }}/export/{{ experiment.training.checkpoints.onnx.name or experiment.training.checkpoints.tensorrt.name }}",
+     device="cuda",
+ )
+
+ # Predict
+ predictions = model.predict(
+     # 'input' can accept various formats: image paths, np.arrays, Supervisely IDs and others.
+     input=["path/to/image1.jpg", "path/to/image2.jpg"],
+     conf=0.5,  # confidence threshold
+     classes=classes,
+     # ... additional parameters (see the docs)
+ )
+ ```
+
+ {% endif %}
+
+ {% if code.demo.pytorch.path %}
+
+ ## Using Original Model Codebase
+
+ In this approach, you'll completely decouple your model from both the **Supervisely Platform** and the **Supervisely SDK**,
+ and you will develop your own code for inference and deployment of that particular model. It's important to understand
+ that for each neural network or framework, you need to set up the environment and write the inference code yourself,
+ since each model has its own installation instructions and its own way of processing inputs and outputs.
+
+ We provide basic instructions and a demo script showing how to load {{ experiment.model.framework }} and get predictions
+ using the original code from the authors.
+
+ 1. Download the checkpoint from Supervisely ([Open in Team Files]({{ experiment.paths.checkpoints_dir.url }}){:target="_blank"})
+
+ 2. Prepare the environment following the instructions of the original repository [{{ code.local_prediction.repo.name }}]({{
+ code.local_prediction.repo.url }}){:target="_blank"}
+
+ 3. Use the demo script for inference:
+
+ <details>
+ <summary><strong>🐍 View Code</strong></summary>
+
+ <sly-iw-tabs :tabs="[
+     { name: 'pytorch-demo', title: '🔥 PyTorch' },
+     {% if code.demo.onnx.path %}
+     { name: 'onnx-demo', title: '📦 ONNX' },
+     {% endif %}
+     {% if code.demo.tensorrt.path %}
+     { name: 'tensorrt-demo', title: '⚡ TensorRT' }
+     {% endif %}
+ ]" :defaultIndex="0">
+
+ <template #pytorch-demo>
+
+ ```python
+ {{ code.demo.pytorch.script | safe }}
+ ```
+
+ </template>
+
+ {% if code.demo.onnx.path %}
+ <template #onnx-demo>
+
+ ```python
+ {{ code.demo.onnx.script | safe }}
+ ```
+
+ </template>
+ {% endif %}
+
+ {% if code.demo.tensorrt.path %}
+ <template #tensorrt-demo>
+
+ ```python
+ {{ code.demo.tensorrt.script | safe }}
+ ```
+
+ </template>
+ {% endif %}
+
+ </sly-iw-tabs>
+
+ </details>
+
+ {% endif %}
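For reference, this template is rendered by `BaseGenerator._render()` above via `TemplateRenderer.render(template_path, context)`; the `experiment`, `widgets`, `code`, `env`, and `resources` objects used throughout come from the generator's `context()`. A rough standalone sketch follows (the stub context is an assumption; the real structure is built in `experiment_generator.py`, and depending on the renderer's undefined-variable policy, missing keys may render empty or raise):

```python
from supervisely.template.template_renderer import TemplateRenderer

# Stub context: only a sketch of what experiment_generator.py actually builds.
context = {
    "experiment": {},  # project, model, training, paths, ...
    "widgets": {},     # pre-rendered HTML fragments (tables, galleries)
    "code": {},        # docker image, repo info, demo scripts
    "env": {"server_address": "https://app.supervisely.com"},
    "resources": {},   # ecosystem app slugs
}

renderer = TemplateRenderer()
html = renderer.render("supervisely/template/experiment/experiment.html.jinja", context)
```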