supervisely 6.73.406-py3-none-any.whl → 6.73.408-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- supervisely/api/app_api.py +5 -1
- supervisely/api/task_api.py +35 -0
- supervisely/nn/inference/inference.py +75 -7
- supervisely/nn/model/model_api.py +6 -0
- {supervisely-6.73.406.dist-info → supervisely-6.73.408.dist-info}/METADATA +1 -1
- {supervisely-6.73.406.dist-info → supervisely-6.73.408.dist-info}/RECORD +10 -10
- {supervisely-6.73.406.dist-info → supervisely-6.73.408.dist-info}/LICENSE +0 -0
- {supervisely-6.73.406.dist-info → supervisely-6.73.408.dist-info}/WHEEL +0 -0
- {supervisely-6.73.406.dist-info → supervisely-6.73.408.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.406.dist-info → supervisely-6.73.408.dist-info}/top_level.txt +0 -0
supervisely/api/app_api.py
CHANGED
@@ -11,7 +11,7 @@ from typing_extensions import Literal

 from supervisely._utils import is_community, is_development, take_with_default
 from supervisely.api.module_api import ApiField
-from supervisely.api.task_api import TaskApi
+from supervisely.api.task_api import KubernetesSettings, TaskApi

 # from supervisely.app.constants import DATA, STATE, CONTEXT, TEMPLATE
 STATE = "state"
@@ -1682,6 +1682,7 @@ class AppApi(TaskApi):
         proxy_keep_url: bool = False,
         module_id: Optional[int] = None,
         redirect_requests: Dict[str, int] = {},
+        kubernetes_settings: Optional[Union[KubernetesSettings, Dict[str, Any]]] = None,
     ) -> SessionInfo:
         """Start a new application session (task).

@@ -1713,6 +1714,8 @@ class AppApi(TaskApi):
         :type module_id: Optional[int]
         :param redirect_requests: For internal usage only in Develop and Debug mode.
         :type redirect_requests: dict
+        :param kubernetes_settings: Kubernetes settings for the task. If not specified, default settings will be used.
+        :type kubernetes_settings: Optional[Union[KubernetesSettings, Dict[str, Any]]]
         :return: SessionInfo object with information about the started task.
         :rtype: SessionInfo
         :raises ValueError: If both app_id and module_id are not provided.
@@ -1747,6 +1750,7 @@ class AppApi(TaskApi):
             proxy_keep_url=proxy_keep_url,
             module_id=module_id,
             redirect_requests=redirect_requests,
+            kubernetes_settings=kubernetes_settings,
         )
         if type(result) is not list:
             result = [result]
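Taken together with the `task_api.py` changes below, `AppApi` now simply forwards the new argument to the underlying task-start call. A minimal usage sketch, assuming the surrounding method is the SDK's `api.app.start`; the ids and the rest of the call are illustrative, not taken from this diff:

```python
import supervisely as sly
from supervisely.api.task_api import KubernetesSettings

api = sly.Api.from_env()

# Cap the session's resources; fields left as None fall back to server defaults.
k8s = KubernetesSettings(limit_cpus=4, limit_memory_gb=16, limit_gpus=1)

# Hypothetical ids; a plain dict with camelCase keys would also be accepted.
session = api.app.start(
    agent_id=7,
    module_id=1234,
    workspace_id=42,
    kubernetes_settings=k8s,
)
```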
supervisely/api/task_api.py
CHANGED
@@ -11,6 +11,7 @@ from pathlib import Path
 from typing import Any, Callable, Dict, List, Literal, NamedTuple, Optional, Union

 import requests
+from pydantic import BaseModel, Field
 from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
 from tqdm import tqdm

@@ -31,6 +32,28 @@ from supervisely.io.fs import (
 )


+class KubernetesSettings(BaseModel):
+    """
+    KubernetesSettings for application resource limits and requests.
+    """
+
+    use_health_check: Optional[bool] = Field(None, alias="useHealthCheck")
+    request_cpus: Optional[int] = Field(None, alias="requestCpus")
+    limit_cpus: Optional[int] = Field(None, alias="limitCpus")
+    limit_memory_gb: Optional[int] = Field(None, alias="limitMemoryGb")
+    limit_shm_gb: Optional[int] = Field(None, alias="limitShmGb")
+    limit_storage_gb: Optional[int] = Field(None, alias="limitStorageGb")
+    limit_gpus: Optional[int] = Field(None, alias="limitGpus")
+    limit_gpu_memory_mb: Optional[int] = Field(None, alias="limitGpuMemoryMb")
+    limit_gpu_cores_perc: Optional[int] = Field(None, alias="limitGpuCoresPerc")
+
+    model_config = {"populate_by_name": True}
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to dict with only non-None values using aliases."""
+        return self.model_dump(exclude_none=True, by_alias=True)
+
+
 class TaskFinishedWithError(Exception):
     """TaskFinishedWithError"""

@@ -365,6 +388,7 @@ class TaskApi(ModuleApiBase, ModuleWithStatus):
         module_id: Optional[int] = None,
         redirect_requests: Optional[Dict[str, int]] = {},
         limit_by_workspace: bool = False,
+        kubernetes_settings: Optional[Union[KubernetesSettings, Dict[str, Any]]] = None,
     ) -> Dict[str, Any]:
         """Starts the application task on the agent.

@@ -401,6 +425,8 @@ class TaskApi(ModuleApiBase, ModuleWithStatus):
         :param limit_by_workspace: If set to True tasks will be only visible inside of the workspace
             with specified workspace_id.
         :type limit_by_workspace: bool, optional
+        :param kubernetes_settings: Kubernetes settings for the application.
+        :type kubernetes_settings: Union[KubernetesSettings, Dict[str, Any]], optional
         :return: Task information in JSON format.
         :rtype: Dict[str, Any]

@@ -439,6 +465,15 @@ class TaskApi(ModuleApiBase, ModuleWithStatus):
             ApiField.LIMIT_BY_WORKSPACE: limit_by_workspace,
         }

+        if kubernetes_settings is not None:
+            if isinstance(kubernetes_settings, KubernetesSettings):
+                kubernetes_settings = kubernetes_settings.to_dict()
+            if not isinstance(kubernetes_settings, dict):
+                raise TypeError(
+                    f"kubernetes_settings must be a dict or an instance of KubernetesSettings, got {type(kubernetes_settings)}"
+                )
+            advanced_settings.update(kubernetes_settings)
+
         data = {
             ApiField.AGENT_ID: agent_id,
             # "nodeId": agent_id,
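The new `KubernetesSettings` model is self-contained, so its behavior follows directly from the hunk above; a small sketch of the snake_case/camelCase round-trip:

```python
from supervisely.api.task_api import KubernetesSettings

# populate_by_name=True lets callers use the snake_case field names...
settings = KubernetesSettings(request_cpus=2, limit_memory_gb=8, limit_gpus=1)

# ...while to_dict() emits the camelCase aliases the server expects,
# dropping every field left as None:
assert settings.to_dict() == {"requestCpus": 2, "limitMemoryGb": 8, "limitGpus": 1}

# Aliases are accepted directly too, which is what makes a raw camelCase dict
# interchangeable with the model in start_task:
same = KubernetesSettings(requestCpus=2, limitMemoryGb=8, limitGpus=1)
assert same.to_dict() == settings.to_dict()
```

Either form ends up merged into the task's `advanced_settings` payload, as the last hunk above shows.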
supervisely/nn/inference/inference.py
CHANGED

@@ -182,6 +182,9 @@ class Inference:
         self.classes: List[str] = None
         self._model_dir = model_dir
         self._model_served = False
+        self._freeze_timer = None
+        self._model_frozen = False
+        self._inactivity_timeout = 3600  # 1 hour
         self._deploy_params: dict = None
         self._model_meta = None
         self._confidence = "confidence"
@@ -225,6 +228,7 @@ class Inference:
             deploy_params["model_files"] = local_model_files
             logger.debug("Loading model...")
             self._load_model_headless(**deploy_params)
+            self._schedule_freeze_on_inactivity()

         if self._use_gui:
             initialize_custom_gui_method = getattr(self, "initialize_custom_gui", None)
@@ -273,6 +277,7 @@ class Inference:
             self.device = device
             self.load_on_device(self._model_dir, device)
             gui.show_deployed_model_info(self)
+            self._schedule_freeze_on_inactivity()

         def on_change_model_callback(
             gui: Union[GUI.InferenceGUI, GUI.ServingGUI, GUI.ServingGUITemplate],
@@ -407,6 +412,7 @@ class Inference:
         deploy_params = self._get_deploy_parameters_from_custom_checkpoint(checkpoint_path, device, runtime)
         logger.debug(f"Deploying custom model '{checkpoint_name}'...")
         self._load_model_headless(**deploy_params)
+        self._schedule_freeze_on_inactivity()
         return self

     def get_batch_size(self):
@@ -1228,6 +1234,10 @@ class Inference:

     def shutdown_model(self):
         self._model_served = False
+        self._model_frozen = False
+        if self._freeze_timer is not None:
+            self._freeze_timer.cancel()
+            self._freeze_timer = None
         self.device = None
         self.runtime = None
         self.model_precision = None
@@ -1437,12 +1447,13 @@ class Inference:
         if api is None:
             api = self.api
         return api
-
+
     def _inference_auto(
         self,
         source: List[Union[str, np.ndarray]],
         settings: Dict[str, Any],
     ) -> Tuple[List[Annotation], List[dict]]:
+        self._unfreeze_model()
         inference_mode = settings.get("inference_mode", "full_image")
         use_raw = (
             inference_mode == "sliding_window" and settings["sliding_window_mode"] == "advanced"
@@ -1453,9 +1464,11 @@ class Inference:
         if (not use_raw and self.is_batch_inference_supported()) or (
             use_raw and is_predict_batch_raw_implemented
         ):
-            return self._inference_batched_wrapper(source, settings)
+            result = self._inference_batched_wrapper(source, settings)
         else:
-            return self._inference_one_by_one_wrapper(source, settings)
+            result = self._inference_one_by_one_wrapper(source, settings)
+        self._schedule_freeze_on_inactivity()
+        return result

     def inference(
         self,
@@ -2432,9 +2445,7 @@ class Inference:
     def _check_serve_before_call(self, func):
         @wraps(func)
         def wrapper(*args, **kwargs):
-            if self._model_served is True:
-                return func(*args, **kwargs)
-            else:
+            if self._model_served is False:
                 msg = (
                     "The model has not yet been deployed. "
                     "Please select the appropriate model in the UI and press the 'Serve' button. "
@@ -2442,9 +2453,53 @@ class Inference:
                 )
                 # raise DialogWindowError(title="Call undeployed model.", description=msg)
                 raise RuntimeError(msg)
-
+            return func(*args, **kwargs)
         return wrapper

+    def _freeze_model(self):
+        if self._model_frozen or not self._model_served:
+            return
+        logger.debug("Freezing model...")
+        runtime = self._deploy_params.get("runtime")
+        if runtime and runtime.lower() != RuntimeType.PYTORCH.lower():
+            logger.debug("Model is not running in PyTorch runtime, cannot freeze.")
+            return
+        previous_device = self._deploy_params.get("device")
+        if previous_device == "cpu":
+            logger.debug("Model is already running on CPU, cannot freeze.")
+            return
+
+        deploy_params = self._deploy_params.copy()
+        deploy_params["device"] = "cpu"
+        try:
+            self._load_model(deploy_params)
+            self._model_frozen = True
+            logger.info(
+                "Model has been re-deployed to CPU for resource optimization. "
+                "It will be loaded back to the original device on the next inference request."
+            )
+        finally:
+            self._deploy_params["device"] = previous_device
+            clean_up_cuda()
+
+    def _unfreeze_model(self):
+        if not self._model_frozen:
+            return
+        logger.debug("Unfreezing model...")
+        self._model_frozen = False
+        self._load_model(self._deploy_params)
+        clean_up_cuda()
+        logger.debug("Model is unfrozen and ready for inference.")
+
+    def _schedule_freeze_on_inactivity(self):
+        if self._freeze_timer is not None:
+            self._freeze_timer.cancel()
+        timer = threading.Timer(self._inactivity_timeout, self._freeze_model)
+        timer.daemon = True
+        timer.start()
+        self._freeze_timer = timer
+        logger.debug("Model will be frozen in %s seconds due to inactivity.", self._inactivity_timeout)
+
     def _set_served_callback(self):
         self._model_served = True

@@ -2506,6 +2561,7 @@ class Inference:
                 # update to set correct device
                 device = deploy_params.get("device", "cpu")
                 self.gui.set_deployed(device)
+                self._schedule_freeze_on_inactivity()
                 return {"result": "model was successfully deployed"}
             except Exception as e:
                 self.gui._success_label.hide()
@@ -3400,6 +3456,8 @@ class Inference:
                 if self.gui is not None:
                     self.gui._success_label.hide()
                 raise e
+            finally:
+                self._schedule_freeze_on_inactivity()

         @server.post("/list_pretrained_models")
         def _list_pretrained_models():
@@ -3479,6 +3537,16 @@ class Inference:
             },
         }

+        @server.post("/freeze_model")
+        def _freeze_model(request: Request):
+            if self._model_frozen:
+                return {"message": "Model is already frozen."}
+
+            self._freeze_model()
+            if not self._model_frozen:
+                return {"message": "Failed to freeze model. Check the logs for details."}
+            return {"message": "Model is frozen."}
+
         # Local deploy without predict args
         if self._is_cli_deploy:
            self._run_server()
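The freeze cycle is driven by a restartable `threading.Timer`: every deploy and every inference reschedules it, and after an hour of inactivity the model is re-deployed to CPU until the next request unfreezes it. The new route also allows freezing on demand; a sketch of calling it against a locally running serving app, where the host and port are assumptions:

```python
import requests

# The serving app's address depends on the deployment; this one is illustrative.
resp = requests.post("http://localhost:8000/freeze_model")
print(resp.json())
# {"message": "Model is frozen."}          on success
# {"message": "Model is already frozen."}  if called twice
```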
supervisely/nn/model/model_api.py
CHANGED

@@ -111,6 +111,12 @@ class ModelAPI:
         response = self._post("tasks.stop", {ApiField.ID: id})
         return TaskApi.Status(response[ApiField.STATUS])

+    def freeze_model(self):
+        """Freeze the model to free up resources."""
+        if self.task_id is not None:
+            return self.api.task.send_request(self.task_id, "freeze_model", {})
+        return self._post("freeze_model", {})
+
     # --------------------- #

     # region Load
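`ModelAPI.freeze_model` exposes the same endpoint on the client side, routing through the platform when `task_id` is set and hitting the model URL directly otherwise. A usage sketch; how the `ModelAPI` handle is constructed here is illustrative, not taken from this diff:

```python
import supervisely as sly
from supervisely.nn.model.model_api import ModelAPI

api = sly.Api.from_env()

# Hypothetical: attach to an already running Serve task by its id.
model = ModelAPI(api=api, task_id=12345)

# With task_id set this goes through api.task.send_request(task_id, "freeze_model", {});
# without it, the request is POSTed straight to the model's own URL.
model.freeze_model()
```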
{supervisely-6.73.406.dist-info → supervisely-6.73.408.dist-info}/RECORD
CHANGED

@@ -23,7 +23,7 @@ supervisely/api/advanced_api.py,sha256=Nd5cCnHFWc3PSUrCtENxTGtDjS37_lCHXsgXvUI3T
 supervisely/api/agent_api.py,sha256=8EQBwD6v7KLS0-xKcZ12B7mtzKwG7RRgq1fk1vaN144,8893
 supervisely/api/annotation_api.py,sha256=U6dHUIOt6Fe8XcbX1MA19z-fg91maOumJAawKG5ZJsk,82876
 supervisely/api/api.py,sha256=pEgRIWlVqDdtDjAeL_nx2Rwldm6ANwLLacm6kLnyvbE,67723
-supervisely/api/app_api.py,sha256=
+supervisely/api/app_api.py,sha256=ghhBVaxca7xO9Bgy2zbexC89BJ9s0k7fVPd6-6F_Wvw,76594
 supervisely/api/constants.py,sha256=WfqIcEpRnU4Mcfb6q0njeRs2VVSoTAJaIyrqBkBjP8I,253
 supervisely/api/dataset_api.py,sha256=7idBMFL8jumWNw-wlBAbQWC09RskG-3GlidfPDukq3Q,47930
 supervisely/api/entities_collection_api.py,sha256=Be13HsfMFLmq9XpiOfQog0Y569kbUn52hXv6x5vX3Vg,22624
@@ -44,7 +44,7 @@ supervisely/api/remote_storage_api.py,sha256=1O4rTIwW8s9gxC00yvFuKbEMGNsa7YSRlZ8
 supervisely/api/report_api.py,sha256=Om7CGulUbQ4BuJ16eDtz7luLe0JQNqab-LoLpUXu7YE,7123
 supervisely/api/role_api.py,sha256=c1XAU_wZg6zL4wG2R7iuS9EJOoaHHNGchxa1nYVL7yo,3047
 supervisely/api/storage_api.py,sha256=VxiflQt-SfyB1OuEOB66JsMkxCosUr4n0WHQ5if3Ltg,13039
-supervisely/api/task_api.py,sha256=
+supervisely/api/task_api.py,sha256=5Ae3jKWgvvz_Mljk6V1UaIVqDbTvnSD6cpIJ3H3KqT0,38435
 supervisely/api/team_api.py,sha256=uPsBpDP_Ig9HDQ9Zm6Y-VboLbSYKIV9S_a1S7e4vqvo,19470
 supervisely/api/user_api.py,sha256=m29GP9tvem8P2fJZgg7DAZ9yhFdBX26ZBcWxCKdnhn4,24943
 supervisely/api/video_annotation_tool_api.py,sha256=3A9-U8WJzrTShP_n9T8U01M9FzGYdeS51CCBTzUnooo,6686
@@ -893,7 +893,7 @@ supervisely/nn/benchmark/visualization/widgets/table/__init__.py,sha256=47DEQpj8
 supervisely/nn/benchmark/visualization/widgets/table/table.py,sha256=atmDnF1Af6qLQBUjLhK18RMDKAYlxnsuVHMSEa5a-e8,4319
 supervisely/nn/inference/__init__.py,sha256=QFukX2ip-U7263aEPCF_UCFwj6EujbMnsgrXp5Bbt8I,1623
 supervisely/nn/inference/cache.py,sha256=rfmb1teJ9lNDfisUSh6bwDCVkPZocn8GMvDgLQktnbo,35023
-supervisely/nn/inference/inference.py,sha256=
+supervisely/nn/inference/inference.py,sha256=7c2-GuG3MgI5H0muTyxoR-XgUzWpbP9if37CyRewobA,198528
 supervisely/nn/inference/inference_request.py,sha256=y6yw0vbaRRcEBS27nq3y0sL6Gmq2qLA_Bm0GrnJGegE,14267
 supervisely/nn/inference/session.py,sha256=dIg2F-OBl68pUzcmtmcI0YQIp1WWNnrJTVMjwFN91Q4,35824
 supervisely/nn/inference/uploader.py,sha256=21a9coOimCHhEqAbV-llZWcp12847DEMoQp3N16bpK0,5425
@@ -968,7 +968,7 @@ supervisely/nn/legacy/pytorch/weights.py,sha256=Zb9kcpUCg6ykr7seO53CkKSQa2K44wo8
 supervisely/nn/legacy/training/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/legacy/training/eval_planner.py,sha256=zN9b0_CX7sWGdC8e6riTvD-NOUc3_Xduyhj00S7PEIo,1311
 supervisely/nn/model/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-supervisely/nn/model/model_api.py,sha256=
+supervisely/nn/model/model_api.py,sha256=rq-08BrQmWKqUxWnBB5yKBfnjxw8Lg88mhva848Ae2I,9911
 supervisely/nn/model/prediction.py,sha256=N3oO9s3NDiC5CFvW8utfU8rz3bfpCl37Sk4VEBH94Bc,11307
 supervisely/nn/model/prediction_session.py,sha256=sy0FSQaWSmT8i0RkR4J8oIn3Ek4IDVJNBR1Tg4mulkM,25523
 supervisely/nn/tracker/__init__.py,sha256=LiojByb5kGsTQ49lWuboEh7B4JUwM1vfz81J8kJlLYo,337
@@ -1114,9 +1114,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
 supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
 supervisely_lib/__init__.py,sha256=7-3QnN8Zf0wj8NCr2oJmqoQWMKKPKTECvjH9pd2S5vY,159
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
+supervisely-6.73.408.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+supervisely-6.73.408.dist-info/METADATA,sha256=C6yRG5xZiYpAXpe3RyAIimpbPRggSn--0fqKT00sSKI,35254
+supervisely-6.73.408.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+supervisely-6.73.408.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
+supervisely-6.73.408.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
+supervisely-6.73.408.dist-info/RECORD,,
{supervisely-6.73.406.dist-info → supervisely-6.73.408.dist-info}/LICENSE
File without changes

{supervisely-6.73.406.dist-info → supervisely-6.73.408.dist-info}/WHEEL
File without changes

{supervisely-6.73.406.dist-info → supervisely-6.73.408.dist-info}/entry_points.txt
File without changes

{supervisely-6.73.406.dist-info → supervisely-6.73.408.dist-info}/top_level.txt
File without changes