kalavai-client 0.7.11__py3-none-any.whl → 0.7.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of kalavai-client has been flagged as a potentially problematic release.

kalavai_client/__init__.py CHANGED
@@ -1,2 +1,2 @@
 
- __version__ = "0.7.11"
+ __version__ = "0.7.13"
kalavai_client/assets/apps.yaml CHANGED
@@ -19,7 +19,7 @@ repositories:
  url: https://charts.longhorn.io
  - name: volcano-sh
  url: https://volcano-sh.github.io/helm-charts
- - name: prometheus
+ - name: prometheus-community # prometheus
  url: https://prometheus-community.github.io/helm-charts
  - name: opencost-charts
  url: https://opencost.github.io/opencost-helm-chart
@@ -152,11 +152,27 @@ releases:
  namespace: opencost
  chart: opencost-charts/opencost
  installed: {{deploy_opencost|default("false", true)}}
+ set:
+ - name: service.type
+ value: NodePort
+ - name: opencost.nodeSelector.{{kalavai_role_label}}
+ value: server
+ # point at prometheus instance (theres an opencost.prometheus.external too)
+ - name: opencost.prometheus.internal.enabled
+ value: true
+ - name: opencost.prometheus.internal.serviceName
+ value: {{prometheus_service_name}}
+ - name: opencost.prometheus.internal.namespaceName
+ value: {{prometheus_namespace}}
+ - name: opencost.prometheus.internal.port
+ value: {{prometheus_port}}
  - name: prometheus
- namespace: prometheus-system
- chart: prometheus/prometheus
+ namespace: {{prometheus_namespace}}
+ chart: prometheus-community/kube-prometheus-stack #prometheus/prometheus
  installed: {{deploy_prometheus|default("false", true)}}
  set:
+ - name: server.nodeSelector.{{kalavai_role_label}}
+ value: server
  - name: prometheus-pushgateway.enabled
  value: false
  - name: alertmanager.enabled
@@ -204,13 +220,13 @@ releases:
  - name: kalavai-watcher
  namespace: kalavai
  chart: kalavai/kalavai-watcher
- version: "0.3.8"
+ version: "0.4.0"
  installed: {{deploy_watcher|default("false", true)}}
  set:
  - name: namespace
  value: kalavai
- - name: replicas
- value: 1
+ - name: deployment.replicas
+ value: {{watcher_replicas}}
  - name: image_tag
  value: "{{watcher_image_tag}}" #"v2025.07.34"
  - name: deployment.in_cluster
@@ -232,13 +248,19 @@ releases:
  - name: deployment.kalavai_api_endpoint
  value: {{kalavai_api_endpoint}}
  - name: deployment.prometheus_endpoint
- value: {{prometheus_endpoint}}
+ value: "http://{{prometheus_service_name}}.{{prometheus_namespace}}.svc.cluster.local:{{prometheus_port}}"
  - name: deployment.opencost_endpoint
  value: {{opencost_endpoint}}
  - name: deployment.longhorn_manager_endpoint
  value: {{longhorn_manager_endpoint}}
  - name: service.nodePort
  value: {{watcher_port}}
+ - name: resources.limits.memory
+ value: {{watcher_resources_memory}}
+ - name: resources.limits.cpu
+ value: {{watcher_resources_cpu}}
+ - name: deployment.nodeSelector.{{kalavai_role_label}}
+ value: "server"
  - name: hami-vgpu
  namespace: kalavai
  chart: hami-charts/hami
@@ -247,6 +269,10 @@ releases:
  set:
  - name: resourceCores
  value: "nvidia.com/gpucores"
+ - name: resourceMem
+ value: "nvidia.com/gpumem"
+ - name: resourceMemPercentage
+ value: "nvidia.com/gpumem-percentage"
  - name: devicePlugin.runtimeClassName
  value: "nvidia"
  - name: scheduler.defaultSchedulerPolicy.nodeSchedulerPolicy
kalavai_client/assets/default_pool_config.yaml CHANGED
@@ -4,14 +4,14 @@ server:
  location: null
  name: "kalavai_cluster"
  mtu: ""
- watcher_image_tag: "v2025.10.10"
+ watcher_image_tag: "v2025.10.24"
 
  core:
  # Deploy systems
  deploy:
  - volcano
- - cert_manager
- - rocm
+ #- cert_manager
+ #- rocm
  - watcher
  - hami
  #- lago
@@ -21,6 +21,7 @@ core:
  #- minio
  # "Kalavai API endpoint"
  kalavai_api_endpoint: "https://platform.kalavai.net/_/api"
+ kalavai_role_label: "kalavai/role"
  # "Opencost endpoint"
  opencost_endpoint: "http://opencost.opencost.svc.cluster.local:9003"
  # "Longhorn manager endpoint"
@@ -29,11 +30,15 @@ core:
  helios_harvest_interval: 120
  # "Watcher is shared pool"
  watcher_is_shared_pool: "True"
- # "Prometheus endpoint"
- prometheus_endpoint: "http://prometheus-server.prometheus-system.svc.cluster.local:80"
+ watcher_resources_memory: 0.5
+ watcher_resources_cpu: 0.5
+ watcher_replicas: 1
+ # "Prometheus and opencost"
+ prometheus_service_name: prometheus-kube-prometheus-prometheus
+ prometheus_namespace: prometheus-system
+ prometheus_port: 9090
  # "Prometheus server retention"
  prometheus_server_retention: "30d"
- # "Prometheus disk size"
  prometheus_disk_size: "8Gi"
  # "Longhorn UI port"
  longhorn_ui_port: 30000
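Note: with the new defaults above, the deployment.prometheus_endpoint value templated in apps.yaml (shown earlier in this diff) resolves to the in-cluster kube-prometheus-stack service. A minimal sketch of that substitution in plain Python; the real rendering is done by the app/helmfile templating, this only reproduces the resulting URL:

# Sketch: combine the new default_pool_config.yaml values into the endpoint
# "http://{{prometheus_service_name}}.{{prometheus_namespace}}.svc.cluster.local:{{prometheus_port}}"
prometheus_service_name = "prometheus-kube-prometheus-prometheus"
prometheus_namespace = "prometheus-system"
prometheus_port = 9090

endpoint = (
    f"http://{prometheus_service_name}.{prometheus_namespace}"
    f".svc.cluster.local:{prometheus_port}"
)
print(endpoint)
# http://prometheus-kube-prometheus-prometheus.prometheus-system.svc.cluster.local:9090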
kalavai_client/assets/docker-compose-template.yaml CHANGED
@@ -20,7 +20,7 @@ services:
  # run worker only if command is set
  {%if command %}
  {{service_name}}:
- image: docker.io/bundenth/kalavai-runner:{{target_platform}}-latest
+ image: docker.io/bundenth/kalavai-runner-{{target_platform}}:latest
  pull_policy: always
  container_name: {{service_name}}
  platform: linux/{{target_platform}}
@@ -43,26 +43,29 @@ services:
  {% if random_suffix %}
  --random_suffix="{{random_suffix}}"
  {% endif %}
- {% if command == "server" %}
+ {% if command == "server" %}
  --port_range="30000-32767"
- {% else %}
+ {% if load_balancer_ip_address %}
+ --tls_san={{load_balancer_ip_address}}
+ {% endif %}
+ {% else %}
  --server_ip={{pool_ip}}
  --token={{pool_token}}
- {% endif %}
- {%if vpn %}
+ {% endif %}
+ {%if vpn %}
  --flannel_iface={{flannel_iface}}
- {% endif %}
- {% if num_gpus and num_gpus > 0 %}
+ {% endif %}
+ {% if num_gpus and num_gpus > 0 %}
  --gpu=on
- {% else %}
+ {% else %}
  --gpu=off
- {% endif %}
- {% if node_labels %}
+ {% endif %}
+ {% if node_labels %}
  --extra="{{node_labels}}"
- {% endif %}
- {% if mtu != "" %}
+ {% endif %}
+ {% if mtu != "" %}
  --mtu={{mtu}}
- {% endif %}
+ {% endif %}
 
  # volumes:
  # - {{k3s_path}}:/var/lib/rancher/k3s # Persist data
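Note: the new load_balancer_ip_address variable above only emits the --tls_san flag on the server branch when it is non-empty; it is populated from the new lb_ip_address option in kalavai_client/utils.py further down in this diff (empty string when unset). A minimal sketch of that behaviour, rendering just the relevant template fragment with jinja2:

from jinja2 import Template

# Fragment lifted from the server branch of docker-compose-template.yaml.
fragment = Template(
    "{% if load_balancer_ip_address %}--tls_san={{load_balancer_ip_address}}{% endif %}"
)

print(fragment.render(load_balancer_ip_address="192.0.2.10"))  # --tls_san=192.0.2.10
print(repr(fragment.render(load_balancer_ip_address="")))      # '' (flag omitted when unset)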
kalavai_client/bridge_api.py CHANGED
@@ -614,14 +614,14 @@ def node_labels(request: NodeLabelsRequest, api_key: str = Depends(verify_api_ke
  description="Retrieves all labels associated with specified compute nodes in the pool. Labels provide metadata about nodes and can be used for filtering and scheduling decisions.",
  tags=["info"],
  response_description="Node labels")
- def node_labels_get(request: Optional[NodesActionRequest]=NodesActionRequest(), api_key: str = Depends(verify_api_key)):
+ def node_labels_get(nodes: Optional[List[str]] = Query(None), api_key: str = Depends(verify_api_key)):
  """
  Get node labels with the following parameters:
 
- - **node_names**: List of node names to get labels for
+ - **nodes**: List of node names to get labels for
  """
  result = get_node_labels(
- node_names=request.nodes
+ node_names=nodes
  )
  return result
 
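Note: node_labels_get now receives node names as a repeated query parameter (Query(None)) instead of a JSON request body. A minimal client-side sketch of the new call shape; the host, route path (/v1/get_node_labels) and API-key header name (X-API-KEY) are placeholders that are not shown in this diff and must be replaced with the actual bridge API values:

import requests

# Placeholder host, path and auth header: adjust to the real bridge API deployment.
resp = requests.get(
    "http://localhost:8001/v1/get_node_labels",
    params={"nodes": ["node-1", "node-2"]},  # rendered as repeated ?nodes=... query params
    headers={"X-API-KEY": "<api-key>"},
)
print(resp.json())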
kalavai_client/cli.py CHANGED
@@ -406,6 +406,7 @@ def pool__start(
  watcher_image_tag: str=None,
  platform="amd64",
  ip_address: str=None,
+ lb_address: str=None,
  location: str=None,
  non_interactive: bool=False,
  node_labels: Annotated[dict, arguably.arg.handler(parse_key_value_pairs)] = {}
@@ -421,6 +422,10 @@ def pool__start(
  if CLUSTER.is_cluster_init():
  console.log(f"[white] You are already connected to {load_server_info(data_key=CLUSTER_NAME_KEY, file=USER_LOCAL_SERVER_FILE)}. Enter [yellow]kalavai pool stop[white] to exit and join another one.")
  return
+
+ if non_interactive and all([value is None for value in [location, lb_address, ip_address]]):
+ console.log("[red]In --non-interactive mode without --location, one of --lb-address or --ip-address must be set")
+ return
 
  if node_labels:
  console.log(f"[blue]Configuration received: {node_labels}")
@@ -451,6 +456,7 @@ def pool__start(
 
  result = create_pool(
  ip_address=ip_address,
+ lb_ip_address=lb_address,
  location=location,
  target_platform=platform,
  watcher_image_tag=watcher_image_tag,
@@ -523,7 +529,8 @@ def pool__join(
  platform="amd64",
  node_name=None,
  non_interactive=False,
- node_labels: Annotated[dict, arguably.arg.handler(parse_key_value_pairs)] = {}
+ node_labels: Annotated[dict, arguably.arg.handler(parse_key_value_pairs)] = {},
+ seed: bool=False
  ):
  """
  Join Kalavai pool and start/resume sharing resources.
@@ -536,6 +543,7 @@ def pool__join(
  node_name: Name for this node
  non_interactive: Run in non-interactive mode
  node_labels: Node labels as key=value pairs (e.g., "key1=value1,key2=value2")
+ seed: if the node should join as an extra seed (for HA deployments)
  """
 
  # Process node labels if provided
@@ -590,7 +598,8 @@ def pool__join(
  num_gpus=num_gpus,
  ip_address=ip_address,
  mtu=mtu,
- node_labels=node_labels
+ node_labels=node_labels,
+ is_seed=seed
  )
  if "error" in result:
  console.log(f"[red]Error when connecting: {result}")
@@ -1320,7 +1329,7 @@ def job__list(*others):
 
 
  @arguably.command
- def job__logs(name, *others, pod_name=None, stream=False, tail=100, force_namespace: str=None):
+ def job__logs(name, *others, pod_name=None, tail=100, force_namespace: str=None):
  """
  Get logs for a specific job
  """
@@ -1333,34 +1342,57 @@ def job__logs(name, *others, pod_name=None, stream=False, tail=100, force_namesp
  if force_namespace is not None:
  console.log("[WARNING][yellow]--force-namespace [white]requires an admin key. Request will fail if you are not an admin.")
 
- all_logs = fetch_job_logs(
+ data = fetch_job_logs(
  job_name=name,
  pod_name=pod_name,
  force_namespace=force_namespace,
  tail=tail)
- if "error" in all_logs:
- console.log(f"[red]{all_logs}")
+ if "error" in data:
+ console.log(f"[red]{data}")
  return
- while True:
- try:
- if not stream:
- for pod, info in all_logs.items():
- if pod_name is not None and pod_name != pod:
- continue
- console.log(f"[yellow]Pod {pod} in {info['pod']['spec']['node_name']}")
- console.log(f"[green]{info['logs']}")
- break
- else:
- os.system("clear")
- for pod, info in all_logs.items():
- if pod_name is not None and pod_name != pod:
- continue
- print(f"Pod {pod} in {info['pod']['spec']['node_name']}")
- print(f"{info['logs']}")
- time.sleep(1)
- except KeyboardInterrupt:
- break
+ for pod, info in data.items():
+ if pod_name is not None and pod_name != pod:
+ continue
+ if "pod" not in info or info["pod"] is None:
+ console.log(f"[white]Logs for {pod_name} not ready yet. Try [yellow]kalavai job describe {pod_name}")
+ continue
+ console.log(f"[yellow]Pod {pod} in {info['pod']['spec']['node_name']}")
+ console.log(f"[green]{info['logs']}")
+ console.log("---------------------------")
+ console.log("---------------------------")
+ console.log(f"[yellow]Status {pod} in {info['pod']['spec']['node_name']}")
+ console.log(f"[green]{info['status']}")
+
+ @arguably.command
+ def job__describe(name, *others, pod_name=None, force_namespace: str=None):
+ """
+ Get logs for a specific job
+ """
+ try:
+ CLUSTER.validate_cluster()
+ except Exception as e:
+ console.log(f"[red]Problems with your pool: {str(e)}")
+ return
+
+ if force_namespace is not None:
+ console.log("[WARNING][yellow]--force-namespace [white]requires an admin key. Request will fail if you are not an admin.")
 
+ data = fetch_job_logs(
+ job_name=name,
+ pod_name=pod_name,
+ force_namespace=force_namespace)
+ if "error" in data:
+ console.log(f"[red]{data}")
+ return
+ console.log(f"[yellow]Status for {name}:")
+ for pod, info in data.items():
+ if pod_name is not None and pod_name != pod:
+ continue
+ if "pod" not in info or info["pod"] is None:
+ console.log(f"[white]Logs for {pod_name} not ready yet. Try [yellow]kalavai job describe {pod_name}")
+ continue
+
+ console.log(json.dumps(info['status'], indent=2))
 
  @arguably.command
  def job__manifest(*others, name, force_namespace: str=None):
kalavai_client/core.py CHANGED
@@ -468,7 +468,7 @@ def fetch_pod_logs(label_key, label_value, force_namespace=None, pod_name=None,
  data = {
  "label": label_key,
  "value": label_value,
- "tail": tail
+ "tail_lines": tail
  }
  if force_namespace is not None:
  data["force_namespace"] = force_namespace
@@ -476,7 +476,7 @@ def fetch_pod_logs(label_key, label_value, force_namespace=None, pod_name=None,
  # send tail as parameter (fetch only last _tail_ lines)
  all_logs = request_to_server(
  method="post",
- endpoint="/v1/get_logs_for_label",
+ endpoint="/v1/get_job_details",
  data=data,
  server_creds=USER_LOCAL_SERVER_FILE,
  user_cookie=USER_COOKIE
@@ -673,7 +673,8 @@ def join_pool(
  ip_address=None,
  target_platform="amd64",
  mtu="",
- node_labels={}
+ node_labels={},
+ is_seed=False
  ):
  compatibility = check_worker_compatibility()
  if len(compatibility["issues"]) > 0:
@@ -705,13 +706,13 @@ def join_pool(
  node_labels = {
  **node_labels,
  STORAGE_CLASS_LABEL: is_storage_compatible(),
- NODE_ROLE_LABEL: "worker"
+ NODE_ROLE_LABEL: "worker" if not is_seed else "server"
  }
  # local agent join
  # Generate docker compose recipe
  generate_compose_config(
  target_platform=target_platform,
- role="agent",
+ role="agent" if not is_seed else "seed",
  node_ip_address=ip_address,
  pool_ip=f"https://{kalavai_seed_ip}:6443",
  pool_token=kalavai_token,
@@ -758,20 +759,21 @@ def join_pool(
  return cluster_name
 
  def create_pool(
- cluster_name: str=None,
- ip_address: str=None,
- location: str=None,
- target_platform: str="amd64",
- watcher_image_tag: str=None,
- pool_config_file: str=None,
- description: str="",
- token_mode: TokenType=TokenType.USER,
- num_gpus: int=-1,
- node_name: str=None,
- mtu: str="",
- apps: list=[],
- node_labels: dict={}
- ):
+ cluster_name: str=None,
+ ip_address: str=None,
+ lb_ip_address: str=None,
+ location: str=None,
+ target_platform: str="amd64",
+ watcher_image_tag: str=None,
+ pool_config_file: str=None,
+ description: str="",
+ token_mode: TokenType=TokenType.USER,
+ num_gpus: int=-1,
+ node_name: str=None,
+ mtu: str="",
+ apps: list=[],
+ node_labels: dict={}
+ ):
 
  if not check_seed_compatibility():
  return {"error": "Requirements failed"}
@@ -822,6 +824,7 @@ def create_pool(
  role="server",
  vpn_token=location,
  node_ip_address=ip_address,
+ lb_ip_address=lb_ip_address,
  num_gpus=num_gpus,
  node_name=node_name,
  node_labels=node_labels,
@@ -848,7 +851,7 @@ def create_pool(
  watcher_service = f"{ip_address}:{DEFAULT_WATCHER_PORT}"
  values = {
  #CLUSTER_NAME_KEY: cluster_name,
- CLUSTER_IP_KEY: ip_address,
+ CLUSTER_IP_KEY: ip_address if lb_ip_address is None else lb_ip_address,
  USER_ID_KEY: user_id if user_id is not None else "",
  AUTH_KEY: auth_key,
  READONLY_AUTH_KEY: readonly_auth_key,
@@ -861,7 +864,7 @@ def create_pool(
  }
 
  store_server_info(
- server_ip=ip_address,
+ server_ip=ip_address if lb_ip_address is None else lb_ip_address,
  auth_key=auth_key,
  readonly_auth_key=readonly_auth_key,
  write_auth_key=write_auth_key,
kalavai_client/utils.py CHANGED
@@ -39,7 +39,7 @@ AUTH_KEY = "watcher_admin_key"
  USER_ID_KEY = "kalavai_user_id"
  WRITE_AUTH_KEY = "watcher_write_key"
  ALLOW_UNREGISTERED_USER_KEY = "watcher_allow_unregistered_user"
- NODE_ROLE_LABEL = "kalavai.node_role"
+ NODE_ROLE_LABEL = "kalavai/role"
  USER_API_KEY = "user_api_key"
  READONLY_AUTH_KEY = "watcher_readonly_key"
  WATCHER_SERVICE_KEY = "watcher_service"
@@ -163,6 +163,7 @@ def generate_compose_config(
  target_platform="amd64",
  write_to_file=True,
  node_ip_address="0.0.0.0",
+ lb_ip_address=None,
  num_gpus=0,
  node_labels=None,
  pool_ip=None,
@@ -180,6 +181,7 @@ def generate_compose_config(
  "vpn_name": DEFAULT_VPN_CONTAINER_NAME,
  "mtu": mtu,
  "node_ip_address": node_ip_address,
+ "load_balancer_ip_address": lb_ip_address if lb_ip_address is not None else "",
  "pool_ip": pool_ip,
  "pool_token": pool_token,
  "vpn_token": vpn_token,
kalavai_client-0.7.13.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: kalavai-client
- Version: 0.7.11
+ Version: 0.7.13
  Summary: Client app for kalavai platform
  License-Expression: Apache-2.0
  License-File: LICENSE
kalavai_client-0.7.13.dist-info/RECORD CHANGED
@@ -1,25 +1,25 @@
- kalavai_client/__init__.py,sha256=5D26bgHg726t-4I3xx3mEKFKGE7MpHh185qRvB-7D_o,23
+ kalavai_client/__init__.py,sha256=kLcj0rXJOFhYabbxdE6vctowpH6fVPy26B8Sd4uSYsQ,23
  kalavai_client/__main__.py,sha256=WQUfxvRsBJH5gsCJg8pLz95QnZIj7Ol8psTO77m0QE0,73
  kalavai_client/assets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- kalavai_client/assets/apps.yaml,sha256=96N4zaOHf_VQ7IxWW2WP8EWvInztEQGn5niuwdl6FVY,8157
- kalavai_client/assets/default_pool_config.yaml,sha256=F4U_9cjjGmC0RbbX5VZmooTt2i05Nyl_jYBjTjoVN3M,1911
+ kalavai_client/assets/apps.yaml,sha256=D0LmxlGHCOJlNDTXtq6Awim1KoC3hWbXUNpjUIGcPm4,9324
+ kalavai_client/assets/default_pool_config.yaml,sha256=TFe-CLNvyPG8XShHnoNoeF5MDmm_nizkQAxz1Y3Ttoo,2052
  kalavai_client/assets/docker-compose-gui.yaml,sha256=OAVO0ohaCpDB9FGeih0yAbVNwUfDtaCzssZ25uiuJyA,787
- kalavai_client/assets/docker-compose-template.yaml,sha256=3OZC3vDLCkG9RQ93UdrO5Aynd3hXQWPpiiXQn9Oi-2M,1963
+ kalavai_client/assets/docker-compose-template.yaml,sha256=UOv5L1F1iT8e-CXmrDbPEHahuNc5CGagkbNg6wgz0C4,2042
  kalavai_client/assets/model_deployment_values.yaml,sha256=PKD9DGJnRRwYfk6pO4cmGN9gIgylHX3IFW2sDweEGdU,1698
  kalavai_client/assets/nginx.conf,sha256=drVVCg8GHucz7hmt_BI6giAhK92OV71257NTs3LthwM,225
  kalavai_client/assets/pool_config_template.yaml,sha256=MhBZQsEMKrBgbUVSKgIGmXWhybeGKG6l5XvJb38y5GI,577
  kalavai_client/assets/user_workspace.yaml,sha256=wDvlMYknOPABAEo0dsQwU7bac8iubjAG9tdkFbJZ5Go,476
  kalavai_client/assets/user_workspace_values.yaml,sha256=G0HOzQUxrDMCwuW9kbWUZaKMzDDPVwDwzBHCL2Xi2ZM,542
  kalavai_client/auth.py,sha256=EB3PMvKUn5_KAQkezkEHEt-OMZXyfkZguIQlUFkEHcA,3243
- kalavai_client/bridge_api.py,sha256=0TvAGgsyfMkbcVqoPKsjhXQLo06WetBH93mZH-pOM7U,26921
+ kalavai_client/bridge_api.py,sha256=JXo2uhTLrv_LGKfX_AhAZ3BCTGzsQ0jWQzBxXCrwSOY,26890
  kalavai_client/bridge_models.py,sha256=bq6vQNTI1py7e_1YgnBZhorFsAKoBqBVN7nRukCuQRE,2960
- kalavai_client/cli.py,sha256=leHrxwsMUXsnAtL3u1eTI9v0mdIm-FXZUOYAox6X7RA,49024
+ kalavai_client/cli.py,sha256=hXQRUfKgds4bZkhfcTj2F0iluX6HW0Emd8W-rIIBnZs,50357
  kalavai_client/cluster.py,sha256=Z2PIXbZuSAv9xmw-MyZP1M41BpVMpirLzG51bqGA-zc,13548
- kalavai_client/core.py,sha256=OpFLnmJgXuk20l1c1jukatDSKm1Vc1xkj6cPpRy680U,36732
+ kalavai_client/core.py,sha256=z8lNuWdPDf-8fb-A8UoULP7nF4_Vl8rdzHJRtmAB6Ns,36912
  kalavai_client/env.py,sha256=0L5gfEo5KY8gflrW-rSADx10ffDa-8gXmmrGWztKUd8,3099
- kalavai_client/utils.py,sha256=xbHsPgyvsA7X2vTl7OIMLe6MMFTYK1-iiSS_VmWJ-Rg,14311
- kalavai_client-0.7.11.dist-info/METADATA,sha256=fJhhtZQtf4b52CMReHMysMGoFexZlLkukWGeUjkK9Uw,13176
- kalavai_client-0.7.11.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
- kalavai_client-0.7.11.dist-info/entry_points.txt,sha256=9T6D45gxwzfVbglMm1r6XPdXuuZdHfy_7fCeu2jUphc,50
- kalavai_client-0.7.11.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- kalavai_client-0.7.11.dist-info/RECORD,,
+ kalavai_client/utils.py,sha256=JGN-42jEYNpm8ZjKdgaGndAGzqVj_YcOJYtqyCazG34,14418
+ kalavai_client-0.7.13.dist-info/METADATA,sha256=FjI9qdNIHp-u6gUc3lxvjYzkYSUbaERKftUTsAA5M_g,13176
+ kalavai_client-0.7.13.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
+ kalavai_client-0.7.13.dist-info/entry_points.txt,sha256=9T6D45gxwzfVbglMm1r6XPdXuuZdHfy_7fCeu2jUphc,50
+ kalavai_client-0.7.13.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ kalavai_client-0.7.13.dist-info/RECORD,,