kalavai-client 0.6.19__py3-none-any.whl → 0.6.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
kalavai_client/__init__.py CHANGED
@@ -1,2 +1,2 @@
 
- __version__ = "0.6.19"
+ __version__ = "0.6.21"
kalavai_client/assets/apps.yaml CHANGED
@@ -154,7 +154,7 @@ releases:
  - name: replicas
  value: 1
  - name: image_tag
- value: "v2025.07.31"
+ value: "v2025.07.34"
  - name: deployment.in_cluster
  value: "True"
  - name: deployment.kalavai_username_key
kalavai_client/assets/docker-compose-template.yaml CHANGED
@@ -3,7 +3,7 @@ services:
  {{vpn_name}}:
  image: gravitl/netclient:v0.90.0
  container_name: {{vpn_name}}
- platform: linux/amd64
+ platform: linux/{{target_platform}}
  cap_add:
  - NET_ADMIN
  - SYS_MODULE
@@ -21,6 +21,7 @@ services:
  image: docker.io/bundenth/kalavai-runner:{{target_platform}}-latest
  pull_policy: always
  container_name: {{service_name}}
+ platform: linux/{{target_platform}}
  {% if vpn %}
  depends_on:
  - {{vpn_name}}
kalavai_client/bridge_api.py CHANGED
@@ -9,6 +9,10 @@ from starlette.requests import Request
  import uvicorn
 
  from kalavai_client.core import Job
+ from kalavai_client.env import (
+ KALAVAI_SERVICE_LABEL,
+ KALAVAI_SERVICE_LABEL_VALUE
+ )
  from kalavai_client.bridge_models import (
  CreatePoolRequest,
  InvitesRequest,
@@ -18,7 +22,8 @@ from kalavai_client.bridge_models import (
  DeleteJobRequest,
  JobDetailsRequest,
  NodesActionRequest,
- NodeLabelsRequest
+ NodeLabelsRequest,
+ WorkerConfigRequest
  )
  from kalavai_client.core import (
  create_pool,
@@ -34,6 +39,7 @@ from kalavai_client.core import (
  fetch_job_logs,
  fetch_job_templates,
  fetch_job_defaults,
+ fetch_pod_logs,
  deploy_job,
  delete_job,
  authenticate_user,
@@ -52,6 +58,7 @@ from kalavai_client.core import (
  uncordon_nodes,
  add_node_labels,
  get_node_labels,
+ generate_worker_package,
  TokenType
  )
  from kalavai_client.utils import (
@@ -241,6 +248,21 @@ def get_token(mode: int, api_key: str = Depends(verify_api_key)):
  """
  return get_pool_token(mode=TokenType(mode))
 
+ @app.post("/generate_worker_config",
+ operation_id="generate_worker_config",
+ summary="Generate a config file for a remote worker to connect to the pool",
+ description="Generate a config file for a remote worker to connect to the pool. Different token types provide different levels of access - join tokens allow nodes to contribute resources, while attach tokens allow management access.",
+ tags=["pool_management"],
+ response_description="Worker config file")
+ def generate_worker_config(request: WorkerConfigRequest, api_key: str = Depends(verify_api_key)):
+ return generate_worker_package(
+ node_name=request.node_name,
+ mode=TokenType(request.mode),
+ target_platform=request.target_platform,
+ num_gpus=request.num_gpus,
+ ip_address=request.ip_address,
+ storage_compatible=request.storage_compatible)
+
  @app.get("/fetch_devices",
  operation_id="fetch_devices",
  summary="Get list of all compute devices in the pool",
@@ -251,6 +273,15 @@ def get_devices(api_key: str = Depends(verify_api_key)):
  """Get list of available devices"""
  return fetch_devices()
 
+ @app.get("/fetch_service_logs",
+ operation_id="fetch_service_logs",
+ summary="Get logs for the kalavai API service",
+ description="Get logs for the kalavai API service, including internal logs, debugging messages and status of the service.",
+ tags=["info"],
+ response_description="Logs")
+ def get_service_logs(api_key: str = Depends(verify_api_key)):
+ return fetch_pod_logs(label_key=KALAVAI_SERVICE_LABEL, label_value=KALAVAI_SERVICE_LABEL_VALUE, force_namespace="kalavai")
+
  @app.post("/send_pool_invites",
  operation_id="send_pool_invites",
  summary="Send invitations to join the pool",
kalavai_client/bridge_models.py CHANGED
@@ -17,6 +17,15 @@ class CreatePoolRequest(BaseModel):
  token_mode: TokenType = Field(TokenType.USER, description="Token type for authentication")
  description: str = Field("", description="Description of the pool")
 
+ class WorkerConfigRequest(BaseModel):
+ node_name: str = Field(None, description="Name for the worker node")
+ mode: int = Field(2, description="Access mode for the worker (admin, worker or user)")
+ target_platform: str = Field("amd64", description="Target platform architecture for the worker (amd64 or arm64)")
+ num_gpus: int = Field(0, description="Number of GPUs to use on the worker node")
+ ip_address: str = Field("0.0.0.0", description="IP address of the worker node")
+ storage_compatible: bool = Field(True, description="Whether to use the node's storage capacity for volumes")
+
+
  class NodesActionRequest(BaseModel):
  nodes: list[str] = Field(None, description="List of node names to perform the action on, defaults to None")
 
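Taken together, the new `WorkerConfigRequest` model and the `/generate_worker_config` route let a bridge API client request a worker config file remotely. A minimal sketch of such a call follows; the host, port and API-key header name are placeholders (they are not defined in this diff), and only the route, payload fields and defaults come from the code above.

```python
# Hypothetical call to the new endpoint; adjust the URL and auth header to your deployment.
import requests

payload = {
    "node_name": "worker-1",       # name for the new worker node
    "mode": 2,                     # access mode (admin, worker or user)
    "target_platform": "amd64",    # or "arm64"
    "num_gpus": 0,
    "ip_address": "192.168.1.20",  # address the worker is reachable on
    "storage_compatible": True,
}
response = requests.post(
    "http://localhost:8001/generate_worker_config",  # placeholder base URL
    json=payload,
    headers={"X-API-KEY": "<your api key>"},          # placeholder header name
)
print(response.json())  # worker config file content
```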
kalavai_client/cli.py CHANGED
@@ -27,6 +27,8 @@ from kalavai_client.env import (
  USER_TEMPLATES_FOLDER,
  DOCKER_COMPOSE_GUI,
  USER_GUI_COMPOSE_FILE,
+ KALAVAI_SERVICE_LABEL,
+ KALAVAI_SERVICE_LABEL_VALUE,
  user_path,
  resource_path,
  )
@@ -37,6 +39,7 @@ from kalavai_client.core import (
  fetch_job_details,
  fetch_devices,
  fetch_job_logs,
+ fetch_pod_logs,
  fetch_gpus,
  generate_worker_package,
  load_gpu_models,
@@ -689,6 +692,25 @@ def pool__update(*others):
  else:
  console.log(f"[green]{result}")
 
+ @arguably.command
+ def pool__logs(*others):
+ """
+ Get the logs for the Kalavai API
+ """
+ logs = []
+
+ logs.append("Getting Kalavai API logs...")
+
+ logs = fetch_pod_logs(
+ label_key=KALAVAI_SERVICE_LABEL,
+ label_value=KALAVAI_SERVICE_LABEL_VALUE,
+ force_namespace="kalavai"
+ )
+ for name, log in logs.items():
+ console.log(f"[yellow]LOGS for service: {name}")
+ for key, value in log.items():
+ console.log(f"[yellow]{key}")
+ console.log(json.dumps(value, indent=2))
 
  @arguably.command
  def pool__status(*others, log_file=None):
kalavai_client/core.py CHANGED
@@ -313,7 +313,7 @@ def fetch_job_details(jobs: list[Job]):
  restart_counts = sum([c["restart_count"] for c in values["conditions"]])
  workers_status[values["status"]] += 1
  # get nodes involved in deployment (needs kubewatcher)
- if "node_name" in values:
+ if "node_name" in values and values["node_name"] is not None:
  host_nodes.add(values["node_name"])
 
  workers = "\n".join([f"{k}: {v}" for k, v in workers_status.items()])
@@ -454,9 +454,18 @@ def fetch_devices():
  return {"error": str(e)}
 
  def fetch_job_logs(job_name, force_namespace=None, pod_name=None, tail=100):
+ return fetch_pod_logs(
+ label_key=TEMPLATE_LABEL,
+ label_value=job_name,
+ pod_name=pod_name,
+ force_namespace=force_namespace,
+ tail=tail
+ )
+
+ def fetch_pod_logs(label_key, label_value, force_namespace=None, pod_name=None, tail=100):
  data = {
- "label": TEMPLATE_LABEL,
- "value": job_name,
+ "label": label_key,
+ "value": label_value,
  "tail": tail
  }
  if force_namespace is not None:
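With this refactor, `fetch_job_logs` becomes a thin wrapper over the more general `fetch_pod_logs`, which the new `/fetch_service_logs` endpoint and the `pool__logs` CLI command also use. A rough usage sketch, assuming the return value is a mapping of pod name to log entries (as the `pool__logs` loop above implies):

```python
# Sketch only: the names come from this diff; the exact return shape is an assumption.
from kalavai_client.core import fetch_pod_logs
from kalavai_client.env import KALAVAI_SERVICE_LABEL, KALAVAI_SERVICE_LABEL_VALUE

service_logs = fetch_pod_logs(
    label_key=KALAVAI_SERVICE_LABEL,          # "app"
    label_value=KALAVAI_SERVICE_LABEL_VALUE,  # "kube-watcher-api"
    force_namespace="kalavai",
    tail=100,
)
for pod_name, entries in service_logs.items():
    print(pod_name, entries)
```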
@@ -609,9 +618,16 @@ def attach_to_pool(token, node_name=None):
 
  return cluster_name
 
- def generate_worker_package(target_platform="amd64", num_gpus=0, node_name=None, ip_address="0.0.0.0", storage_compatible=True):
+ def generate_worker_package(
+ target_platform="amd64",
+ num_gpus=0,
+ node_name=None,
+ ip_address="0.0.0.0",
+ storage_compatible=True,
+ mode=TokenType.WORKER
+ ):
  # get pool data from token
- token = get_pool_token(mode=TokenType.WORKER)
+ token = get_pool_token(mode=mode)
  if "error" in token:
  return {"error": f"[red]Error when getting pool token: {token['error']}"}
 
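`generate_worker_package` now accepts a `mode` argument (defaulting to `TokenType.WORKER`), which `/generate_worker_config` forwards from the request so the generated config can carry different token types. A hedged sketch of a direct call; the node name and IP address are illustrative values only:

```python
# Sketch based on the new signature in this diff; other TokenType members are not enumerated here.
from kalavai_client.core import generate_worker_package, TokenType

result = generate_worker_package(
    target_platform="arm64",
    num_gpus=1,
    node_name="edge-node-1",     # hypothetical worker name
    ip_address="192.168.1.50",   # hypothetical worker address
    storage_compatible=True,
    mode=TokenType.WORKER,       # default mode
)
if isinstance(result, dict) and "error" in result:
    print(result["error"])
```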
kalavai_client/env.py CHANGED
@@ -32,6 +32,8 @@ def resource_path(relative_path: str):
 
 
  TEMPLATE_LABEL = "kalavai.job.name"
+ KALAVAI_SERVICE_LABEL = "app"
+ KALAVAI_SERVICE_LABEL_VALUE = "kube-watcher-api"
  STORAGE_CLASS_LABEL = "kalavai.storage.enabled"
  USER_NODE_LABEL = "kalavai.cluster.user"
  SERVER_IP_KEY = "server_ip"
kalavai_client/utils.py CHANGED
@@ -175,7 +175,6 @@ def is_watcher_alive(server_creds, user_cookie, timeout=30):
  timeout=timeout
  )
  except Exception as e:
- print(str(e))
  return False
  return True
 
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: kalavai-client
- Version: 0.6.19
+ Version: 0.6.21
  Summary: Client app for kalavai platform
  License: Apache-2.0
  Keywords: LLM,platform
@@ -50,7 +50,7 @@ Description-Content-Type: text/markdown
  ⭐⭐⭐ **Kalavai platform is open source, and free to use in both commercial and non-commercial purposes. If you find it useful, consider supporting us by [giving a star to our GitHub project](https://github.com/kalavai-net/kalavai-client), joining our [discord channel](https://discord.gg/YN6ThTJKbM) and follow our [Substack](https://kalavainet.substack.com/).**
 
 
- # Kalavai: turn your devices into a scalable AI platform
+ # Kalavai: a platform to self-host AI on easy mode
 
  > AI in the cloud is not aligned with you, it's aligned with the company that owns it. Make sure you own your AI
 
@@ -61,15 +61,14 @@ Kalavai is an **open source** tool that turns **any devices** into a self-hosted
 
  ## What can Kalavai do?
 
- Kalavai's goal is to make using self-hosted AI (GenAI models and agents) in real applications accessible and affordable to all. It's a tool that transforms machines into a _magic box_ that **integrates all the components required to make AI useful in the age of massive computing**, from model deployment and orchestration to Agentic AI.
+ Kalavai's goal is to make using self-hosted AI (GenAI models and agents) in real applications accessible and affordable to all.
 
  ### Core features
 
- - Manage **multiple devices resources as one**. One pool of RAM, CPUs and GPUs
- - **Deploy open source models seamlessly across devices**, wherever they are (cloud, on premises, personal devices)
- - Beyond LLMs: not just for large language models, but text-to-speech, speech-to-text, image understanding, coding generation and embedding models.
- - The hybrid dream: build on your laptop, move to the cloud (any!) with zero changes
- - Auto-discovery: all **models are automatically exposed** through a single OpenAI-like API and a ChatGPT-like UI playground
+ - Manage **multiple devices resources as one**, wherever they come from (hybrid cloud, on prem, personal devices)
+ - **Deploy open source models seamlessly across devices**, with zero-cost migration
+ - Beyond LLMs: not just for large language models, but text-to-speech, speech-to-text, image generation, video understanding, coding generation and embedding models.
+ - Production-ready: **models are automatically exposed** through a single OpenAI-like API and a ChatGPT-like UI playground, with off-the-shelf monitoring and evaluation framework.
  - Compatible with [most popular model engines](#support-for-llm-engines)
  - [Easy to expand](https://github.com/kalavai-net/kube-watcher/tree/main/templates) to custom workloads
 
@@ -103,17 +102,24 @@ Kalavai's goal is to make using self-hosted AI (GenAI models and agents) in real
 
  </details>
 
- ### Support for LLM engines
+ ### Support for AI engines
 
- We currently support out of the box the following LLM engines:
+ We currently support out of the box the following AI engines:
 
- - [vLLM](https://docs.vllm.ai/en/latest/)
- - [llama.cpp](https://github.com/ggerganov/llama.cpp)
- - [Aphrodite Engine](https://github.com/aphrodite-engine/aphrodite-engine)
- - [Petals](https://github.com/bigscience-workshop/petals)
+ - [vLLM](https://docs.vllm.ai/en/latest/): most popular GPU-based model inference.
+ - [llama.cpp](https://github.com/ggerganov/llama.cpp): CPU-based GGUF model inference.
+ - [SGLang](https://github.com/sgl-project/sglang): Super fast GPU-based model inference.
+ - [n8n](https://n8n.io/): no-code workload automation framework.
+ - [Flowise](https://flowiseai.com/): no-code agentic AI workload framework.
+ - [Speaches](https://speaches.ai/): audio (speech-to-text and text-to-speech) model inference.
+ - [Langfuse](https://langfuse.com/): open source evaluation and monitoring GenAI framework.
+ - [OpenWebUI](https://docs.openwebui.com/): ChatGPT-like UI playground to interface with any models.
 
  Coming soon:
 
+ - [diffusers](https://huggingface.co/docs/diffusers/en/index)
+ - [Aphrodite Engine](https://github.com/aphrodite-engine/aphrodite-engine)
+ - [Petals](https://github.com/bigscience-workshop/petals)
  - [exo](https://github.com/exo-explore/exo)
  - [GPUstack](https://docs.gpustack.ai/0.4/overview/)
  - [RayServe](https://docs.ray.io/en/latest/serve/index.html)
@@ -141,12 +147,16 @@ The `kalavai-client` is the main tool to interact with the Kalavai platform, to
 
  <summary>Requirements</summary>
 
+ For seed nodes:
+ - A 64 bits x86 based Linux machine (laptop, desktop or VM)
+ - [Docker engine installed](https://docs.docker.com/engine/install/ubuntu/) with [privilege access](https://docs.docker.com/engine/containers/run/#runtime-privilege-and-linux-capabilities).
+
  For workers sharing resources with the pool:
 
- - A laptop, desktop or Virtual Machine
+ - A laptop, desktop or Virtual Machine (MacOS, Linux or Windows; ARM or x86)
+ - If self-hosting, workers should be on the same network as the seed node. Looking for over-the-internet connectivity? Check out our [managed seeds](https://platform.kalavai.net)
  - Docker engine installed (for [linux](https://docs.docker.com/engine/install/ubuntu/), [Windows and MacOS](https://docs.docker.com/desktop/)) with [privilege access](https://docs.docker.com/engine/containers/run/#runtime-privilege-and-linux-capabilities).
 
- > **Support for Windows and MacOS workers is experimental**: kalavai workers run on docker containers that require access to the host network interfaces, thus systems that do not support containers natively (Windows and MacOS) may have difficulties finding each other.
 
  </details>
 
@@ -162,24 +172,24 @@ pip install kalavai-client
 
  ## Create a a local, private AI pool
 
- > Kalavai is **free to use, no caps, for both commercial and non-commercial purposes**. All you need to get started is one or more computers that can see each other (i.e. within the same network), and you are good to go. If you are interested in join computers in different locations / networks, [contact us](mailto:info@kalavai.net) or [book a demo](https://app.onecal.io/b/kalavai/book-a-demo) with the founders.
-
  You can create and manage your pools with the new kalavai GUI, which can be started with:
 
  ```bash
  kalavai gui start
  ```
 
- This will expose the GUI and the backend services in localhost. By default, the GUI is accessible via [http://localhost:3000](http://localhost:3000). In the UI users can create and join LLM pools, monitor devices, deploy LLMs and more.
+ This will expose the GUI and the backend services in localhost. By default, the GUI is accessible via [http://localhost:49153](http://localhost:49153). In the UI users can create and join AI pools, monitor devices, deploy LLMs and more.
 
  ![Kalavai logo](docs/docs/assets/images/ui_dashboard_multiple.png)
 
- Check out our [getting started guide](https://kalavai-net.github.io/kalavai-client/getting_started/) for next steps.
+ Check out our [getting started guide](https://kalavai-net.github.io/kalavai-client/getting_started/) for next steps on how to add more workers to your pool, or use our [managed seeds service](https://kalavai-net.github.io/kalavai-client/getting_started/#1b-managed-pools-create-a-seed) for over-the-internet AI pools.
 
 
  ## Enough already, let's run stuff!
 
- Check our [examples](examples/) to put your new AI pool to good use! For an end to end tour, check our [self-hosted](https://kalavai-net.github.io/kalavai-client/self_hosted_llm_pool/) and [public LLM pools](https://kalavai-net.github.io/kalavai-client/public_llm_pool/) guides.
+ For an end to end tour on building your own OpenAI-like service, check our [self-hosted](https://kalavai-net.github.io/kalavai-client/self_hosted_llm_pool/) guide.
+
+ Check our [examples](examples/) to put your new AI pool to good use!
 
 
  ## Compatibility matrix
@@ -247,7 +257,7 @@ Anything missing here? Give us a shout in the [discussion board](https://github.
 
  <summary>Expand</summary>
 
- Python version >= 3.6.
+ Python version >= 3.10.
 
  ```bash
  sudo add-apt-repository ppa:deadsnakes/ppa
@@ -1,25 +1,25 @@
- kalavai_client/__init__.py,sha256=29RPUdF-Jn8Tqqu5Mk5Ci4E0On_MZIVUBqNZv5nyu0s,23
+ kalavai_client/__init__.py,sha256=KnlOn1hr4l_ezHHYDmLMevNZCFA4Ui3CHlLyP0spOl0,23
  kalavai_client/__main__.py,sha256=WQUfxvRsBJH5gsCJg8pLz95QnZIj7Ol8psTO77m0QE0,73
  kalavai_client/assets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- kalavai_client/assets/apps.yaml,sha256=17JuXSv-Qj5Az6ZTRyiEaQXVbI325uTrZzKk2irts2g,6410
+ kalavai_client/assets/apps.yaml,sha256=jGHcoktg0v-9TC-IJrTE5-7TK6dverxnkJUF_o1dXNs,6410
  kalavai_client/assets/apps_values.yaml,sha256=LeSNd3PwkIx0wkTIlEk2KNz3Yy4sXSaHALQEkopdhKE,2165
  kalavai_client/assets/docker-compose-gui.yaml,sha256=OAVO0ohaCpDB9FGeih0yAbVNwUfDtaCzssZ25uiuJyA,787
- kalavai_client/assets/docker-compose-template.yaml,sha256=KHIwJ2WWX7Y7wQKiXRr82Jqd3IKRyls5zhTyl8mSmrc,1805
+ kalavai_client/assets/docker-compose-template.yaml,sha256=vW7GhOl_PaUodehJk8qajOlE0deZXrPc7qizg5SeYyc,1859
  kalavai_client/assets/nginx.conf,sha256=drVVCg8GHucz7hmt_BI6giAhK92OV71257NTs3LthwM,225
  kalavai_client/assets/pool_config_template.yaml,sha256=MhBZQsEMKrBgbUVSKgIGmXWhybeGKG6l5XvJb38y5GI,577
  kalavai_client/assets/pool_config_values.yaml,sha256=_iAnugramLiwJaaDcPSetThvOdR7yFiCffdMri-SQCU,68
  kalavai_client/assets/user_workspace.yaml,sha256=wDvlMYknOPABAEo0dsQwU7bac8iubjAG9tdkFbJZ5Go,476
  kalavai_client/assets/user_workspace_values.yaml,sha256=G0HOzQUxrDMCwuW9kbWUZaKMzDDPVwDwzBHCL2Xi2ZM,542
  kalavai_client/auth.py,sha256=EB3PMvKUn5_KAQkezkEHEt-OMZXyfkZguIQlUFkEHcA,3243
- kalavai_client/bridge_api.py,sha256=Hd7whTX2TAiNYX1G237hv2rqtKUBGRJkzUoWOMZm44A,25562
- kalavai_client/bridge_models.py,sha256=3mHCqIHVysLLkQvGT-DKqKOrtAlQSfEOdrwSq2yTRRU,2439
- kalavai_client/cli.py,sha256=UC2aRUvNI53Xq-ubyoKDoVaRS1VDRmKtU9sVMf9C5Sg,47522
+ kalavai_client/bridge_api.py,sha256=qiN0jleaooj2vYVYxHrG_nfdgY2rKpZyvFE4uz6hkoA,27088
+ kalavai_client/bridge_models.py,sha256=mUh67hzhudqGxJEFHx2KGrf-Sjrt_CwkzLno8Xsm7hk,3043
+ kalavai_client/cli.py,sha256=ZRNOv1oUvU7Freu47PotrwqJRrBMSFgmoCHg620UdZM,48146
  kalavai_client/cluster.py,sha256=Z2PIXbZuSAv9xmw-MyZP1M41BpVMpirLzG51bqGA-zc,13548
- kalavai_client/core.py,sha256=haNLna0TWzxmGx9cEhJjnV3r9YSOS3Fhtr4dt70LnwQ,35390
- kalavai_client/env.py,sha256=YsfZj7LWf6ABquDsoIFFkXCFYwenpDk8zVnGsf7qv98,2823
- kalavai_client/utils.py,sha256=5cUpMVsADF3JdDW0wbu-f38MURkhQz9pPngg0WxssJw,13460
- kalavai_client-0.6.19.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- kalavai_client-0.6.19.dist-info/METADATA,sha256=CYVNusQKxd6KHa0UCx3QBDytEXGApHg2OrZd7O5LfIU,12393
- kalavai_client-0.6.19.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- kalavai_client-0.6.19.dist-info/entry_points.txt,sha256=9T6D45gxwzfVbglMm1r6XPdXuuZdHfy_7fCeu2jUphc,50
- kalavai_client-0.6.19.dist-info/RECORD,,
+ kalavai_client/core.py,sha256=JVXSMmYvbNBl9ggVPGNJRryK54doySTrGDj-WhAlkfY,35760
+ kalavai_client/env.py,sha256=t6dfjg5GY6lbprbmlr9dVOP_KouPwdN94wnDL5zCgIM,2902
+ kalavai_client/utils.py,sha256=1mz-dzoJhZ9GJKU7jiGYBC1tP37SXHvxToMqqEir8R0,13438
+ kalavai_client-0.6.21.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ kalavai_client-0.6.21.dist-info/METADATA,sha256=jnSEBFEE_8y8FLLV4UTvlRQ5VtxUDgGLjMtVqq4ly8k,12776
+ kalavai_client-0.6.21.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ kalavai_client-0.6.21.dist-info/entry_points.txt,sha256=9T6D45gxwzfVbglMm1r6XPdXuuZdHfy_7fCeu2jUphc,50
+ kalavai_client-0.6.21.dist-info/RECORD,,