waterlight 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,50 @@
+ # Virtual environment
+ .venv/
+ venv/
+ env/
+ 
+ # Environment variables (contains API keys)
+ .env
+ 
+ # Python cache
+ __pycache__/
+ *.py[cod]
+ *$py.class
+ *.so
+ 
+ # Logs
+ logs/
+ *.log
+ 
+ # IDE
+ .idea/
+ .vscode/
+ *.swp
+ *.swo
+ 
+ # OS files
+ .DS_Store
+ Thumbs.db
+ 
+ # Node.js
+ node_modules/
+ waterlight-sdk-node/dist/
+ 
+ # Python SDK build artifacts
+ waterlight-sdk/dist/
+ # Model weights and checkpoints - too large for GitHub, stored on Storage Box
+ mist/breeding/staging/**/checkpoint-*/
+ mist/breeding/staging/**/*.safetensors
+ mist/breeding/staging/**/*.pt
+ mist/breeding/staging/**/*.bin
+ mist/breeding/staging/**/*.pth
+ mist/breeding/staging/**/*.gguf
+ 
+ # Backup files
+ *.bak
+ logs/
+ scripts/breeding/gen8_output/corpus/sft_corpus.jsonl
+ 
+ # Training data (large files — stored on compute boxes, not GitHub)
+ data/*.jsonl
+ mist/mist/saline/_generate_pricing_json.py
@@ -0,0 +1,21 @@
+ MIT License
+ 
+ Copyright (c) 2026 Waterlight Research
+ 
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+ 
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+ 
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,191 @@
+ Metadata-Version: 2.4
+ Name: waterlight
+ Version: 0.1.0
+ Summary: Waterlight AI SDK — OpenAI-compatible client for the Waterlight API
+ Project-URL: Homepage, https://waterlight.ai
+ Project-URL: Documentation, https://docs.waterlight.ai
+ Author: Waterlight Research
+ License-Expression: MIT
+ License-File: LICENSE
+ Keywords: ai,api,llm,openai,waterlight
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Requires-Python: >=3.9
+ Description-Content-Type: text/markdown
+ 
+ # Waterlight Python SDK
+ 
+ OpenAI-compatible Python client for the Waterlight API. Zero external dependencies.
+ 
+ ## Install
+ 
+ ```bash
+ pip install waterlight
+ ```
+ 
+ ## Quick Start
+ 
+ ```python
+ from waterlight import Waterlight
+ 
+ client = Waterlight(api_key="wl-...")
+ 
+ response = client.chat.completions.create(
+     model="mist-1-turbo",
+     messages=[{"role": "user", "content": "Hello!"}],
+ )
+ print(response.choices[0].message.content)
+ ```
+ 
+ ## Drop-in OpenAI Replacement
+ 
+ Change two lines:
+ 
+ ```python
+ # Before
+ from openai import OpenAI
+ client = OpenAI(api_key="sk-...")
+ 
+ # After
+ from waterlight import Waterlight
+ client = Waterlight(api_key="wl-...")
+ 
+ # Everything else stays the same
+ response = client.chat.completions.create(
+     model="mist-1-turbo",
+     messages=[{"role": "user", "content": "Explain quantum computing"}],
+ )
+ ```
+ 
+ ## Streaming
+ 
+ ```python
+ stream = client.chat.completions.create(
+     model="mist-1-turbo",
+     messages=[{"role": "user", "content": "Tell me a story"}],
+     stream=True,
+ )
+ for chunk in stream:
+     content = chunk.choices[0].delta.content
+     if content:
+         print(content, end="", flush=True)
+ ```
+ 
+ ## Tool Calling
+ 
+ ```python
+ tools = [{
+     "type": "function",
+     "function": {
+         "name": "get_weather",
+         "description": "Get the weather for a location",
+         "parameters": {
+             "type": "object",
+             "properties": {
+                 "location": {"type": "string"},
+             },
+             "required": ["location"],
+         },
+     },
+ }]
+ 
+ response = client.chat.completions.create(
+     model="mist-1-turbo",
+     messages=[{"role": "user", "content": "What's the weather in Tokyo?"}],
+     tools=tools,
+ )
+ ```
+ 
+ ## Embeddings
+ 
+ ```python
+ response = client.embeddings.create(input="Hello world")
+ print(len(response.data[0].embedding))
+ ```
+ 
+ ## Models
+ 
+ ```python
+ models = client.models.list()
+ for model in models.data:
+     print(model.id)
+ ```
+ 
+ ## Available Models
+ 
+ | Model | Best For |
+ |-------|----------|
+ | `mist-1` | Highest quality reasoning and analysis |
+ | `mist-1-turbo` | Fast, high quality all-rounder |
+ | `mist-1-flash` | Fastest responses, triage, summarization |
+ | `mist-1-reason` | Deep reasoning and math |
+ | `mist-1-code` | Code generation and review |
+ | `mist-1-vision` | Multimodal / image understanding |
+ 
+ ## Billing
+ 
+ ```python
+ billing = client.billing.get()
+ print(billing) # plan, spent_usd, balance, limits
+ ```
+ 
+ ## Error Handling
+ 
+ ```python
+ from waterlight import (
+     AuthenticationError,
+     RateLimitError,
+     InsufficientCreditsError,
+     APIError,
+ )
+ 
+ try:
+     response = client.chat.completions.create(
+         model="mist-1-turbo",
+         messages=[{"role": "user", "content": "Hello"}],
+     )
+ except AuthenticationError:
+     print("Invalid API key")
+ except RateLimitError as e:
+     print(f"Rate limited — retry after {e.retry_after}s")
+ except InsufficientCreditsError:
+     print("Add credits at https://waterlight.ai")
+ except APIError as e:
+     print(f"API error {e.status_code}: {e.message}")
+ ```
+ 
+ ## Configuration
+ 
+ | Parameter | Env Var | Default |
+ |-----------|---------|---------|
+ | `api_key` | `WATERLIGHT_API_KEY` | — (required) |
+ | `base_url` | `WATERLIGHT_BASE_URL` | `https://api.waterlight.ai` |
+ | `timeout` | — | `120.0` |
+ | `max_retries` | — | `2` |
+ 
+ ```python
+ client = Waterlight(
+     api_key="wl-...", # or set WATERLIGHT_API_KEY env var
+     base_url="https://...", # or set WATERLIGHT_BASE_URL env var
+     timeout=120.0, # request timeout in seconds
+     max_retries=2, # retries on transient errors
+ )
+ ```
+ 
+ ## Requirements
+ 
+ - Python 3.9+
+ - No external dependencies
+ 
+ ## License
+ 
+ MIT
@@ -0,0 +1,167 @@
+ # Waterlight Python SDK
+ 
+ OpenAI-compatible Python client for the Waterlight API. Zero external dependencies.
+ 
+ ## Install
+ 
+ ```bash
+ pip install waterlight
+ ```
+ 
+ ## Quick Start
+ 
+ ```python
+ from waterlight import Waterlight
+ 
+ client = Waterlight(api_key="wl-...")
+ 
+ response = client.chat.completions.create(
+     model="mist-1-turbo",
+     messages=[{"role": "user", "content": "Hello!"}],
+ )
+ print(response.choices[0].message.content)
+ ```
+ 
+ ## Drop-in OpenAI Replacement
+ 
+ Change two lines:
+ 
+ ```python
+ # Before
+ from openai import OpenAI
+ client = OpenAI(api_key="sk-...")
+ 
+ # After
+ from waterlight import Waterlight
+ client = Waterlight(api_key="wl-...")
+ 
+ # Everything else stays the same
+ response = client.chat.completions.create(
+     model="mist-1-turbo",
+     messages=[{"role": "user", "content": "Explain quantum computing"}],
+ )
+ ```
+ 
+ ## Streaming
+ 
+ ```python
+ stream = client.chat.completions.create(
+     model="mist-1-turbo",
+     messages=[{"role": "user", "content": "Tell me a story"}],
+     stream=True,
+ )
+ for chunk in stream:
+     content = chunk.choices[0].delta.content
+     if content:
+         print(content, end="", flush=True)
+ ```
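To keep the complete reply as well as print it incrementally, the same loop can accumulate the deltas into one string; a minimal sketch reusing the `stream` from the block above:

```python
# Collect streamed deltas into the full assistant reply while printing them.
parts = []
for chunk in stream:
    content = chunk.choices[0].delta.content
    if content:
        print(content, end="", flush=True)
        parts.append(content)
full_text = "".join(parts)
```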
+ 
+ ## Tool Calling
+ 
+ ```python
+ tools = [{
+     "type": "function",
+     "function": {
+         "name": "get_weather",
+         "description": "Get the weather for a location",
+         "parameters": {
+             "type": "object",
+             "properties": {
+                 "location": {"type": "string"},
+             },
+             "required": ["location"],
+         },
+     },
+ }]
+ 
+ response = client.chat.completions.create(
+     model="mist-1-turbo",
+     messages=[{"role": "user", "content": "What's the weather in Tokyo?"}],
+     tools=tools,
+ )
+ ```
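The example above only sends the tool definitions; the assistant message comes back with a `tool_calls` list (plain dicts in the OpenAI-compatible shape). A minimal sketch of executing the call and sending the result back, assuming that wire format; the `get_weather` implementation and the follow-up turn are illustrative, not part of the SDK:

```python
import json

message = response.choices[0].message
if message.tool_calls:
    call = message.tool_calls[0]
    args = json.loads(call["function"]["arguments"])
    result = get_weather(args["location"])  # your own function (illustrative)

    # Send the tool result back so the model can produce the final answer.
    followup = client.chat.completions.create(
        model="mist-1-turbo",
        messages=[
            {"role": "user", "content": "What's the weather in Tokyo?"},
            {"role": "assistant", "content": None, "tool_calls": message.tool_calls},
            {"role": "tool", "tool_call_id": call["id"], "content": json.dumps(result)},
        ],
        tools=tools,
    )
    print(followup.choices[0].message.content)
```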
+ 
+ ## Embeddings
+ 
+ ```python
+ response = client.embeddings.create(input="Hello world")
+ print(len(response.data[0].embedding))
+ ```
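Since the SDK has no dependencies, two embeddings can be compared with plain-Python cosine similarity; a small sketch (the default embedding model is whatever the API assigns when `model` is omitted):

```python
import math

a = client.embeddings.create(input="Hello world").data[0].embedding
b = client.embeddings.create(input="Goodbye world").data[0].embedding

# Cosine similarity = dot(a, b) / (|a| * |b|)
dot = sum(x * y for x, y in zip(a, b))
cosine = dot / (math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b)))
print(f"cosine similarity: {cosine:.3f}")
```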
+ 
+ ## Models
+ 
+ ```python
+ models = client.models.list()
+ for model in models.data:
+     print(model.id)
+ ```
+ 
+ ## Available Models
+ 
+ | Model | Best For |
+ |-------|----------|
+ | `mist-1` | Highest quality reasoning and analysis |
+ | `mist-1-turbo` | Fast, high quality all-rounder |
+ | `mist-1-flash` | Fastest responses, triage, summarization |
+ | `mist-1-reason` | Deep reasoning and math |
+ | `mist-1-code` | Code generation and review |
+ | `mist-1-vision` | Multimodal / image understanding |
+ 
+ ## Billing
+ 
+ ```python
+ billing = client.billing.get()
+ print(billing) # plan, spent_usd, balance, limits
+ ```
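`billing.get()` returns a plain dict, and the exact keys depend on the billing mode (the client docstring mentions `plan`, `billing_mode`, `spent_usd`, `balance_usd` for pay-as-you-go and `remaining_usd` for subscriptions). A guarded check before starting a large batch job might look like this sketch, with field names taken from that docstring:

```python
billing = client.billing.get()

# Fall back gracefully if a field is absent for the current billing mode.
remaining = billing.get("balance_usd", billing.get("remaining_usd", 0.0))
if remaining < 5.0:
    print(f"Low credit: {remaining:.2f} USD left on plan {billing.get('plan')!r}")
```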
+ 
+ ## Error Handling
+ 
+ ```python
+ from waterlight import (
+     AuthenticationError,
+     RateLimitError,
+     InsufficientCreditsError,
+     APIError,
+ )
+ 
+ try:
+     response = client.chat.completions.create(
+         model="mist-1-turbo",
+         messages=[{"role": "user", "content": "Hello"}],
+     )
+ except AuthenticationError:
+     print("Invalid API key")
+ except RateLimitError as e:
+     print(f"Rate limited — retry after {e.retry_after}s")
+ except InsufficientCreditsError:
+     print("Add credits at https://waterlight.ai")
+ except APIError as e:
+     print(f"API error {e.status_code}: {e.message}")
+ ```
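The client already retries transient failures internally (`max_retries`), so an outer retry is only needed when you want to keep waiting past that. A sketch of a user-level backoff on rate limits, honoring `retry_after` when the server provides it (the helper name is illustrative):

```python
import time

def create_with_backoff(messages, attempts=3):
    # Outer loop adds a longer, caller-controlled backoff on top of the
    # SDK's built-in retries; sleeps for retry_after when present.
    for attempt in range(attempts):
        try:
            return client.chat.completions.create(model="mist-1-turbo", messages=messages)
        except RateLimitError as e:
            time.sleep(e.retry_after or 2 ** attempt)
    raise RuntimeError("still rate limited after retries")
```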
+ 
+ ## Configuration
+ 
+ | Parameter | Env Var | Default |
+ |-----------|---------|---------|
+ | `api_key` | `WATERLIGHT_API_KEY` | — (required) |
+ | `base_url` | `WATERLIGHT_BASE_URL` | `https://api.waterlight.ai` |
+ | `timeout` | — | `120.0` |
+ | `max_retries` | — | `2` |
+ 
+ ```python
+ client = Waterlight(
+     api_key="wl-...", # or set WATERLIGHT_API_KEY env var
+     base_url="https://...", # or set WATERLIGHT_BASE_URL env var
+     timeout=120.0, # request timeout in seconds
+     max_retries=2, # retries on transient errors
+ )
+ ```
+ 
+ ## Requirements
+ 
+ - Python 3.9+
+ - No external dependencies
+ 
+ ## License
+ 
+ MIT
@@ -0,0 +1,35 @@
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+ 
+ [project]
+ name = "waterlight"
+ version = "0.1.0"
+ description = "Waterlight AI SDK — OpenAI-compatible client for the Waterlight API"
+ readme = "README.md"
+ license = "MIT"
+ requires-python = ">=3.9"
+ authors = [
+     { name = "Waterlight Research" },
+ ]
+ keywords = ["ai", "llm", "waterlight", "openai", "api"]
+ classifiers = [
+     "Development Status :: 4 - Beta",
+     "Intended Audience :: Developers",
+     "License :: OSI Approved :: MIT License",
+     "Programming Language :: Python :: 3",
+     "Programming Language :: Python :: 3.9",
+     "Programming Language :: Python :: 3.10",
+     "Programming Language :: Python :: 3.11",
+     "Programming Language :: Python :: 3.12",
+     "Programming Language :: Python :: 3.13",
+     "Topic :: Software Development :: Libraries :: Python Modules",
+     "Topic :: Scientific/Engineering :: Artificial Intelligence",
+ ]
+ 
+ [project.urls]
+ Homepage = "https://waterlight.ai"
+ Documentation = "https://docs.waterlight.ai"
+ 
+ [tool.hatch.build.targets.wheel]
+ packages = ["src/waterlight"]
@@ -0,0 +1,41 @@
+ """Waterlight AI SDK — OpenAI-compatible client.
+ 
+ Usage:
+     from waterlight import Waterlight
+ 
+     client = Waterlight(api_key="wl-...")
+     response = client.chat.completions.create(
+         model="mist-1-turbo",
+         messages=[{"role": "user", "content": "Hello"}],
+     )
+     print(response.choices[0].message.content)
+ 
+ Drop-in replacement for OpenAI:
+     # Before
+     from openai import OpenAI
+     client = OpenAI()
+ 
+     # After
+     from waterlight import Waterlight
+     client = Waterlight()
+ """
+ 
+ from .client import Waterlight
+ from .errors import (
+     WaterlightError,
+     AuthenticationError,
+     RateLimitError,
+     InsufficientCreditsError,
+     APIError,
+ )
+ 
+ __version__ = "0.1.0"
+ 
+ __all__ = [
+     "Waterlight",
+     "WaterlightError",
+     "AuthenticationError",
+     "RateLimitError",
+     "InsufficientCreditsError",
+     "APIError",
+ ]
@@ -0,0 +1,457 @@
+ """Waterlight SDK client — OpenAI-compatible, zero external dependencies.
+ 
+ Drop-in replacement for the OpenAI Python SDK:
+     # Before
+     from openai import OpenAI
+     client = OpenAI(api_key="sk-...")
+ 
+     # After
+     from waterlight import Waterlight
+     client = Waterlight(api_key="wl-...")
+ 
+ All methods mirror the OpenAI SDK interface:
+     client.chat.completions.create(...)
+     client.embeddings.create(...)
+     client.models.list()
+ """
+ 
+ from __future__ import annotations
+ 
+ import json
+ import os
+ import time
+ from dataclasses import dataclass, field
+ from typing import Any, Iterator, Optional
+ from urllib import error as urlerror
+ from urllib import request
+ 
+ from .errors import APIError, WaterlightError, _map_status
+ 
+ 
+ # =============================================================================
+ # Response Types
+ # =============================================================================
+ 
+ @dataclass
+ class Message:
+     """Chat message."""
+     role: str
+     content: str | None = None
+     tool_calls: list[dict] | None = None
+ 
+ @dataclass
+ class Choice:
+     """Single completion choice."""
+     index: int
+     message: Message
+     finish_reason: str | None = None
+ 
+ @dataclass
+ class Usage:
+     """Token usage."""
+     prompt_tokens: int = 0
+     completion_tokens: int = 0
+     total_tokens: int = 0
+ 
+ @dataclass
+ class ChatCompletion:
+     """Non-streaming chat completion response."""
+     id: str = ""
+     object: str = "chat.completion"
+     created: int = 0
+     model: str = ""
+     choices: list[Choice] = field(default_factory=list)
+     usage: Usage = field(default_factory=Usage)
+ 
+ @dataclass
+ class Delta:
+     """Streaming delta."""
+     content: str | None = None
+     tool_calls: list[dict] | None = None
+ 
+ @dataclass
+ class StreamChoice:
+     """Single streaming choice."""
+     index: int = 0
+     delta: Delta = field(default_factory=Delta)
+     finish_reason: str | None = None
+ 
+ @dataclass
+ class ChatCompletionChunk:
+     """Streaming chunk."""
+     id: str = ""
+     object: str = "chat.completion.chunk"
+     created: int = 0
+     model: str = ""
+     choices: list[StreamChoice] = field(default_factory=list)
+ 
+ @dataclass
+ class Embedding:
+     """Single embedding result."""
+     object: str = "embedding"
+     embedding: list[float] = field(default_factory=list)
+     index: int = 0
+ 
+ @dataclass
+ class EmbeddingResponse:
+     """Embeddings response."""
+     object: str = "list"
+     data: list[Embedding] = field(default_factory=list)
+     model: str = ""
+     usage: Usage = field(default_factory=Usage)
+ 
+ @dataclass
+ class Model:
+     """Model info."""
+     id: str = ""
+     object: str = "model"
+     created: int = 0
+     owned_by: str = "waterlight"
+ 
+ @dataclass
+ class ModelList:
+     """Models list response."""
+     object: str = "list"
+     data: list[Model] = field(default_factory=list)
+ 
+ 
+ # =============================================================================
+ # Resource Classes
+ # =============================================================================
+ 
+ class Completions:
+     """Chat completions resource — mirrors openai.chat.completions."""
+ 
+     def __init__(self, client: Waterlight):
+         self._client = client
+ 
+     def create(
+         self,
+         *,
+         model: str = "mist-1-turbo",
+         messages: list[dict[str, Any]],
+         temperature: float | None = None,
+         max_tokens: int | None = None,
+         stream: bool = False,
+         tools: list[dict] | None = None,
+         tool_choice: str | dict | None = None,
+         **kwargs: Any,
+     ) -> ChatCompletion | Iterator[ChatCompletionChunk]:
+         """Create a chat completion.
+ 
+         Args:
+             model: Model to use (mist-1, mist-1-turbo, mist-1-flash, etc.)
+             messages: List of message dicts with 'role' and 'content'
+             temperature: Sampling temperature (0-2)
+             max_tokens: Maximum tokens to generate
+             stream: If True, returns an iterator of chunks
+             tools: OpenAI-format tool definitions
+             tool_choice: Tool choice strategy
+ 
+         Returns:
+             ChatCompletion if stream=False, Iterator[ChatCompletionChunk] if stream=True
+         """
+         payload: dict[str, Any] = {
+             "model": model,
+             "messages": messages,
+             "stream": stream,
+         }
+         if temperature is not None:
+             payload["temperature"] = temperature
+         if max_tokens is not None:
+             payload["max_tokens"] = max_tokens
+         if tools is not None:
+             payload["tools"] = tools
+         if tool_choice is not None:
+             payload["tool_choice"] = tool_choice
+         payload.update(kwargs)
+ 
+         if stream:
+             return self._stream(payload)
+         return self._complete(payload)
+ 
+     def _complete(self, payload: dict) -> ChatCompletion:
+         body = self._client._post("/v1/chat/completions", payload)
+         choices = []
+         for c in body.get("choices", []):
+             msg = c.get("message", {})
+             choices.append(Choice(
+                 index=c.get("index", 0),
+                 message=Message(
+                     role=msg.get("role", "assistant"),
+                     content=msg.get("content"),
+                     tool_calls=msg.get("tool_calls"),
+                 ),
+                 finish_reason=c.get("finish_reason"),
+             ))
+         usage_data = body.get("usage", {})
+         return ChatCompletion(
+             id=body.get("id", ""),
+             created=body.get("created", 0),
+             model=body.get("model", ""),
+             choices=choices,
+             usage=Usage(
+                 prompt_tokens=usage_data.get("prompt_tokens", 0),
+                 completion_tokens=usage_data.get("completion_tokens", 0),
+                 total_tokens=usage_data.get("total_tokens", 0),
+             ),
+         )
+ 
+     def _stream(self, payload: dict) -> Iterator[ChatCompletionChunk]:
+         for event_data in self._client._post_stream("/v1/chat/completions", payload):
+             if event_data == "[DONE]":
+                 return
+             try:
+                 data = json.loads(event_data)
+             except json.JSONDecodeError:
+                 continue
+             choices = []
+             for c in data.get("choices", []):
+                 delta_data = c.get("delta", {})
+                 choices.append(StreamChoice(
+                     index=c.get("index", 0),
+                     delta=Delta(
+                         content=delta_data.get("content"),
+                         tool_calls=delta_data.get("tool_calls"),
+                     ),
+                     finish_reason=c.get("finish_reason"),
+                 ))
+             yield ChatCompletionChunk(
+                 id=data.get("id", ""),
+                 created=data.get("created", 0),
+                 model=data.get("model", ""),
+                 choices=choices,
+             )
+ 
+ 
+ class Chat:
+     """Chat namespace — mirrors openai.chat."""
+ 
+     def __init__(self, client: Waterlight):
+         self.completions = Completions(client)
+ 
+ 
+ class Embeddings:
+     """Embeddings resource — mirrors openai.embeddings."""
+ 
+     def __init__(self, client: Waterlight):
+         self._client = client
+ 
+     def create(
+         self,
+         *,
+         input: str | list[str],
+         model: str | None = None,
+         encoding_format: str = "float",
+     ) -> EmbeddingResponse:
+         """Create embeddings for the given input."""
+         payload: dict[str, Any] = {"input": input, "encoding_format": encoding_format}
+         if model is not None:
+             payload["model"] = model
+         body = self._client._post("/v1/embeddings", payload)
+         data = []
+         for item in body.get("data", []):
+             data.append(Embedding(
+                 embedding=item.get("embedding", []),
+                 index=item.get("index", 0),
+             ))
+         usage_data = body.get("usage", {})
+         return EmbeddingResponse(
+             data=data,
+             model=body.get("model", ""),
+             usage=Usage(
+                 prompt_tokens=usage_data.get("prompt_tokens", 0),
+                 completion_tokens=usage_data.get("completion_tokens", 0),
+                 total_tokens=usage_data.get("total_tokens", 0),
+             ),
+         )
+ 
+ 
+ class Models:
+     """Models resource — mirrors openai.models."""
+ 
+     def __init__(self, client: Waterlight):
+         self._client = client
+ 
+     def list(self) -> ModelList:
+         """List available models."""
+         body = self._client._get("/v1/models")
+         models = []
+         for item in body.get("data", []):
+             models.append(Model(
+                 id=item.get("id", ""),
+                 created=item.get("created", 0),
+                 owned_by=item.get("owned_by", "waterlight"),
+             ))
+         return ModelList(data=models)
+ 
+ 
+ class Billing:
+     """Billing resource — Waterlight-specific (not in OpenAI SDK)."""
+ 
+     def __init__(self, client: Waterlight):
+         self._client = client
+ 
+     def get(self) -> dict:
+         """Get billing info for the authenticated key.
+ 
+         Returns a dict with plan, billing_mode, spent_usd, balance_usd (PAYG),
+         remaining_usd (subscription), daily_limit, daily_used, etc.
+         """
+         return self._client._get("/v1/billing")
+ 
+ 
+ # =============================================================================
+ # Main Client
+ # =============================================================================
+ 
+ class Waterlight:
+     """Waterlight API client — OpenAI-compatible interface.
+ 
+     Usage:
+         client = Waterlight(api_key="wl-...")
+ 
+         # Chat completion
+         response = client.chat.completions.create(
+             model="mist-1-turbo",
+             messages=[{"role": "user", "content": "Hello"}],
+         )
+         print(response.choices[0].message.content)
+ 
+         # Streaming
+         stream = client.chat.completions.create(
+             model="mist-1-turbo",
+             messages=[{"role": "user", "content": "Tell me a story"}],
+             stream=True,
+         )
+         for chunk in stream:
+             if chunk.choices[0].delta.content:
+                 print(chunk.choices[0].delta.content, end="", flush=True)
+ 
+         # Embeddings
+         emb = client.embeddings.create(input="Hello world")
+         print(len(emb.data[0].embedding))
+     """
+ 
+     DEFAULT_BASE_URL = "https://api.waterlight.ai"
+ 
+     def __init__(
+         self,
+         api_key: str | None = None,
+         base_url: str | None = None,
+         timeout: float = 120.0,
+         max_retries: int = 2,
+     ):
+         self.api_key = api_key or os.getenv("WATERLIGHT_API_KEY")
+         if not self.api_key:
+             raise ValueError(
+                 "API key required. Pass api_key= or set WATERLIGHT_API_KEY env var. "
+                 "Get your key at https://waterlight.ai"
+             )
+         self.base_url = (base_url or os.getenv("WATERLIGHT_BASE_URL") or self.DEFAULT_BASE_URL).rstrip("/")
+         self._timeout = timeout
+         self._max_retries = max_retries
+ 
+         self.chat = Chat(self)
+         self.embeddings = Embeddings(self)
+         self.models = Models(self)
+         self.billing = Billing(self)
+ 
+     def _headers(self) -> dict[str, str]:
+         return {
+             "Authorization": f"Bearer {self.api_key}",
+             "Content-Type": "application/json",
+             "User-Agent": "waterlight-python/0.1.0",
+         }
+ 
+     def _post(self, path: str, payload: dict) -> dict:
+         """POST JSON and return parsed response body."""
+         data = json.dumps(payload).encode("utf-8")
+         req = request.Request(
+             url=f"{self.base_url}{path}",
+             data=data,
+             headers=self._headers(),
+             method="POST",
+         )
+         return self._execute(req)
+ 
+     def _get(self, path: str) -> dict:
+         """GET and return parsed response body."""
+         req = request.Request(
+             url=f"{self.base_url}{path}",
+             headers=self._headers(),
+             method="GET",
+         )
+         return self._execute(req)
+ 
+     def _post_stream(self, path: str, payload: dict) -> Iterator[str]:
+         """POST and yield SSE event data strings."""
+         data = json.dumps(payload).encode("utf-8")
+         headers = self._headers()
+         headers["Accept"] = "text/event-stream"
+         req = request.Request(
+             url=f"{self.base_url}{path}",
+             data=data,
+             headers=headers,
+             method="POST",
+         )
+         try:
+             with request.urlopen(req, timeout=self._timeout) as resp:
+                 for raw_line in resp:
+                     line = raw_line.decode("utf-8").rstrip("\r\n")
+                     if line.startswith("data: "):
+                         yield line[6:]
+         except urlerror.HTTPError as exc:
+             self._handle_http_error(exc)
+ 
+     def _execute(self, req: request.Request) -> dict:
+         """Execute request with retry on transient failures."""
+         attempt = 0
+         while True:
+             try:
+                 with request.urlopen(req, timeout=self._timeout) as resp:
+                     return json.loads(resp.read().decode("utf-8") or "{}")
+             except urlerror.HTTPError as exc:
+                 status = exc.code
+                 if status in (429, 500, 502, 503, 504) and attempt < self._max_retries:
+                     retry_after = exc.headers.get("Retry-After") if exc.headers else None
+                     delay = float(retry_after) if retry_after else 0.5 * (2 ** attempt)
+                     time.sleep(delay)
+                     attempt += 1
+                     continue
+                 self._handle_http_error(exc)
+             except urlerror.URLError as exc:
+                 raise APIError(f"Network error: {exc.reason}", status_code=503)
+ 
+     def _handle_http_error(self, exc: urlerror.HTTPError) -> None:
+         """Parse error body and raise appropriate exception."""
+         body = ""
+         try:
+             body = exc.read().decode("utf-8") if exc.fp else ""
+         except Exception:
+             pass
+         message = "Request failed"
+         request_id = None
+         retry_after = None
+         if body:
+             try:
+                 data = json.loads(body)
+                 if isinstance(data, dict):
+                     err = data.get("error", {})
+                     if isinstance(err, dict):
+                         message = err.get("message", message)
+                     elif isinstance(err, str):
+                         message = err
+                     elif "message" in data:
+                         message = data["message"]
+             except json.JSONDecodeError:
+                 message = body
+         if exc.headers:
+             request_id = exc.headers.get("X-Request-Id")
+             ra = exc.headers.get("Retry-After")
+             if ra:
+                 try:
+                     retry_after = float(ra)
+                 except ValueError:
+                     pass
+         raise _map_status(exc.code, message, request_id=request_id, retry_after=retry_after)
@@ -0,0 +1,60 @@
+ """Waterlight SDK error hierarchy."""
+ 
+ from __future__ import annotations
+ 
+ 
+ class WaterlightError(Exception):
+     """Base exception for all Waterlight SDK errors."""
+ 
+     def __init__(
+         self,
+         message: str,
+         status_code: int | None = None,
+         request_id: str | None = None,
+     ):
+         super().__init__(message)
+         self.message = message
+         self.status_code = status_code
+         self.request_id = request_id
+ 
+ 
+ class AuthenticationError(WaterlightError):
+     """Invalid or missing API key."""
+     pass
+ 
+ 
+ class RateLimitError(WaterlightError):
+     """Rate limit exceeded."""
+ 
+     def __init__(
+         self,
+         message: str,
+         status_code: int | None = None,
+         request_id: str | None = None,
+         retry_after: float | None = None,
+     ):
+         super().__init__(message, status_code=status_code, request_id=request_id)
+         self.retry_after = retry_after
+ 
+ 
+ class InsufficientCreditsError(WaterlightError):
+     """Account has insufficient credits."""
+     pass
+ 
+ 
+ class APIError(WaterlightError):
+     """Server-side error."""
+     pass
+ 
+ 
+ def _map_status(status: int, message: str, request_id: str | None = None,
+                 retry_after: float | None = None) -> WaterlightError:
+     """Map HTTP status to the appropriate error class."""
+     if status == 401:
+         return AuthenticationError(message, status_code=status, request_id=request_id)
+     if status == 402:
+         return InsufficientCreditsError(message, status_code=status, request_id=request_id)
+     if status == 429:
+         return RateLimitError(message, status_code=status, request_id=request_id,
+                               retry_after=retry_after)
+     return APIError(message, status_code=status, request_id=request_id)