alloyai-client 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alloyai_client/__init__.py +11 -0
- alloyai_client/alloyai_client.py +214 -0
- alloyai_client/types.py +233 -0
- alloyai_client-0.1.1.dist-info/METADATA +89 -0
- alloyai_client-0.1.1.dist-info/RECORD +8 -0
- alloyai_client-0.1.1.dist-info/WHEEL +5 -0
- alloyai_client-0.1.1.dist-info/licenses/LICENSE +21 -0
- alloyai_client-0.1.1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,214 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import base64
|
|
4
|
+
import json
|
|
5
|
+
import urllib.error
|
|
6
|
+
import urllib.request
|
|
7
|
+
from typing import (
|
|
8
|
+
Any,
|
|
9
|
+
Callable,
|
|
10
|
+
Dict,
|
|
11
|
+
Iterable,
|
|
12
|
+
Iterator,
|
|
13
|
+
Literal,
|
|
14
|
+
Mapping,
|
|
15
|
+
Optional,
|
|
16
|
+
Sequence,
|
|
17
|
+
Union,
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
from .types import ChatResponse, JsonSchemaValue, Message, Tool
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class AlloyClientError(RuntimeError):
    """Raised when the Alloy server responds with an HTTP error status.

    Attributes:
        status_code: Numeric HTTP status code returned by the server.
        body: Raw response body text, when one was received (may be None).
    """

    def __init__(self, status_code: int, message: str, body: Optional[str] = None) -> None:
        self.status_code = status_code
        self.body = body
        super().__init__(f"HTTP {status_code}: {message}")
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class AlloyClient:
    """Minimal HTTP client for an Alloy server, built on urllib only.

    Talks JSON to the ``/image``, ``/chat`` and ``/audio`` endpoints; the
    ``/image`` endpoint additionally supports server-sent-event streaming.
    """

    def __init__(
        self,
        base_url: str = "http://127.0.0.1:8000",
        *,
        timeout_s: float = 300.0,
    ) -> None:
        """Remember the server location and the default request timeout.

        Args:
            base_url: Root URL of the Alloy server; any trailing slash is dropped.
            timeout_s: Default socket timeout in seconds for every request.
        """
        self._timeout_s = timeout_s
        self._base_url = base_url.rstrip("/")

    def image(
        self,
        model_id: str,
        prompt: Any,
        *,
        stream: bool = False,
        decode_images: bool = True,
        timeout_s: Optional[float] = None,
        **params: Any,
    ) -> Union[Dict[str, Any], Iterator[Dict[str, Any]]]:
        """POST to ``/image`` and return the JSON result or an SSE iterator.

        Args:
            model_id: Server-side model identifier.
            prompt: Prompt forwarded verbatim in the request body.
            stream: When True, return an iterator of SSE event dicts instead
                of a single response dict.
            decode_images: When True, base64-decode any ``images`` list in the
                response (or in each streamed event payload) into bytes.
            timeout_s: Per-call timeout override; None uses the client default.
            **params: Extra fields merged into the JSON request body.
        """
        body: Dict[str, Any] = {"model_id": model_id, "prompt": prompt, "stream": stream}
        body.update(params)

        if stream:
            resp = self._post("/image", body, stream=True, timeout_s=timeout_s)
            return self._stream_events(resp, decode_images)

        resp = self._post("/image", body, stream=False, timeout_s=timeout_s)
        with resp:
            return self._maybe_decode_images(self._read_json(resp), decode_images)

    def chat(
        self,
        model: str,
        messages: Optional[Sequence[Union[Mapping[str, Any], Message]]],
        *,
        think: bool | Literal["low", "medium", "high"] | None = None,
        tools: Optional[Sequence[Union[Mapping[str, Any], Tool, Callable]]] = None,
        options: Optional[Dict[str, Any]] = None,
        stream: bool = False,
        format: Optional[Union[JsonSchemaValue, Literal["", "json"]]] = None,
        keep_alive: float | str | None = None,
    ) -> Union[Iterator[ChatResponse], ChatResponse]:
        """POST to ``/chat`` and return the parsed JSON response.

        Only non-streaming chat is implemented; ``stream=True`` raises.
        Optional arguments are included in the request body only when they
        are not None.

        NOTE(review): the body is serialized with ``json.dumps``, which cannot
        encode pydantic ``Message``/``Tool`` instances or bare callables even
        though the signature accepts them — confirm callers pass plain dicts.
        """
        if stream:
            raise ValueError("Streaming chat is not supported yet")

        body: Dict[str, Any] = {"model": model, "messages": messages, "stream": stream}
        for key, value in (
            ("think", think),
            ("tools", tools),
            ("options", options),
            ("format", format),
            ("keep_alive", keep_alive),
        ):
            if value is not None:
                body[key] = value

        resp = self._post("/chat", body, stream=False, timeout_s=None)
        with resp:
            return self._read_json(resp)

    def audio(
        self,
        model_id: str,
        text: Any,
        *,
        language: Any = None,
        speaker: Any = None,
        instruct: Any = None,
        ref_audio: Any = None,
        ref_text: Any = None,
        stream: bool = False,
        keep_alive: float | str | None = None,
        timeout_s: Optional[float] = None,
    ) -> Dict[str, Any]:
        """POST to ``/audio`` and return the parsed JSON response.

        Only non-streaming synthesis is implemented; ``stream=True`` raises.
        Optional fields are sent only when not None.
        """
        if stream:
            raise ValueError("Streaming audio is not supported yet")

        body: Dict[str, Any] = {"model_id": model_id, "text": text, "stream": stream}
        for key, value in (
            ("language", language),
            ("speaker", speaker),
            ("instruct", instruct),
            ("ref_audio", ref_audio),
            ("ref_text", ref_text),
            ("keep_alive", keep_alive),
        ):
            if value is not None:
                body[key] = value

        resp = self._post("/audio", body, stream=False, timeout_s=timeout_s)
        with resp:
            return self._read_json(resp)

    def _post(
        self,
        path: str,
        payload: Dict[str, Any],
        *,
        stream: bool,
        timeout_s: Optional[float],
    ):
        """Send a JSON POST and return the open urllib response object.

        Raises:
            AlloyClientError: for any HTTP error status; the response body
                (when readable) is attached as both the message and ``body``.
        """
        encoded = json.dumps(payload).encode("utf-8")
        accept = "text/event-stream" if stream else "application/json"
        req = urllib.request.Request(
            f"{self._base_url}{path}",
            data=encoded,
            method="POST",
            headers={"Content-Type": "application/json", "Accept": accept},
        )
        effective_timeout = timeout_s if timeout_s is not None else self._timeout_s
        try:
            return urllib.request.urlopen(req, timeout=effective_timeout)
        except urllib.error.HTTPError as exc:
            text = exc.read().decode("utf-8", "ignore")
            # `from None` hides the noisy urllib traceback behind our error.
            raise AlloyClientError(exc.code, text or exc.reason, body=text) from None

    def _read_json(self, response) -> Dict[str, Any]:
        """Read the whole response body; empty bodies decode to an empty dict."""
        raw = response.read()
        return json.loads(raw.decode("utf-8")) if raw else {}

    def _maybe_decode_images(self, data: Dict[str, Any], decode_images: bool) -> Dict[str, Any]:
        """Return *data*, with its ``images`` list base64-decoded when requested.

        The input dict is never mutated; a shallow copy is returned instead.
        """
        if not decode_images:
            return data
        encoded = data.get("images")
        if not isinstance(encoded, list):
            return data
        decoded = dict(data)
        decoded["images"] = [base64.b64decode(entry) for entry in encoded]
        return decoded

    def _stream_events(
        self,
        response,
        decode_images: bool,
    ) -> Iterator[Dict[str, Any]]:
        """Yield parsed SSE events, decoding image payloads on the fly.

        The HTTP response is always closed, even if the consumer abandons
        the iterator early.
        """
        try:
            for evt in self._iter_sse(response):
                pl = evt.get("payload")
                if decode_images and isinstance(pl, dict):
                    imgs = pl.get("images")
                    if isinstance(imgs, list):
                        # Copy event and payload so callers never see shared state.
                        pl = {**pl, "images": [base64.b64decode(i) for i in imgs]}
                        evt = {**evt, "payload": pl}
                yield evt
        finally:
            response.close()

    def _iter_sse(self, response) -> Iterable[Dict[str, Any]]:
        """Parse a server-sent-event byte stream into event dicts.

        Each yielded dict may carry ``event`` (event name) and/or ``payload``
        (the JSON-decoded ``data:`` field). A blank line flushes the pending
        event; a trailing event without a final blank line is also flushed.
        """
        current: Dict[str, Any] = {}
        for chunk in response:
            text = chunk.decode("utf-8", "ignore").strip()
            if not text:
                if current:
                    yield current
                    current = {}
                continue
            prefix, colon, remainder = text.partition(":")
            if colon and prefix == "data":
                payload_text = remainder.strip()
                if payload_text:
                    current["payload"] = json.loads(payload_text)
            elif colon and prefix == "event":
                current["event"] = remainder.strip()
        if current:
            yield current
|
alloyai_client/types.py
ADDED
|
@@ -0,0 +1,233 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import (
|
|
4
|
+
Any,
|
|
5
|
+
Dict,
|
|
6
|
+
Literal,
|
|
7
|
+
Mapping,
|
|
8
|
+
Optional,
|
|
9
|
+
Sequence,
|
|
10
|
+
Union,
|
|
11
|
+
)
|
|
12
|
+
from pydantic import (
|
|
13
|
+
BaseModel,
|
|
14
|
+
ConfigDict,
|
|
15
|
+
Field,
|
|
16
|
+
)
|
|
17
|
+
from PIL.Image import Image
|
|
18
|
+
|
|
19
|
+
JsonSchemaValue = Dict[str, Any]
|
|
20
|
+
|
|
21
|
+
class SubscriptableBaseModel(BaseModel):
    """Pydantic base model with dict-style access on top of attributes.

    Adds ``obj[key]``, ``obj[key] = value``, ``key in obj`` and
    ``obj.get(key, default)`` so model instances can be handled like plain
    dicts by callers that expect Ollama-style response objects.
    """

    def __getitem__(self, key: str) -> Any:
        """Return the value of field *key*.

        Raises:
            KeyError: when *key* is not considered present (see __contains__).
        """
        if key not in self:
            raise KeyError(key)
        return getattr(self, key)

    def __setitem__(self, key: str, value: Any) -> None:
        """Assign *value* to field *key*; normal pydantic assignment rules apply."""
        setattr(self, key, value)

    def __contains__(self, key: str) -> bool:
        """Report whether *key* counts as present on this instance.

        A key is present when it was explicitly set (even to None), or when it
        is a declared field whose default value is not None.
        """
        if key in self.model_fields_set:
            return True
        field_info = self.__class__.model_fields.get(key)
        return bool(field_info) and field_info.default is not None

    def get(self, key: str, default: Any = None) -> Any:
        """Return the attribute named *key*, or *default* when it is missing."""
        return getattr(self, key, default)
|
|
104
|
+
|
|
105
|
+
class Message(SubscriptableBaseModel):
    """
    Chat message exchanged with the chat endpoint: a role plus optional
    content, thinking text, images, tool name and tool calls.
    """

    role: str
    "Assumed role of the message. Response messages has role 'assistant' or 'tool'."

    content: Optional[str] = None
    'Content of the message. Response messages contains message fragments when streaming.'

    thinking: Optional[str] = None
    'Thinking content. Only present when thinking is enabled.'

    # NOTE(review): annotated as PIL Image, but the docstring below describes
    # paths/bytes inputs — confirm which the server-facing serializer expects.
    images: Optional[Sequence[Image]] = None
    """
    Optional list of image data for multimodal models.

    Valid input types are:

    - `str` or path-like object: path to image file
    - `bytes` or bytes-like object: raw image data

    Valid image formats depend on the model. See the model card for more information.
    """

    tool_name: Optional[str] = None
    'Name of the executed tool.'

    class ToolCall(SubscriptableBaseModel):
        """
        Model tool calls.
        """

        class Function(SubscriptableBaseModel):
            """
            Tool call function.
            """

            name: str
            'Name of the function.'

            arguments: Mapping[str, Any]
            'Arguments of the function.'

        function: Function
        'Function to be called.'

    tool_calls: Optional[Sequence[ToolCall]] = None
    """
    Tools calls to be made by the model.
    """
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
class Tool(SubscriptableBaseModel):
    """Declaration of a tool the model may call, as a JSON-schema-style object."""

    # Only 'function' tools are modeled here.
    type: Optional[str] = 'function'

    class Function(SubscriptableBaseModel):
        """Schema of the callable behind a 'function' tool."""

        name: Optional[str] = None
        description: Optional[str] = None

        class Parameters(SubscriptableBaseModel):
            """JSON-schema 'parameters' object describing the function arguments."""

            # populate_by_name lets aliased fields (defs/$defs) be set by either name.
            model_config = ConfigDict(populate_by_name=True)
            type: Optional[Literal['object']] = 'object'
            # Serialized under the JSON-schema '$defs' key via the field alias.
            defs: Optional[Any] = Field(None, alias='$defs')
            items: Optional[Any] = None
            required: Optional[Sequence[str]] = None

            class Property(SubscriptableBaseModel):
                """A single JSON-schema property entry."""

                # Property values may be arbitrary objects; skip strict validation.
                model_config = ConfigDict(arbitrary_types_allowed=True)

                type: Optional[Union[str, Sequence[str]]] = None
                items: Optional[Any] = None
                description: Optional[str] = None
                enum: Optional[Sequence[Any]] = None

            properties: Optional[Mapping[str, Property]] = None

        parameters: Optional[Parameters] = None

    function: Optional[Function] = None
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
class BaseGenerateResponse(SubscriptableBaseModel):
    """Timing and accounting fields shared by generation responses."""

    model: Optional[str] = None
    'Model used to generate response.'

    created_at: Optional[str] = None
    'Time when the request was created.'

    done: Optional[bool] = None
    'True if response is complete, otherwise False. Useful for streaming to detect the final response.'

    done_reason: Optional[str] = None
    'Reason for completion. Only present when done is True.'

    total_duration: Optional[int] = None
    'Total duration in nanoseconds.'

    load_duration: Optional[int] = None
    'Load duration in nanoseconds.'

    prompt_eval_count: Optional[int] = None
    'Number of tokens evaluated in the prompt.'

    prompt_eval_duration: Optional[int] = None
    'Duration of evaluating the prompt in nanoseconds.'

    eval_count: Optional[int] = None
    'Number of tokens evaluated in inference.'

    eval_duration: Optional[int] = None
    'Duration of evaluating inference in nanoseconds.'
|
|
218
|
+
|
|
219
|
+
class EmbedResponse(BaseGenerateResponse):
    """
    Response returned by embed requests: one embedding vector per input,
    plus the timing fields inherited from BaseGenerateResponse.
    """

    embeddings: Sequence[Sequence[float]]
    'Embeddings of the inputs.'
|
|
226
|
+
|
|
227
|
+
class ChatResponse(BaseGenerateResponse):
    """
    Response returned by chat requests: the assistant message plus the
    timing fields inherited from BaseGenerateResponse.
    """

    message: Message
    'Response message.'
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: alloyai-client
|
|
3
|
+
Version: 0.1.1
|
|
4
|
+
Summary: Python client for the Alloy server
|
|
5
|
+
License: MIT License
|
|
6
|
+
|
|
7
|
+
Copyright (c) 2026 Alloy Contributors
|
|
8
|
+
|
|
9
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
10
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
11
|
+
in the Software without restriction, including without limitation the rights
|
|
12
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
13
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
14
|
+
furnished to do so, subject to the following conditions:
|
|
15
|
+
|
|
16
|
+
The above copyright notice and this permission notice shall be included in all
|
|
17
|
+
copies or substantial portions of the Software.
|
|
18
|
+
|
|
19
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
20
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
21
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
22
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
23
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
24
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
25
|
+
SOFTWARE.
|
|
26
|
+
|
|
27
|
+
Requires-Python: >=3.10
|
|
28
|
+
Description-Content-Type: text/markdown
|
|
29
|
+
License-File: LICENSE
|
|
30
|
+
Requires-Dist: pydantic<3,>=2.12.5
|
|
31
|
+
Requires-Dist: pillow<13,>=12.1.0
|
|
32
|
+
Dynamic: license-file
|
|
33
|
+
|
|
34
|
+
# alloy-client
|
|
35
|
+
|
|
36
|
+
A lightweight Python client for the Alloy server.
|
|
37
|
+
|
|
38
|
+
## Install
|
|
39
|
+
|
|
40
|
+
```bash
|
|
41
|
+
pip install alloyai-client
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
For local development:
|
|
45
|
+
|
|
46
|
+
```bash
|
|
47
|
+
pip install -e .
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
## Quick start
|
|
51
|
+
|
|
52
|
+
```python
|
|
53
|
+
from alloyai_client import AlloyClient
|
|
54
|
+
|
|
55
|
+
client = AlloyClient("http://127.0.0.1:8000")
|
|
56
|
+
|
|
57
|
+
# Images
|
|
58
|
+
result = client.image(model_id="qwen-image", prompt="a cinematic portrait")
|
|
59
|
+
with open("output.png", "wb") as f:
|
|
60
|
+
f.write(result["images"][0])
|
|
61
|
+
|
|
62
|
+
# Chat (non-streaming)
|
|
63
|
+
response = client.chat(
|
|
64
|
+
model="qwen3-medium",
|
|
65
|
+
messages=[{"role": "user", "content": "Hello!"}],
|
|
66
|
+
)
|
|
67
|
+
print(response["message"]["content"])
|
|
68
|
+
|
|
69
|
+
# Audio (non-streaming)
|
|
70
|
+
audio = client.audio(
|
|
71
|
+
model_id="qwen3-tts-base",
|
|
72
|
+
text="Hello from Alloy",
|
|
73
|
+
)
|
|
74
|
+
print(audio["sample_rate"], len(audio["outputs"]))
|
|
75
|
+
```
|
|
76
|
+
|
|
77
|
+
## Types
|
|
78
|
+
|
|
79
|
+
The client re-exports minimal Ollama-style types so you can annotate inputs without
|
|
80
|
+
pulling in the `ollama` dependency:
|
|
81
|
+
|
|
82
|
+
```python
|
|
83
|
+
from alloyai_client import Message, JsonSchemaValue
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
## Notes
|
|
87
|
+
|
|
88
|
+
- Streaming is supported for `/image` only.
|
|
89
|
+
- `/chat` and `/audio` are non-streaming for now.
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
alloyai_client/__init__.py,sha256=Ni3bePA04S27k8rMeLnFvpCXds3gGhZbeiwuBZL3LsI,248
|
|
2
|
+
alloyai_client/alloyai_client.py,sha256=5efrnQL2-vp1Vhain7W-Sylj4nYk-jNj9gp5fwC0uqU,6916
|
|
3
|
+
alloyai_client/types.py,sha256=W4hj2tUwwMWlkEV9D7p60DAJMyAdnEOKVf4LCxwS4wU,6390
|
|
4
|
+
alloyai_client-0.1.1.dist-info/licenses/LICENSE,sha256=SAwc119ZCsRjxVu0qvjeRQEENDLW4sl0suNPdyOIbns,1075
|
|
5
|
+
alloyai_client-0.1.1.dist-info/METADATA,sha256=FE1c6VIH-9E4dEOZDtbj00LD-qqU8yrRxU_7hqKNBP4,2604
|
|
6
|
+
alloyai_client-0.1.1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
|
|
7
|
+
alloyai_client-0.1.1.dist-info/top_level.txt,sha256=oM1nqpgWdx1ZT5ydfq5Stea7wfXSGeuFgFUb92IoP1M,15
|
|
8
|
+
alloyai_client-0.1.1.dist-info/RECORD,,
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Alloy Contributors
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
alloyai_client
|