livellm 1.1.0__py3-none-any.whl → 1.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- livellm/livellm.py +456 -29
- livellm/models/agent/agent.py +3 -4
- livellm/models/agent/chat.py +5 -6
- livellm/models/agent/tools.py +1 -1
- livellm/models/audio/speak.py +1 -1
- livellm/models/audio/transcribe.py +7 -8
- livellm/models/fallback.py +3 -3
- livellm-1.2.0.dist-info/METADATA +497 -0
- livellm-1.2.0.dist-info/RECORD +17 -0
- livellm-1.1.0.dist-info/METADATA +0 -573
- livellm-1.1.0.dist-info/RECORD +0 -17
- {livellm-1.1.0.dist-info → livellm-1.2.0.dist-info}/WHEEL +0 -0
- {livellm-1.1.0.dist-info → livellm-1.2.0.dist-info}/licenses/LICENSE +0 -0
livellm-1.2.0.dist-info/METADATA
@@ -0,0 +1,497 @@
Metadata-Version: 2.4
Name: livellm
Version: 1.2.0
Summary: Python client for the LiveLLM Server
Project-URL: Homepage, https://github.com/qalby-tech/livellm-client-py
Project-URL: Repository, https://github.com/qalby-tech/livellm-client-py
Author: Kamil Saliamov
License-File: LICENSE
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Typing :: Typed
Requires-Python: >=3.10
Requires-Dist: httpx>=0.27.0
Requires-Dist: pydantic>=2.0.0
Provides-Extra: testing
Requires-Dist: pytest-asyncio>=0.21.0; extra == 'testing'
Requires-Dist: pytest-cov>=4.1.0; extra == 'testing'
Requires-Dist: pytest>=8.4.2; extra == 'testing'
Description-Content-Type: text/markdown

# LiveLLM Python Client

[Python 3.10+](https://www.python.org/downloads/)
[License: MIT](https://opensource.org/licenses/MIT)

Python client library for the LiveLLM Server - a unified proxy for AI agent, audio, and transcription services.

## Features

- 🚀 **Async-first** - Built on httpx for high-performance operations
- 🔒 **Type-safe** - Full type hints and Pydantic validation
- 🎯 **Multi-provider** - OpenAI, Google, Anthropic, Groq, ElevenLabs
- 🔄 **Streaming** - Real-time streaming for agent and audio
- 🛠️ **Flexible API** - Use request objects or keyword arguments
- 🎙️ **Audio services** - Text-to-speech and transcription
- ⚡ **Fallback strategies** - Sequential and parallel handling
- 🧹 **Auto cleanup** - Context managers and garbage collection

## Installation

```bash
pip install livellm
```

Or with the testing extras:

```bash
pip install "livellm[testing]"
```

## Quick Start

```python
import asyncio
from livellm import LivellmClient
from livellm.models import Settings, ProviderKind, TextMessage

async def main():
    # Initialize with automatic provider setup
    async with LivellmClient(
        base_url="http://localhost:8000",
        configs=[
            Settings(
                uid="openai",
                provider=ProviderKind.OPENAI,
                api_key="your-api-key"
            )
        ]
    ) as client:
        # Simple keyword-arguments style (extra kwargs become gen_config)
        response = await client.agent_run(
            provider_uid="openai",
            model="gpt-4",
            messages=[TextMessage(role="user", content="Hello!")],
            temperature=0.7
        )
        print(response.output)

asyncio.run(main())
```

## Configuration

### Client Initialization

```python
from livellm import LivellmClient
from livellm.models import Settings, ProviderKind

# Basic
client = LivellmClient(base_url="http://localhost:8000")

# With timeout and pre-configured providers
client = LivellmClient(
    base_url="http://localhost:8000",
    timeout=30.0,
    configs=[
        Settings(
            uid="openai",
            provider=ProviderKind.OPENAI,
            api_key="sk-...",
            base_url="https://api.openai.com/v1"  # Optional
        ),
        Settings(
            uid="anthropic",
            provider=ProviderKind.ANTHROPIC,
            api_key="sk-ant-...",
            blacklist_models=["claude-instant-1"]  # Optional
        )
    ]
)
```

### Supported Providers

`OPENAI` • `GOOGLE` • `ANTHROPIC` • `GROQ` • `ELEVENLABS`

```python
# Add a provider dynamically
await client.update_config(Settings(
    uid="my-provider",
    provider=ProviderKind.OPENAI,
    api_key="your-api-key"
))

# List and delete
configs = await client.get_configs()
await client.delete_config("my-provider")
```

## Usage Examples

### Agent Services

#### Two Ways to Call Methods

All methods support **two calling styles**:

**Style 1: Keyword arguments** (kwargs become `gen_config`)
```python
response = await client.agent_run(
    provider_uid="openai",
    model="gpt-4",
    messages=[TextMessage(role="user", content="Hello!")],
    temperature=0.7,
    max_tokens=500
)
```

**Style 2: Request objects**
```python
from livellm.models import AgentRequest

response = await client.agent_run(
    AgentRequest(
        provider_uid="openai",
        model="gpt-4",
        messages=[TextMessage(role="user", content="Hello!")],
        gen_config={"temperature": 0.7, "max_tokens": 500}
    )
)
```

#### Basic Agent Run

```python
from livellm.models import TextMessage

# Using kwargs (recommended for simplicity)
response = await client.agent_run(
    provider_uid="openai",
    model="gpt-4",
    messages=[
        TextMessage(role="system", content="You are helpful."),
        TextMessage(role="user", content="Explain quantum computing")
    ],
    temperature=0.7,
    max_tokens=500
)
print(f"Output: {response.output}")
print(f"Tokens: {response.usage.input_tokens} in, {response.usage.output_tokens} out")
```

#### Streaming Agent Response

```python
# Streaming also supports both calling styles
stream = client.agent_run_stream(
    provider_uid="openai",
    model="gpt-4",
    messages=[TextMessage(role="user", content="Tell me a story")],
    temperature=0.8
)

async for chunk in stream:
    print(chunk.output, end="", flush=True)
```

#### Agent with Vision (Binary Messages)

```python
import base64
from livellm.models import BinaryMessage

with open("image.jpg", "rb") as f:
    image_data = base64.b64encode(f.read()).decode("utf-8")

response = await client.agent_run(
    provider_uid="openai",
    model="gpt-4-vision",
    messages=[
        BinaryMessage(
            role="user",
            content=image_data,
            mime_type="image/jpeg",
            caption="What's in this image?"
        )
    ]
)
```

#### Agent with Tools

```python
from livellm.models import WebSearchInput, MCPStreamableServerInput, ToolKind

# Web search tool
response = await client.agent_run(
    provider_uid="openai",
    model="gpt-4",
    messages=[TextMessage(role="user", content="Latest AI news?")],
    tools=[WebSearchInput(
        kind=ToolKind.WEB_SEARCH,
        search_context_size="high"  # low, medium, or high
    )]
)

# MCP server tool
response = await client.agent_run(
    provider_uid="openai",
    model="gpt-4",
    messages=[TextMessage(role="user", content="Run custom tool")],
    tools=[MCPStreamableServerInput(
        kind=ToolKind.MCP_STREAMABLE_SERVER,
        url="http://mcp-server:8080",
        prefix="mcp_",
        timeout=15
    )]
)
```

### Audio Services

#### Text-to-Speech

```python
from livellm.models import SpeakMimeType

# Non-streaming
audio = await client.speak(
    provider_uid="openai",
    model="tts-1",
    text="Hello, world!",
    voice="alloy",
    mime_type=SpeakMimeType.MP3,
    sample_rate=24000,
    speed=1.0  # kwargs become gen_config
)
with open("output.mp3", "wb") as f:
    f.write(audio)

# Streaming
audio = bytes()
async for chunk in client.speak_stream(
    provider_uid="openai",
    model="tts-1",
    text="Hello, world!",
    voice="alloy",
    mime_type=SpeakMimeType.PCM,
    sample_rate=24000
):
    audio += chunk

# Save raw PCM as WAV (16-bit mono at the requested sample rate)
import wave
with wave.open("output.wav", "wb") as wf:
    wf.setnchannels(1)
    wf.setsampwidth(2)
    wf.setframerate(24000)
    wf.writeframes(audio)
```

#### Transcription

```python
# Method 1: Multipart upload (kwargs style)
with open("audio.wav", "rb") as f:
    audio_bytes = f.read()

transcription = await client.transcribe(
    provider_uid="openai",
    file=("audio.wav", audio_bytes, "audio/wav"),
    model="whisper-1",
    language="en",  # Optional
    temperature=0.0  # kwargs become gen_config
)
print(f"Text: {transcription.text}")
print(f"Language: {transcription.language}")

# Method 2: JSON request object (base64-encoded)
import base64
from livellm.models import TranscribeRequest

audio_b64 = base64.b64encode(audio_bytes).decode("utf-8")
transcription = await client.transcribe(
    TranscribeRequest(
        provider_uid="openai",
        file=("audio.wav", audio_b64, "audio/wav"),
        model="whisper-1"
    )
)
```

### Fallback Strategies

Handle failures automatically with sequential or parallel fallback:

```python
from livellm.models import AgentRequest, AgentFallbackRequest, FallbackStrategy, TextMessage

messages = [TextMessage(role="user", content="Hello!")]

# Sequential: try each request in order until one succeeds
response = await client.agent_run(
    AgentFallbackRequest(
        strategy=FallbackStrategy.SEQUENTIAL,
        requests=[
            AgentRequest(provider_uid="primary", model="gpt-4", messages=messages, tools=[]),
            AgentRequest(provider_uid="backup", model="claude-3", messages=messages, tools=[])
        ],
        timeout_per_request=30
    )
)

# Parallel: try all simultaneously, use the first success
response = await client.agent_run(
    AgentFallbackRequest(
        strategy=FallbackStrategy.PARALLEL,
        requests=[
            AgentRequest(provider_uid="p1", model="gpt-4", messages=messages, tools=[]),
            AgentRequest(provider_uid="p2", model="claude-3", messages=messages, tools=[]),
            AgentRequest(provider_uid="p3", model="gemini-pro", messages=messages, tools=[])
        ],
        timeout_per_request=10
    )
)

# Also works for audio
from livellm.models import AudioFallbackRequest, SpeakRequest, SpeakMimeType

audio = await client.speak(
    AudioFallbackRequest(
        strategy=FallbackStrategy.SEQUENTIAL,
        requests=[
            SpeakRequest(provider_uid="elevenlabs", model="turbo", text="Hi",
                         voice="rachel", mime_type=SpeakMimeType.MP3, sample_rate=44100),
            SpeakRequest(provider_uid="openai", model="tts-1", text="Hi",
                         voice="alloy", mime_type=SpeakMimeType.MP3, sample_rate=44100)
        ]
    )
)
```

## Resource Management

**Recommended**: Use context managers for automatic cleanup.

```python
# ✅ Best: Context manager (auto cleanup)
async with LivellmClient(base_url="http://localhost:8000") as client:
    response = await client.ping()
# Configs deleted, connection closed automatically

# ✅ Good: Manual cleanup
client = LivellmClient(base_url="http://localhost:8000")
try:
    response = await client.ping()
finally:
    await client.cleanup()

# ⚠️ OK: Garbage collection (shows a warning if configs still exist)
client = LivellmClient(base_url="http://localhost:8000")
response = await client.ping()
# Cleaned up when the object is destroyed
```

## API Reference

### Client Methods

**Configuration**
- `ping()` - Health check
- `update_config(config)` / `update_configs(configs)` - Add or update providers (see the sketch below)
- `get_configs()` - List all configurations
- `delete_config(uid)` - Remove a provider

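For illustration, a minimal sketch of registering several providers in one call with `update_configs`, assuming it accepts a list of `Settings` (mirroring `update_config`) and that `get_configs()` returns the stored `Settings` objects:

```python
from livellm import LivellmClient
from livellm.models import Settings, ProviderKind

async def register_providers(client: LivellmClient) -> None:
    # Register two providers in a single call; uids must be unique.
    # Assumes update_configs takes a list of Settings, like update_config takes one.
    await client.update_configs([
        Settings(uid="openai", provider=ProviderKind.OPENAI, api_key="sk-..."),
        Settings(uid="groq", provider=ProviderKind.GROQ, api_key="gsk-..."),
    ])
    # Confirm what the server now has configured
    # (assumes get_configs() returns the stored Settings objects).
    configs = await client.get_configs()
    print([config.uid for config in configs])
```
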
**Agent**
- `agent_run(request | **kwargs)` - Run agent (blocking)
- `agent_run_stream(request | **kwargs)` - Run agent (streaming)

**Audio**
- `speak(request | **kwargs)` - Text-to-speech (blocking)
- `speak_stream(request | **kwargs)` - Text-to-speech (streaming)
- `transcribe(request | **kwargs)` - Speech-to-text

**Cleanup**
- `cleanup()` - Release resources
- `async with client:` - Auto cleanup (recommended)

### Key Models

**Core**
- `Settings(uid, provider, api_key, base_url?, blacklist_models?)` - Provider config
- `ProviderKind` - `OPENAI` | `GOOGLE` | `ANTHROPIC` | `GROQ` | `ELEVENLABS`

**Messages**
- `TextMessage(role, content)` - Text message
- `BinaryMessage(role, content, mime_type, caption?)` - Image/audio message
- `MessageRole` - `USER` | `MODEL` | `SYSTEM` (or use strings: `"user"`, `"model"`, `"system"`; see the sketch below)

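Enum members and their string forms are interchangeable as roles; a minimal sketch mixing both styles (assuming the `MessageRole` members serialize to the strings above):

```python
from livellm.models import TextMessage, MessageRole

messages = [
    TextMessage(role=MessageRole.SYSTEM, content="You are terse."),   # enum member
    TextMessage(role="user", content="Summarize httpx in one line.")  # plain string
]
```
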
**Requests**
- `AgentRequest(provider_uid, model, messages, tools?, gen_config?)`
- `SpeakRequest(provider_uid, model, text, voice, mime_type, sample_rate, gen_config?)`
- `TranscribeRequest(provider_uid, file, model, language?, gen_config?)`

**Tools**
- `WebSearchInput(kind=ToolKind.WEB_SEARCH, search_context_size)`
- `MCPStreamableServerInput(kind=ToolKind.MCP_STREAMABLE_SERVER, url, prefix?, timeout?)`

**Fallback**
- `AgentFallbackRequest(strategy, requests, timeout_per_request?)`
- `AudioFallbackRequest(strategy, requests, timeout_per_request?)`
- `FallbackStrategy` - `SEQUENTIAL` | `PARALLEL`

**Responses**
- `AgentResponse(output, usage{input_tokens, output_tokens}, ...)`
- `TranscribeResponse(text, language)`

## Error Handling

```python
import httpx

try:
    response = await client.agent_run(
        provider_uid="openai",
        model="gpt-4",
        messages=[TextMessage(role="user", content="Hi")]
    )
except httpx.HTTPStatusError as e:
    print(f"HTTP {e.response.status_code}: {e.response.text}")
except httpx.RequestError as e:
    print(f"Request failed: {e}")
```

## Development

```bash
# Install with dev dependencies
pip install -e ".[testing]"

# Run tests
pytest tests/

# Type checking
mypy livellm
```

## Requirements

- Python 3.10+
- httpx >= 0.27.0
- pydantic >= 2.0.0

## Links

- [GitHub Repository](https://github.com/qalby-tech/livellm-client-py)
- [Issue Tracker](https://github.com/qalby-tech/livellm-client-py/issues)

## License

MIT License - see LICENSE file for details.

livellm-1.2.0.dist-info/RECORD
@@ -0,0 +1,17 @@
livellm/__init__.py,sha256=JG_0-UCfQI_3D0Y2PzobZLS5OhJwK76i8t81ye0KpfY,279
livellm/livellm.py,sha256=w6Dc0eewOSJie4rmfMq-afck6Coh30-KmkRNh9_Eeko,24003
livellm/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
livellm/models/__init__.py,sha256=JBUd1GkeDexLSdjSOcUet78snu0NNxnhU7mBN3BhqIA,1199
livellm/models/common.py,sha256=YqRwP6ChWbRdoen4MU6RO4u6HeM0mQJbXiiRV4DuauM,1740
livellm/models/fallback.py,sha256=zGG_MjdbaTx0fqKZTEg3ullej-CJznPfwaon0jEvRvI,1170
livellm/models/agent/__init__.py,sha256=KVm6AgQoWEaoq47QAG4Ou4NimoXOTkjXC-0-gnMRLZ8,476
livellm/models/agent/agent.py,sha256=-UcGv5Bzw5ALmWX4lIqpbWqMVjCsjBc0KIE6_JKbCXM,1106
livellm/models/agent/chat.py,sha256=zGfeEHx0luwq23pqWF1megcuEDUl6IhV4keLJeZry_A,1028
livellm/models/agent/tools.py,sha256=wVWfx6_jxL3IcmX_Nt_PonZ3RQLtpfqJnszHz32BQiU,1403
livellm/models/audio/__init__.py,sha256=sz2NxCOfFGVvp-XQUsdgOR_TYBO1Wb-8LLXaZDEiAZk,282
livellm/models/audio/speak.py,sha256=KvENOE_Lf8AWBhzCMqu1dqGYv4WqaLf7fuWz8OYfJo8,1006
livellm/models/audio/transcribe.py,sha256=Leji2lk5zfq4GE-fw-z2dZR8BuijzW8TJ12GHw_UZJY,2085
livellm-1.2.0.dist-info/METADATA,sha256=aF-sHBOn1GDj8-u6RNwYYdto5dyRbeIHjWbAMBiFR0Q,13284
livellm-1.2.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
livellm-1.2.0.dist-info/licenses/LICENSE,sha256=yapGO2C_00ymEx6TADdbU8Oyc1bWOrZY-fjP-agmFL4,1071
livellm-1.2.0.dist-info/RECORD,,