livellm 1.1.0__tar.gz → 1.1.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: livellm
-Version: 1.1.0
+Version: 1.1.1
 Summary: Python client for the LiveLLM Server
 Project-URL: Homepage, https://github.com/qalby-tech/livellm-client-py
 Project-URL: Repository, https://github.com/qalby-tech/livellm-client-py
@@ -39,18 +39,19 @@ Python client library for the LiveLLM Server - a unified proxy for AI agent, aud
 - 🛠️ **Agent tools** - Web search and MCP server integration
 - 🎙️ **Audio services** - Text-to-speech and transcription
 - ⚡ **Fallback strategies** - Sequential and parallel fallback handling
-- 📦 **Context manager support** - Automatic cleanup with async context managers
+- 📦 **Smart resource management** - Automatic cleanup via GC, context managers, or manual control
+- 🧹 **Memory safe** - No resource leaks with multiple cleanup strategies
 
 ## Installation
 
 ```bash
-pip install livellm-client
+pip install livellm
 ```
 
 Or with development dependencies:
 
 ```bash
-pip install livellm-client[testing]
+pip install livellm[testing]
 ```
 
 ## Quick Start
@@ -181,6 +182,18 @@ print(f"Output: {response.output}")
 print(f"Tokens used - Input: {response.usage.input_tokens}, Output: {response.usage.output_tokens}")
 ```
 
+**Note:** You can use either the `MessageRole` enum or a string value for the `role` parameter:
+
+```python
+# Using the enum (recommended for type safety)
+TextMessage(role=MessageRole.USER, content="Hello")
+
+# Using a string (more convenient)
+TextMessage(role="user", content="Hello")
+
+# Both work identically and serialize correctly
+```
+
 #### Streaming Agent Response
 
 ```python
@@ -417,9 +430,32 @@ fallback_request = AudioFallbackRequest(
 audio = await client.speak(fallback_request)
 ```
 
-## Context Manager Support
+## Resource Management
+
+The client provides multiple ways to manage resources and clean up:
+
+### 1. Automatic Cleanup (Garbage Collection)
+
+The client automatically cleans up when garbage collected:
+
+```python
+async def main():
+    client = LivellmClient(base_url="http://localhost:8000")
+
+    # Use client...
+    response = await client.ping()
+
+    # No explicit cleanup needed - handled automatically when the object is destroyed
+    # Note: Provider configs are deleted synchronously from the server
+
+asyncio.run(main())
+```
+
+**Note**: Automatic cleanup works, but it emits a `ResourceWarning` when configs still exist, to encourage explicit cleanup and immediate resource release.
 
-The client supports async context managers for automatic cleanup:
+### 2. Context Manager (Recommended)
+
+Use async context managers for guaranteed cleanup:
 
 ```python
 async with LivellmClient(base_url="http://localhost:8000") as client:
@@ -433,17 +469,32 @@ async with LivellmClient(base_url="http://localhost:8000") as client:
     # Automatically cleans up configs and closes HTTP client
 ```
 
-Or manually:
+### 3. Manual Cleanup
+
+Explicitly call cleanup in a try/finally block:
 
 ```python
 client = LivellmClient(base_url="http://localhost:8000")
 try:
     # Use client...
-    pass
+    response = await client.ping()
 finally:
     await client.cleanup()
 ```
 
+### Cleanup Behavior
+
+The `cleanup()` method:
+- Deletes all provider configs created by the client
+- Closes the HTTP client connection
+- Is idempotent (safe to call multiple times)
+
+The `__del__()` destructor (automatic cleanup):
+- Runs when the object is garbage collected
+- Synchronously deletes provider configs from the server
+- Closes the HTTP client connection
+- Emits a `ResourceWarning` if configs still exist (to encourage explicit cleanup)
+
 ## API Reference
 
 ### Client Methods
@@ -468,8 +519,9 @@ finally:
 - `transcribe_json(request: TranscribeRequest | TranscribeFallbackRequest) -> TranscribeResponse` - JSON transcription
 
 #### Cleanup
-- `cleanup() -> None` - Clean up resources and close client
+- `cleanup() -> None` - Clean up resources and close the client (async)
 - `__aenter__() / __aexit__()` - Async context manager support
+- `__del__()` - Automatic cleanup when garbage collected (sync)
 
 ### Models
 
@@ -14,18 +14,19 @@ Python client library for the LiveLLM Server - a unified proxy for AI agent, aud
 - 🛠️ **Agent tools** - Web search and MCP server integration
 - 🎙️ **Audio services** - Text-to-speech and transcription
 - ⚡ **Fallback strategies** - Sequential and parallel fallback handling
-- 📦 **Context manager support** - Automatic cleanup with async context managers
+- 📦 **Smart resource management** - Automatic cleanup via GC, context managers, or manual control
+- 🧹 **Memory safe** - No resource leaks with multiple cleanup strategies
 
 ## Installation
 
 ```bash
-pip install livellm-client
+pip install livellm
 ```
 
 Or with development dependencies:
 
 ```bash
-pip install livellm-client[testing]
+pip install livellm[testing]
 ```
 
 ## Quick Start
@@ -156,6 +157,18 @@ print(f"Output: {response.output}")
 print(f"Tokens used - Input: {response.usage.input_tokens}, Output: {response.usage.output_tokens}")
 ```
 
+**Note:** You can use either the `MessageRole` enum or a string value for the `role` parameter:
+
+```python
+# Using the enum (recommended for type safety)
+TextMessage(role=MessageRole.USER, content="Hello")
+
+# Using a string (more convenient)
+TextMessage(role="user", content="Hello")
+
+# Both work identically and serialize correctly
+```
+
 #### Streaming Agent Response
 
 ```python
@@ -392,9 +405,32 @@ fallback_request = AudioFallbackRequest(
 audio = await client.speak(fallback_request)
 ```
 
-## Context Manager Support
+## Resource Management
+
+The client provides multiple ways to manage resources and clean up:
+
+### 1. Automatic Cleanup (Garbage Collection)
+
+The client automatically cleans up when garbage collected:
+
+```python
+async def main():
+    client = LivellmClient(base_url="http://localhost:8000")
+
+    # Use client...
+    response = await client.ping()
+
+    # No explicit cleanup needed - handled automatically when the object is destroyed
+    # Note: Provider configs are deleted synchronously from the server
+
+asyncio.run(main())
+```
+
+**Note**: Automatic cleanup works, but it emits a `ResourceWarning` when configs still exist, to encourage explicit cleanup and immediate resource release.
 
-The client supports async context managers for automatic cleanup:
+### 2. Context Manager (Recommended)
+
+Use async context managers for guaranteed cleanup:
 
 ```python
 async with LivellmClient(base_url="http://localhost:8000") as client:
@@ -408,17 +444,32 @@ async with LivellmClient(base_url="http://localhost:8000") as client:
     # Automatically cleans up configs and closes HTTP client
 ```
 
-Or manually:
+### 3. Manual Cleanup
+
+Explicitly call cleanup in a try/finally block:
 
 ```python
 client = LivellmClient(base_url="http://localhost:8000")
 try:
     # Use client...
-    pass
+    response = await client.ping()
 finally:
     await client.cleanup()
 ```
 
+### Cleanup Behavior
+
+The `cleanup()` method:
+- Deletes all provider configs created by the client
+- Closes the HTTP client connection
+- Is idempotent (safe to call multiple times)
+
+The `__del__()` destructor (automatic cleanup):
+- Runs when the object is garbage collected
+- Synchronously deletes provider configs from the server
+- Closes the HTTP client connection
+- Emits a `ResourceWarning` if configs still exist (to encourage explicit cleanup)
+
 ## API Reference
 
 ### Client Methods
@@ -443,8 +494,9 @@ finally:
 - `transcribe_json(request: TranscribeRequest | TranscribeFallbackRequest) -> TranscribeResponse` - JSON transcription
 
 #### Cleanup
-- `cleanup() -> None` - Clean up resources and close client
+- `cleanup() -> None` - Clean up resources and close the client (async)
 - `__aenter__() / __aexit__()` - Async context manager support
+- `__del__()` - Automatic cleanup when garbage collected (sync)
 
 ### Models
 
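The README above documents `cleanup()` as idempotent. A minimal sketch of what that allows, assuming the client is importable as `from livellm import LivellmClient` (the import path is not shown in this diff) and a server is running locally:

```python
import asyncio

from livellm import LivellmClient  # assumed import path; not shown in this diff

async def main() -> None:
    client = LivellmClient(base_url="http://localhost:8000")
    try:
        await client.ping()
    finally:
        await client.cleanup()
    # cleanup() is documented as idempotent, so a defensive second call is a no-op
    await client.cleanup()

asyncio.run(main())
```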
@@ -1,6 +1,8 @@
 """LiveLLM Client - Python client for the LiveLLM Proxy and Realtime APIs."""
+import asyncio
 import httpx
 import json
+import warnings
 from typing import List, Optional, AsyncIterator, Union
 from .models.common import Settings, SuccessResponse
 from .models.agent.agent import AgentRequest, AgentResponse
@@ -164,6 +166,7 @@ class LivellmClient:
         Should be called when you're done using the client.
         """
         for config in self.settings:
+            config: Settings = config
             await self.delete_config(config.uid)
         await self.client.aclose()
 
@@ -175,6 +178,32 @@ class LivellmClient:
         """Async context manager exit."""
         await self.cleanup()
 
+    def __del__(self):
+        """
+        Destructor to clean up resources when the client is garbage collected.
+        This will close the HTTP client and attempt to delete configs if cleanup wasn't called.
+        Note: It's recommended to use the async context manager or call cleanup() explicitly.
+        """
+        # Warn the user if cleanup wasn't called
+        if self.settings:
+            warnings.warn(
+                "LivellmClient is being garbage collected without explicit cleanup. "
+                "Provider configs may not be deleted from the server. "
+                "Consider using 'async with' or calling 'await client.cleanup()' explicitly.",
+                ResourceWarning,
+                stacklevel=2
+            )
+
+        # Best-effort: delete any remaining provider configs from the server
+        # using a short-lived synchronous httpx.Client (we can't await in __del__)
+        try:
+            with httpx.Client(base_url=self.base_url) as client:
+                for config in self.settings:
+                    config: Settings = config
+                    client.delete(f"providers/config/{config.uid}", headers=self.headers)
+        except Exception:
+            # Silently fail - we're in a destructor
+            pass
 
     async def agent_run(
         self,
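The new `__del__` only warns when provider configs are still registered, and `ResourceWarning` is silenced by Python's default filters. The following is a minimal, self-contained sketch of the same warn-on-garbage-collection pattern; `DemoClient` is a hypothetical stand-in, not part of livellm:

```python
import gc
import warnings

class DemoClient:
    """Illustrative stand-in for LivellmClient's warn-on-GC behavior."""
    def __init__(self):
        self.settings = ["config-1"]  # pretend a provider config was registered

    def cleanup(self):
        self.settings = []  # explicit cleanup clears the configs

    def __del__(self):
        if self.settings:
            warnings.warn(
                "DemoClient garbage collected without explicit cleanup.",
                ResourceWarning,
                stacklevel=2,
            )

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")  # ResourceWarning is ignored by default
    client = DemoClient()
    del client          # CPython: refcount hits zero, __del__ runs immediately
    gc.collect()
    assert any(issubclass(w.category, ResourceWarning) for w in caught)

client = DemoClient()
client.cleanup()  # after explicit cleanup, __del__ stays silent
del client
```

Note that `del` triggers the destructor promptly only under CPython's reference counting; other implementations may defer it, which is one more reason the README recommends explicit cleanup.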
@@ -1,18 +1,17 @@
 # models for chat messages
-from pydantic import BaseModel, Field, model_validator
+from pydantic import BaseModel, Field, model_validator, field_serializer
 from enum import Enum
-from typing import Optional
+from typing import Optional, Union
 
-class MessageRole(Enum):
+class MessageRole(str, Enum):
     USER = "user"
     MODEL = "model"
     SYSTEM = "system"
 
 
 class Message(BaseModel):
-    role: MessageRole = Field(..., description="The role of the message")
-
-
+    role: Union[MessageRole, str] = Field(..., description="The role of the message")
+
 class TextMessage(Message):
     content: str = Field(..., description="The content of the message")
 
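The switch from `Enum` to `str, Enum` is what makes the new `Union[MessageRole, str]` annotation behave uniformly: members compare equal to their string values and serialize to the same JSON as plain strings. A sketch of the effect, assuming pydantic v2 (implied by the `field_serializer` import above); this `TextMessage` is a simplified stand-in for the real model:

```python
from enum import Enum
from typing import Union

from pydantic import BaseModel, Field

class MessageRole(str, Enum):  # mirrors the change above
    USER = "user"
    MODEL = "model"
    SYSTEM = "system"

class TextMessage(BaseModel):  # simplified stand-in for the real model
    role: Union[MessageRole, str] = Field(..., description="The role of the message")
    content: str

# The str mixin makes members interchangeable with their plain-string values:
assert MessageRole.USER == "user"

# Enum and string spellings validate and serialize to identical JSON:
a = TextMessage(role=MessageRole.USER, content="Hello")
b = TextMessage(role="user", content="Hello")
assert a.model_dump_json() == b.model_dump_json()
```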
@@ -3,7 +3,7 @@ from pydantic import BaseModel, Field, field_validator
 from typing import Literal
 from enum import Enum
 
-class ToolKind(Enum):
+class ToolKind(str, Enum):
     WEB_SEARCH = "web_search"
     MCP_STREAMABLE_SERVER = "mcp_streamable_server"
 
@@ -6,7 +6,7 @@ from ..common import BaseRequest
 SpeakStreamResponse: TypeAlias = Tuple[AsyncIterator[bytes], str, int]
 
 
-class SpeakMimeType(Enum):
+class SpeakMimeType(str, Enum):
     PCM = "audio/pcm"
     WAV = "audio/wav"
     MP3 = "audio/mpeg"
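The same `str, Enum` mixin change applies to `ToolKind` above and `SpeakMimeType` here: members can be passed anywhere a plain string is expected, including `json.dumps`, with no custom encoder. A brief illustrative sketch (the header dict is hypothetical, not livellm API):

```python
import json
from enum import Enum

class SpeakMimeType(str, Enum):  # mirrors the change above
    PCM = "audio/pcm"
    WAV = "audio/wav"
    MP3 = "audio/mpeg"

# Members behave as plain strings in comparisons...
headers = {"Content-Type": SpeakMimeType.MP3}  # hypothetical header dict
assert headers["Content-Type"] == "audio/mpeg"

# ...and JSON-encode to their values without a custom encoder:
assert json.dumps({"mime": SpeakMimeType.WAV}) == '{"mime": "audio/wav"}'
```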
@@ -1,6 +1,6 @@
 [project]
 name = "livellm"
-version = "1.1.0"
+version = "1.1.1"
 description = "Python client for the LiveLLM Server"
 readme = "README.md"
 requires-python = ">=3.10"