livellm 1.7.1__tar.gz → 1.7.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {livellm-1.7.1 → livellm-1.7.2}/PKG-INFO +1 -1
- {livellm-1.7.1 → livellm-1.7.2}/livellm/livellm.py +19 -3
- {livellm-1.7.1 → livellm-1.7.2}/pyproject.toml +1 -1
- {livellm-1.7.1 → livellm-1.7.2}/.gitignore +0 -0
- {livellm-1.7.1 → livellm-1.7.2}/LICENSE +0 -0
- {livellm-1.7.1 → livellm-1.7.2}/README.md +0 -0
- {livellm-1.7.1 → livellm-1.7.2}/livellm/__init__.py +0 -0
- {livellm-1.7.1 → livellm-1.7.2}/livellm/models/__init__.py +0 -0
- {livellm-1.7.1 → livellm-1.7.2}/livellm/models/agent/__init__.py +0 -0
- {livellm-1.7.1 → livellm-1.7.2}/livellm/models/agent/agent.py +0 -0
- {livellm-1.7.1 → livellm-1.7.2}/livellm/models/agent/chat.py +0 -0
- {livellm-1.7.1 → livellm-1.7.2}/livellm/models/agent/output_schema.py +0 -0
- {livellm-1.7.1 → livellm-1.7.2}/livellm/models/agent/tools.py +0 -0
- {livellm-1.7.1 → livellm-1.7.2}/livellm/models/audio/__init__.py +0 -0
- {livellm-1.7.1 → livellm-1.7.2}/livellm/models/audio/speak.py +0 -0
- {livellm-1.7.1 → livellm-1.7.2}/livellm/models/audio/transcribe.py +0 -0
- {livellm-1.7.1 → livellm-1.7.2}/livellm/models/common.py +0 -0
- {livellm-1.7.1 → livellm-1.7.2}/livellm/models/fallback.py +0 -0
- {livellm-1.7.1 → livellm-1.7.2}/livellm/models/transcription.py +0 -0
- {livellm-1.7.1 → livellm-1.7.2}/livellm/models/ws.py +0 -0
- {livellm-1.7.1 → livellm-1.7.2}/livellm/py.typed +0 -0
- {livellm-1.7.1 → livellm-1.7.2}/livellm/transcripton.py +0 -0
{livellm-1.7.1 → livellm-1.7.2}/livellm/livellm.py

@@ -5,7 +5,7 @@ import json
 import warnings
 from typing import List, Optional, AsyncIterator, Union, overload, Dict, Any, Type
 from .models.common import Settings, SuccessResponse
-from .models.agent.agent import AgentRequest, AgentResponse
+from .models.agent.agent import AgentRequest, AgentResponse, ContextOverflowStrategy
 from .models.agent.output_schema import OutputSchema
 from .models.audio.speak import SpeakRequest, EncodedSpeakResponse
 from .models.audio.transcribe import TranscribeRequest, TranscribeResponse, File
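The only import-level change is the new `ContextOverflowStrategy` symbol. Its definition lives in livellm/models/agent/agent.py, which this diff does not show; as a rough sketch for reading the hunks below, it plausibly looks like the enum here. TRUNCATE is confirmed by the default value used later in the diff; RECYCLE and the string values are assumptions taken from the docstring wording "'truncate' or 'recycle'".

```python
# Sketch only: the real ContextOverflowStrategy is defined in
# livellm/models/agent/agent.py, which is not part of this diff. TRUNCATE is
# confirmed by the defaults in the hunks below; RECYCLE and the string values
# are assumptions based on the docstring wording "'truncate' or 'recycle'".
from enum import Enum


class ContextOverflowStrategy(str, Enum):
    TRUNCATE = "truncate"
    RECYCLE = "recycle"  # assumed member
```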
@@ -53,6 +53,8 @@ class BaseLivellmClient(ABC):
         tools: Optional[list] = None,
         include_history: bool = False,
         output_schema: Optional[Union[OutputSchema, Dict[str, Any], Type[BaseModel]]] = None,
+        context_limit: int = 0,
+        context_overflow_strategy: ContextOverflowStrategy = ContextOverflowStrategy.TRUNCATE,
         timeout: Optional[float] = None,
         **kwargs
     ) -> AgentResponse:
@@ -77,6 +79,8 @@ class BaseLivellmClient(ABC):
         tools: Optional[list] = None,
         include_history: bool = False,
         output_schema: Optional[Union[OutputSchema, Dict[str, Any], Type[BaseModel]]] = None,
+        context_limit: int = 0,
+        context_overflow_strategy: ContextOverflowStrategy = ContextOverflowStrategy.TRUNCATE,
         timeout: Optional[float] = None,
         **kwargs
     ) -> AgentResponse:
@@ -111,6 +115,8 @@ class BaseLivellmClient(ABC):
                 - An OutputSchema instance
                 - A dict representing a JSON schema
                 - A Pydantic BaseModel class (will be converted to OutputSchema)
+            context_limit: Maximum context size in tokens. If <= 0, context overflow handling is disabled.
+            context_overflow_strategy: Strategy for handling context overflow: 'truncate' or 'recycle'
             timeout: Optional timeout in seconds (overrides default client timeout)
 
         Returns:
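The docstring additions pin down two rules: a `context_limit` of zero or less disables overflow handling entirely, and `context_overflow_strategy` chooses between 'truncate' and 'recycle' once the history exceeds the limit. The toy below is not livellm's implementation (token counting and the exact behavior of 'recycle' are not visible in this diff); it only illustrates the disable rule and one plausible drop-oldest reading of truncation.

```python
# Toy illustration only -- NOT livellm's code. It mirrors the two documented
# rules from the docstring hunk above: a context_limit <= 0 disables overflow
# handling, and the strategy decides what happens when the history exceeds
# the limit. How livellm counts tokens and what 'recycle' does internally is
# not visible in this diff.
from enum import Enum
from typing import List, Tuple


class ContextOverflowStrategy(str, Enum):  # local stand-in, see earlier sketch
    TRUNCATE = "truncate"
    RECYCLE = "recycle"


def fit_history(
    turns: List[Tuple[str, int]],  # (message, token_count) pairs
    context_limit: int,
    strategy: ContextOverflowStrategy = ContextOverflowStrategy.TRUNCATE,
) -> List[Tuple[str, int]]:
    total = sum(tokens for _, tokens in turns)
    if context_limit <= 0 or total <= context_limit:
        return turns  # documented: <= 0 disables overflow handling
    if strategy is ContextOverflowStrategy.TRUNCATE:
        # One plausible reading of 'truncate': drop the oldest turns until the
        # most recent ones fit the token budget.
        kept: List[Tuple[str, int]] = []
        budget = context_limit
        for msg, tokens in reversed(turns):
            if tokens > budget:
                break
            kept.append((msg, tokens))
            budget -= tokens
        return list(reversed(kept))
    # 'recycle' is named in the docstring, but its behavior is not shown here.
    raise NotImplementedError("recycle semantics are not visible in this diff")


print(fit_history([("hi", 40), ("context", 80), ("question", 50)], context_limit=100))
# -> [('question', 50)]
```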
@@ -142,7 +148,9 @@ class BaseLivellmClient(ABC):
             tools=tools or [],
             gen_config=kwargs or None,
             include_history=include_history,
-            output_schema=resolved_schema
+            output_schema=resolved_schema,
+            context_limit=context_limit,
+            context_overflow_strategy=context_overflow_strategy
         )
         return await self.handle_agent_run(agent_request, timeout=timeout)
 
@@ -184,6 +192,8 @@ class BaseLivellmClient(ABC):
         tools: Optional[list] = None,
         include_history: bool = False,
         output_schema: Optional[Union[OutputSchema, Dict[str, Any], Type[BaseModel]]] = None,
+        context_limit: int = 0,
+        context_overflow_strategy: ContextOverflowStrategy = ContextOverflowStrategy.TRUNCATE,
         timeout: Optional[float] = None,
         **kwargs
     ) -> AsyncIterator[AgentResponse]:
@@ -208,6 +218,8 @@ class BaseLivellmClient(ABC):
         tools: Optional[list] = None,
         include_history: bool = False,
         output_schema: Optional[Union[OutputSchema, Dict[str, Any], Type[BaseModel]]] = None,
+        context_limit: int = 0,
+        context_overflow_strategy: ContextOverflowStrategy = ContextOverflowStrategy.TRUNCATE,
         timeout: Optional[float] = None,
         **kwargs
     ) -> AsyncIterator[AgentResponse]:
@@ -245,6 +257,8 @@ class BaseLivellmClient(ABC):
                 - An OutputSchema instance
                 - A dict representing a JSON schema
                 - A Pydantic BaseModel class (will be converted to OutputSchema)
+            context_limit: Maximum context size in tokens. If <= 0, context overflow handling is disabled.
+            context_overflow_strategy: Strategy for handling context overflow: 'truncate' or 'recycle'
             timeout: Optional timeout in seconds (overrides default client timeout)
 
         Returns:
@@ -276,7 +290,9 @@ class BaseLivellmClient(ABC):
             tools=tools or [],
             gen_config=kwargs or None,
             include_history=include_history,
-            output_schema=resolved_schema
+            output_schema=resolved_schema,
+            context_limit=context_limit,
+            context_overflow_strategy=context_overflow_strategy
         )
         stream = self.handle_agent_run_stream(agent_request, timeout=timeout)
 
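Both the blocking and the streaming overloads thread the new settings into the `AgentRequest` that is handed to `handle_agent_run` / `handle_agent_run_stream`. As a call-shape sketch only: the stub client below is invented here so the snippet runs standalone; only the keyword parameters mirror the expanded signature in this diff, and the real client's public class and method names may differ.

```python
# Call-shape sketch only. FakeAgentClient is a stub defined here so the snippet
# runs on its own; it is NOT livellm's BaseLivellmClient, and the real public
# class/method names and required arguments may differ. Only the keyword
# parameters mirror the expanded signature in the diff above.
import asyncio
from enum import Enum
from typing import Any, Dict, Optional


class ContextOverflowStrategy(str, Enum):  # local stand-in
    TRUNCATE = "truncate"
    RECYCLE = "recycle"


class FakeAgentClient:
    async def run_agent(  # hypothetical method name
        self,
        *,
        include_history: bool = False,
        output_schema: Optional[Dict[str, Any]] = None,
        context_limit: int = 0,
        context_overflow_strategy: ContextOverflowStrategy = ContextOverflowStrategy.TRUNCATE,
        timeout: Optional[float] = None,
        **kwargs: Any,
    ) -> Dict[str, Any]:
        # Echo the overflow settings so the call shape is visible when run.
        return {
            "context_limit": context_limit,
            "strategy": context_overflow_strategy.value,
            "timeout": timeout,
        }


async def main() -> None:
    client = FakeAgentClient()
    result = await client.run_agent(
        include_history=True,
        context_limit=8000,  # tokens; <= 0 would disable overflow handling
        context_overflow_strategy=ContextOverflowStrategy.RECYCLE,
        timeout=30.0,
    )
    print(result)


asyncio.run(main())
```

Per the `AsyncIterator[AgentResponse]` annotations above, the streaming overload accepts the same keyword arguments and yields `AgentResponse` chunks instead of returning a single response.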