gllm-inference-binary 0.5.38-cp313-cp313-win_amd64.whl → 0.5.40-cp313-cp313-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of gllm-inference-binary might be problematic.
- gllm_inference/em_invoker/azure_openai_em_invoker.pyi +2 -2
- gllm_inference/em_invoker/bedrock_em_invoker.pyi +2 -2
- gllm_inference/em_invoker/google_em_invoker.pyi +2 -2
- gllm_inference/em_invoker/openai_em_invoker.pyi +2 -2
- gllm_inference/em_invoker/twelevelabs_em_invoker.pyi +2 -2
- gllm_inference/em_invoker/voyage_em_invoker.pyi +2 -2
- gllm_inference/lm_invoker/anthropic_lm_invoker.pyi +22 -28
- gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi +24 -29
- gllm_inference/lm_invoker/bedrock_lm_invoker.pyi +10 -20
- gllm_inference/lm_invoker/datasaur_lm_invoker.pyi +11 -21
- gllm_inference/lm_invoker/google_lm_invoker.pyi +46 -28
- gllm_inference/lm_invoker/langchain_lm_invoker.pyi +10 -20
- gllm_inference/lm_invoker/litellm_lm_invoker.pyi +25 -30
- gllm_inference/lm_invoker/lm_invoker.pyi +4 -1
- gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi +22 -28
- gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi +4 -1
- gllm_inference/lm_invoker/openai_lm_invoker.pyi +43 -49
- gllm_inference/lm_invoker/xai_lm_invoker.pyi +26 -42
- gllm_inference/schema/lm_output.pyi +4 -0
- gllm_inference.cp313-win_amd64.pyd +0 -0
- {gllm_inference_binary-0.5.38.dist-info → gllm_inference_binary-0.5.40.dist-info}/METADATA +1 -1
- {gllm_inference_binary-0.5.38.dist-info → gllm_inference_binary-0.5.40.dist-info}/RECORD +24 -24
- {gllm_inference_binary-0.5.38.dist-info → gllm_inference_binary-0.5.40.dist-info}/WHEEL +0 -0
- {gllm_inference_binary-0.5.38.dist-info → gllm_inference_binary-0.5.40.dist-info}/top_level.txt +0 -0
gllm_inference/em_invoker/azure_openai_em_invoker.pyi
@@ -57,9 +57,9 @@ class AzureOpenAIEMInvoker(OpenAIEMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
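The same four `RetryConfig` combinations recur in every invoker docstring in this release. As a minimal sketch of building one (the import path is taken from the import hunks further down; the commented-out wiring is hypothetical, since the EM invoker constructor arguments are not part of this diff):

```python
from gllm_core.utils.retry import RetryConfig  # import path as it appears in the .pyi imports

# 5 retries with a 10.0-second timeout, matching the docstring examples above.
retry_config = RetryConfig(max_retries=5, timeout=10.0)

# Hypothetical wiring; the EM invoker constructor arguments are not shown in this diff:
# em_invoker = AzureOpenAIEMInvoker(..., retry_config=retry_config)
```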
gllm_inference/em_invoker/bedrock_em_invoker.pyi
@@ -67,9 +67,9 @@ class BedrockEMInvoker(BaseEMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
gllm_inference/em_invoker/google_em_invoker.pyi
@@ -89,9 +89,9 @@ class GoogleEMInvoker(BaseEMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
gllm_inference/em_invoker/openai_em_invoker.pyi
@@ -85,9 +85,9 @@ class OpenAIEMInvoker(BaseEMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
gllm_inference/em_invoker/twelevelabs_em_invoker.pyi
@@ -71,9 +71,9 @@ class TwelveLabsEMInvoker(BaseEMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
gllm_inference/em_invoker/voyage_em_invoker.pyi
@@ -74,9 +74,9 @@ class VoyageEMInvoker(BaseEMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
gllm_inference/lm_invoker/anthropic_lm_invoker.pyi
@@ -149,9 +149,9 @@ class AnthropicLMInvoker(BaseLMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -188,17 +188,18 @@ class AnthropicLMInvoker(BaseLMInvoker):
     )
     ```
 
-    When streaming is enabled, the thinking token will be streamed with the `EventType.DATA` event type.
-
     Streaming output example:
     ```python
-    {"type": "
-    {"type": "
-    {"type": "
-    {"type": "
+    {"type": "thinking_start", "value": "", ...}
+    {"type": "thinking", "value": "Let me think ", ...}
+    {"type": "thinking", "value": "about it...", ...}
+    {"type": "thinking_end", "value": "", ...}
     {"type": "response", "value": "Golden retriever ", ...}
     {"type": "response", "value": "is a good dog breed.", ...}
     ```
+    Note: By default, the thinking token will be streamed with the legacy `EventType.DATA` event type.
+    To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
+    LM invoker initialization. The legacy event format support will be removed in v0.6.
 
     Batch processing:
     The `AnthropicLMInvoker` supports batch processing, which allows the language model to process multiple
@@ -265,30 +266,20 @@ class AnthropicLMInvoker(BaseLMInvoker):
 
     Output types:
     The output of the `AnthropicLMInvoker` can either be:
-    1. `str`:
-    2. `LMOutput`: A Pydantic model
-    2.1. response (str)
-    2.2. tool_calls (list[ToolCall])
-
-    2.
-
-    2.
-
-    2.5. duration (float | None): The duration of the invocation in seconds, if the `output_analytics`
-        parameter is set to `True`. Defaults to None.
-    2.6. finish_details (dict[str, Any]): The details about how the generation finished, if the
-        `output_analytics` parameter is set to `True`. Defaults to an empty dictionary.
-    2.7. reasoning (list[Reasoning]): The reasoning objects, if the `thinking` parameter is set to `True`.
-        Defaults to an empty list.
-    2.8. citations (list[Chunk]): The citations. Currently not supported. Defaults to an empty list.
-    2.9. code_exec_results (list[CodeExecResult]): The code execution results. Currently not supported.
-        Defaults to an empty list.
-    2.10. mcp_calls (list[MCPCall]): The MCP calls. Currently not supported. Defaults to an empty list.
+    1. `str`: A text response.
+    2. `LMOutput`: A Pydantic model that may contain the following attributes:
+    2.1. response (str)
+    2.2. tool_calls (list[ToolCall])
+    2.3. structured_output (dict[str, Any] | BaseModel | None)
+    2.4. token_usage (TokenUsage | None)
+    2.5. duration (float | None)
+    2.6. finish_details (dict[str, Any])
+    2.7. reasoning (list[Reasoning])
     '''
     client: Incomplete
     thinking: Incomplete
     thinking_budget: Incomplete
-    def __init__(self, model_name: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, thinking: bool = False, thinking_budget: int =
+    def __init__(self, model_name: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, thinking: bool = False, thinking_budget: int = ..., simplify_events: bool = False) -> None:
         """Initializes the AnthropicLmInvoker instance.
 
         Args:
@@ -309,6 +300,9 @@ class AnthropicLMInvoker(BaseLMInvoker):
             thinking (bool, optional): Whether to enable thinking. Only allowed for thinking models. Defaults to False.
             thinking_budget (int, optional): The tokens allocated for the thinking process. Must be greater than or
                 equal to 1024. Only allowed for thinking models. Defaults to DEFAULT_THINKING_BUDGET.
+            simplify_events (bool, optional): Temporary parameter to control the streamed events format.
+                When True, uses the simplified events format. When False, uses the legacy events format for
+                backward compatibility. Will be removed in v0.6. Defaults to False.
 
         Raises:
             ValueError:
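A sketch of opting into the new event format on this invoker; the parameter names come from the new `__init__` signature above, while the model name and budget value are placeholders, not taken from this diff:

```python
from gllm_inference.lm_invoker.anthropic_lm_invoker import AnthropicLMInvoker

lm_invoker = AnthropicLMInvoker(
    model_name="claude-sonnet-4",  # hypothetical model name
    thinking=True,                 # only allowed for thinking models
    thinking_budget=2048,          # the docstring requires >= 1024
    simplify_events=True,          # emit the new simplified streamed events
)
```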
gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi
@@ -152,9 +152,9 @@ class AzureOpenAILMInvoker(OpenAILMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -191,43 +191,35 @@ class AzureOpenAILMInvoker(OpenAILMInvoker):
     )
     ```
 
-    When streaming is enabled along with reasoning summary, the reasoning summary token will be streamed with the
-    `EventType.DATA` event type.
-
     Streaming output example:
     ```python
-    {"type": "
-    {"type": "
-    {"type": "
-    {"type": "
+    {"type": "thinking_start", "value": "", ...}
+    {"type": "thinking", "value": "Let me think ", ...}
+    {"type": "thinking", "value": "about it...", ...}
+    {"type": "thinking_end", "value": "", ...}
+    {"type": "response", "value": "Golden retriever ", ...}
+    {"type": "response", "value": "is a good dog breed.", ...}
     ```
+    Note: By default, the thinking token will be streamed with the legacy `EventType.DATA` event type.
+    To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
+    LM invoker initialization. The legacy event format support will be removed in v0.6.
 
     Setting reasoning-related parameters for non-reasoning models will raise an error.
 
     Output types:
     The output of the `AzureOpenAILMInvoker` can either be:
-    1. `str`:
-    2. `LMOutput`: A Pydantic model
-    2.1. response (str)
-    2.2. tool_calls (list[ToolCall])
-
-    2.
-
-    2.
-
-    2.5. duration (float | None): The duration of the invocation in seconds, if the `output_analytics`
-        parameter is set to `True`. Defaults to None.
-    2.6. finish_details (dict[str, Any] | None): The details about how the generation finished, if the
-        `output_analytics` parameter is set to `True`. Defaults to None.
-    2.7. reasoning (list[Reasoning]): The reasoning objects, if the `reasoning_summary` parameter is provided
-        for reasoning models. Defaults to an empty list.
-    2.8. citations (list[Chunk]): The citations. Currently not supported. Defaults to an empty list.
-    2.9. code_exec_results (list[CodeExecResult]): The code execution results. Currently not supported.
-        Defaults to an empty list.
-    2.10. mcp_calls (list[MCPCall]): The MCP calls. Currently not supported. Defaults to an empty list.
+    1. `str`: A text response.
+    2. `LMOutput`: A Pydantic model that may contain the following attributes:
+    2.1. response (str)
+    2.2. tool_calls (list[ToolCall])
+    2.3. structured_output (dict[str, Any] | BaseModel | None)
+    2.4. token_usage (TokenUsage | None)
+    2.5. duration (float | None)
+    2.6. finish_details (dict[str, Any] | None)
+    2.7. reasoning (list[Reasoning])
     '''
     client_kwargs: Incomplete
-    def __init__(self, azure_endpoint: str, azure_deployment: str, api_key: str | None = None, api_version: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, reasoning_summary: ReasoningSummary | None = None) -> None:
+    def __init__(self, azure_endpoint: str, azure_deployment: str, api_key: str | None = None, api_version: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, reasoning_summary: ReasoningSummary | None = None, simplify_events: bool = False) -> None:
         """Initializes a new instance of the AzureOpenAILMInvoker class.
 
         Args:
@@ -251,6 +243,9 @@ class AzureOpenAILMInvoker(OpenAILMInvoker):
                 for non-reasoning models. If None, the model will perform medium reasoning effort. Defaults to None.
             reasoning_summary (ReasoningSummary | None, optional): The reasoning summary level for reasoning models.
                 Not allowed for non-reasoning models. If None, no summary will be generated. Defaults to None.
+            simplify_events (bool, optional): Temporary parameter to control the streamed events format.
+                When True, uses the simplified events format. When False, uses the legacy events format for
+                backward compatibility. Will be removed in v0.6. Defaults to False.
 
         Raises:
             ValueError:
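A sketch of the Azure variant with the new flag; the parameter names follow the new signature above, and the endpoint, deployment, and API version values are placeholders:

```python
from gllm_inference.lm_invoker.azure_openai_lm_invoker import AzureOpenAILMInvoker

lm_invoker = AzureOpenAILMInvoker(
    azure_endpoint="https://my-resource.openai.azure.com",  # placeholder endpoint
    azure_deployment="my-gpt-deployment",                   # placeholder deployment name
    api_version="2024-06-01",                               # placeholder API version
    simplify_events=True,  # new simplified streamed events; legacy format remains the default
)
```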
gllm_inference/lm_invoker/bedrock_lm_invoker.pyi
@@ -149,9 +149,9 @@ class BedrockLMInvoker(BaseLMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -162,24 +162,14 @@ class BedrockLMInvoker(BaseLMInvoker):
 
     Output types:
     The output of the `BedrockLMInvoker` can either be:
-    1. `str`:
-    2. `LMOutput`: A Pydantic model
-    2.1. response (str)
-    2.2. tool_calls (list[ToolCall])
-
-    2.
-
-    2.
-        set to `True`. Defaults to None.
-    2.5. duration (float | None): The duration of the invocation in seconds, if the `output_analytics`
-        parameter is set to `True`. Defaults to None.
-    2.6. finish_details (dict[str, Any]): The details about how the generation finished, if the
-        `output_analytics` parameter is set to `True`. Defaults to an empty dictionary.
-    2.7. reasoning (list[Reasoning]): The reasoning objects. Currently not supported. Defaults to an empty list.
-    2.8. citations (list[Chunk]): The citations. Currently not supported. Defaults to an empty list.
-    2.9. code_exec_results (list[CodeExecResult]): The code execution results. Currently not supported.
-        Defaults to an empty list.
-    2.10. mcp_calls (list[MCPCall]): The MCP calls. Currently not supported. Defaults to an empty list.
+    1. `str`: A text response.
+    2. `LMOutput`: A Pydantic model that may contain the following attributes:
+    2.1. response (str)
+    2.2. tool_calls (list[ToolCall])
+    2.3. structured_output (dict[str, Any] | BaseModel | None)
+    2.4. token_usage (TokenUsage | None)
+    2.5. duration (float | None)
+    2.6. finish_details (dict[str, Any] | None)
    '''
    session: Incomplete
    client_kwargs: Incomplete
gllm_inference/lm_invoker/datasaur_lm_invoker.pyi
@@ -3,7 +3,7 @@ from gllm_core.event import EventEmitter as EventEmitter
 from gllm_core.schema.tool import Tool as Tool
 from gllm_core.utils.retry import RetryConfig as RetryConfig
 from gllm_inference.constants import DOCUMENT_MIME_TYPES as DOCUMENT_MIME_TYPES, INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
-from gllm_inference.lm_invoker.
+from gllm_inference.lm_invoker.openai_chat_completions_lm_invoker import OpenAIChatCompletionsLMInvoker as OpenAIChatCompletionsLMInvoker
 from gllm_inference.lm_invoker.schema.datasaur import InputType as InputType, Key as Key
 from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, Message as Message, ModelId as ModelId, ModelProvider as ModelProvider, ResponseSchema as ResponseSchema, ToolCall as ToolCall, ToolResult as ToolResult
 from langchain_core.tools import Tool as LangChainTool
@@ -11,7 +11,7 @@ from typing import Any
 
 SUPPORTED_ATTACHMENTS: Incomplete
 
-class DatasaurLMInvoker(
+class DatasaurLMInvoker(OpenAIChatCompletionsLMInvoker):
     '''A language model invoker to interact with Datasaur LLM Projects Deployment API.
 
     Attributes:
@@ -72,9 +72,9 @@ class DatasaurLMInvoker(OpenAICompatibleLMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -103,23 +103,13 @@ class DatasaurLMInvoker(OpenAICompatibleLMInvoker):
 
     Output types:
     The output of the `DatasaurLMInvoker` can either be:
-    1. `str`:
-    2. `LMOutput`: A Pydantic model
-    2.1. response (str)
-    2.2.
-    2.3.
-
-    2.
-        set to `True`. Defaults to None.
-    2.5. duration (float | None): The duration of the invocation in seconds, if the `output_analytics`
-        parameter is set to `True`. Defaults to None.
-    2.6. finish_details (dict[str, Any] | None): The details about how the generation finished, if the
-        `output_analytics` parameter is set to `True`. Defaults to None.
-    2.7. reasoning (list[Reasoning]): The reasoning objects. Currently not supported. Defaults to an empty list.
-    2.8. citations (list[Chunk]): The citations. Currently not supported. Defaults to an empty list.
-    2.9. code_exec_results (list[CodeExecResult]): The code execution results. Currently not supported.
-        Defaults to an empty list.
-    2.10. mcp_calls (list[MCPCall]): The MCP calls. Currently not supported. Defaults to an empty list.
+    1. `str`: A text response.
+    2. `LMOutput`: A Pydantic model that may contain the following attributes:
+    2.1. response (str)
+    2.2. token_usage (TokenUsage | None)
+    2.3. duration (float | None)
+    2.4. finish_details (dict[str, Any] | None)
+    2.5. citations (list[Chunk])
    '''
    client_kwargs: Incomplete
    citations: Incomplete
gllm_inference/lm_invoker/google_lm_invoker.pyi
@@ -14,6 +14,7 @@ from typing import Any
 SUPPORTED_ATTACHMENTS: Incomplete
 DEFAULT_THINKING_BUDGET: int
 REQUIRE_THINKING_MODEL_PREFIX: Incomplete
+IMAGE_GENERATION_MODELS: Incomplete
 YOUTUBE_URL_PATTERN: Incomplete
 
 class GoogleLMInvoker(BaseLMInvoker):
@@ -30,6 +31,7 @@ class GoogleLMInvoker(BaseLMInvoker):
         structured response as defined by the schema. Supports both Pydantic BaseModel and JSON schema dictionary.
     output_analytics (bool): Whether to output the invocation analytics.
     retry_config (RetryConfig | None): The retry configuration for the language model.
+    generate_image (bool): Whether to generate image. Only allowed for image generation models.
     thinking (bool): Whether to enable thinking. Only allowed for thinking models.
     thinking_budget (int): The tokens allowed for thinking process. Only allowed for thinking models.
         If set to -1, the model will control the budget automatically.
@@ -80,6 +82,26 @@ class GoogleLMInvoker(BaseLMInvoker):
     result = await lm_invoker.invoke([text, image])
     ```
 
+    Image generation:
+    The `GoogleLMInvoker` supports image generation. This can be done by using an image generation model,
+    such as `gemini-2.5-flash-image`. Streaming is disabled for image generation models.
+    The generated image will be stored in the `attachments` attribute in the output.
+
+    Usage example:
+    ```python
+    lm_invoker = GoogleLMInvoker("gemini-2.5-flash-image")
+    result = await lm_invoker.invoke("Create a picture...")
+    result.attachments[0].write_to_file("path/to/local/image.png")
+    ```
+
+    Output example:
+    ```python
+    LMOutput(
+        response="Let me call the tools...",
+        attachments=[Attachment(filename="image.png", mime_type="image/png", data=b"...")],
+    )
+    ```
+
     Tool calling:
     Tool calling is a feature that allows the language model to call tools to perform tasks.
     Tools can be passed to the via the `tools` parameter as a list of `Tool` objects.
@@ -182,9 +204,9 @@ class GoogleLMInvoker(BaseLMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -216,17 +238,18 @@ class GoogleLMInvoker(BaseLMInvoker):
     )
     ```
 
-    When streaming is enabled, the thinking token will be streamed with the `EventType.DATA` event type.
-
     Streaming output example:
     ```python
-    {"type": "
-    {"type": "
-    {"type": "
-    {"type": "
+    {"type": "thinking_start", "value": "", ...}
+    {"type": "thinking", "value": "Let me think ", ...}
+    {"type": "thinking", "value": "about it...", ...}
+    {"type": "thinking_end", "value": "", ...}
     {"type": "response", "value": "Golden retriever ", ...}
     {"type": "response", "value": "is a good dog breed.", ...}
     ```
+    Note: By default, the thinking token will be streamed with the legacy `EventType.DATA` event type.
+    To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
+    LM invoker initialization. The legacy event format support will be removed in v0.6.
 
     When thinking is enabled, the amount of tokens allocated for the thinking process can be set via the
     `thinking_budget` parameter. The `thinking_budget`:
@@ -236,30 +259,22 @@ class GoogleLMInvoker(BaseLMInvoker):
 
     Output types:
     The output of the `GoogleLMInvoker` can either be:
-    1. `str`:
-    2. `LMOutput`: A Pydantic model
-    2.1. response (str)
-    2.2.
-
-    2.
-
-    2.
-
-    2.
-        parameter is set to `True`. Defaults to None.
-    2.6. finish_details (dict[str, Any] | None): The details about how the generation finished, if the
-        `output_analytics` parameter is set to `True`. Defaults to None.
-    2.7. reasoning (list[Reasoning]): The reasoning objects, if the `thinking` parameter is set to `True`.
-        Defaults to an empty list.
-    2.8. citations (list[Chunk]): The citations. Currently not supported. Defaults to an empty list.
-    2.9. code_exec_results (list[CodeExecResult]): The code execution results. Currently not supported.
-        Defaults to an empty list.
-    2.10. mcp_calls (list[MCPCall]): The MCP calls. Currently not supported. Defaults to an empty list.
+    1. `str`: A text response.
+    2. `LMOutput`: A Pydantic model that may contain the following attributes:
+    2.1. response (str)
+    2.2. attachments (list[Attachment])
+    2.3. tool_calls (list[ToolCall])
+    2.4. structured_output (dict[str, Any] | BaseModel | None)
+    2.5. token_usage (TokenUsage | None)
+    2.6. duration (float | None)
+    2.7. finish_details (dict[str, Any])
+    2.8. reasoning (list[Reasoning])
     '''
     client_params: Incomplete
+    generate_image: Incomplete
     thinking: Incomplete
     thinking_budget: Incomplete
-    def __init__(self, model_name: str, api_key: str | None = None, credentials_path: str | None = None, project_id: str | None = None, location: str = 'us-central1', model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, thinking: bool | None = None, thinking_budget: int =
+    def __init__(self, model_name: str, api_key: str | None = None, credentials_path: str | None = None, project_id: str | None = None, location: str = 'us-central1', model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, thinking: bool | None = None, thinking_budget: int = ..., simplify_events: bool = False) -> None:
         '''Initializes a new instance of the GoogleLMInvoker class.
 
         Args:
@@ -288,6 +303,9 @@ class GoogleLMInvoker(BaseLMInvoker):
                 Defaults to True for Gemini 2.5 Pro models and False for other models.
             thinking_budget (int, optional): The tokens allowed for thinking process. Only allowed for thinking models.
                 Defaults to -1, in which case the model will control the budget automatically.
+            simplify_events (bool, optional): Temporary parameter to control the streamed events format.
+                When True, uses the simplified events format. When False, uses the legacy events format for
+                backward compatibility. Will be removed in v0.6. Defaults to False.
 
         Note:
            If neither `api_key` nor `credentials_path` is provided, Google Gen AI will be used by default.
gllm_inference/lm_invoker/langchain_lm_invoker.pyi
@@ -175,9 +175,9 @@ class LangChainLMInvoker(BaseLMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -188,24 +188,14 @@ class LangChainLMInvoker(BaseLMInvoker):
 
     Output types:
     The output of the `LangChainLMInvoker` can either be:
-    1. `str`:
-    2. `LMOutput`: A Pydantic model
-    2.1. response (str)
-    2.2. tool_calls (list[ToolCall])
-
-    2.
-
-    2.
-        set to `True`. Defaults to None.
-    2.5. duration (float | None): The duration of the invocation in seconds, if the `output_analytics`
-        parameter is set to `True`. Defaults to None.
-    2.6. finish_details (dict[str, Any] | None): The details about how the generation finished, if the
-        `output_analytics` parameter is set to `True`. Defaults to None.
-    2.7. reasoning (list[Reasoning]): The reasoning objects. Currently not supported. Defaults to an empty list.
-    2.8. citations (list[Chunk]): The citations. Currently not supported. Defaults to an empty list.
-    2.9. code_exec_results (list[CodeExecResult]): The code execution results. Currently not supported.
-        Defaults to an empty list.
-    2.10. mcp_calls (list[MCPCall]): The MCP calls. Currently not supported. Defaults to an empty list.
+    1. `str`: A text response.
+    2. `LMOutput`: A Pydantic model that may contain the following attributes:
+    2.1. response (str)
+    2.2. tool_calls (list[ToolCall])
+    2.3. structured_output (dict[str, Any] | BaseModel | None)
+    2.4. token_usage (TokenUsage | None)
+    2.5. duration (float | None)
+    2.6. finish_details (dict[str, Any])
     '''
     model: Incomplete
     def __init__(self, model: BaseChatModel | None = None, model_class_path: str | None = None, model_name: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None) -> None:
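For reference, a sketch of the LangChain invoker, whose `__init__` is unchanged in this release; the class path and model name below are placeholders, while the parameter names come from the signature shown above:

```python
from gllm_inference.lm_invoker.langchain_lm_invoker import LangChainLMInvoker

lm_invoker = LangChainLMInvoker(
    model_class_path="langchain_openai.ChatOpenAI",  # hypothetical LangChain chat model path
    model_name="gpt-4o",                             # hypothetical model name
)
```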
gllm_inference/lm_invoker/litellm_lm_invoker.pyi
@@ -2,7 +2,7 @@ from _typeshed import Incomplete
 from gllm_core.event import EventEmitter as EventEmitter
 from gllm_core.schema.tool import Tool as Tool
 from gllm_core.utils.retry import RetryConfig as RetryConfig
-from gllm_inference.lm_invoker.
+from gllm_inference.lm_invoker.openai_chat_completions_lm_invoker import OpenAIChatCompletionsLMInvoker as OpenAIChatCompletionsLMInvoker
 from gllm_inference.lm_invoker.openai_lm_invoker import ReasoningEffort as ReasoningEffort
 from gllm_inference.schema import AttachmentType as AttachmentType, LMOutput as LMOutput, ModelId as ModelId, ModelProvider as ModelProvider, ResponseSchema as ResponseSchema
 from langchain_core.tools import Tool as LangChainTool
@@ -10,7 +10,7 @@ from typing import Any
 
 SUPPORTED_ATTACHMENTS: Incomplete
 
-class LiteLLMLMInvoker(
+class LiteLLMLMInvoker(OpenAIChatCompletionsLMInvoker):
     '''A language model invoker to interact with language models using LiteLLM.
 
     Attributes:
@@ -156,9 +156,9 @@ class LiteLLMLMInvoker(OpenAICompatibleLMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -192,44 +192,36 @@ class LiteLLMLMInvoker(OpenAICompatibleLMInvoker):
     )
     ```
 
-    When streaming is enabled along with reasoning and the provider supports reasoning output, the reasoning token
-    will be streamed with the `EventType.DATA` event type.
-
     Streaming output example:
     ```python
-    {"type": "
-    {"type": "
-    {"type": "
-    {"type": "
+    {"type": "thinking_start", "value": "", ...}
+    {"type": "thinking", "value": "Let me think ", ...}
+    {"type": "thinking", "value": "about it...", ...}
+    {"type": "thinking_end", "value": "", ...}
     {"type": "response", "value": "Golden retriever ", ...}
     {"type": "response", "value": "is a good dog breed.", ...}
+    ```
+    Note: By default, the thinking token will be streamed with the legacy `EventType.DATA` event type.
+    To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
+    LM invoker initialization. The legacy event format support will be removed in v0.6.
 
     Setting reasoning-related parameters for non-reasoning models will raise an error.
 
 
     Output types:
     The output of the `LiteLLMLMInvoker` can either be:
-    1. `str`:
-    2. `LMOutput`: A Pydantic model
-    2.1. response (str)
-    2.2. tool_calls (list[ToolCall])
-
-    2.
-
-    2.
-
-    2.5. duration (float | None): The duration of the invocation in seconds, if the `output_analytics`
-        parameter is set to `True`. Defaults to None.
-    2.6. finish_details (dict[str, Any] | None): The details about how the generation finished, if the
-        `output_analytics` parameter is set to `True`. Defaults to None.
-    2.7. reasoning (list[Reasoning]): The reasoning objects. Currently not supported. Defaults to an empty list.
-    2.8. citations (list[Chunk]): The citations. Currently not supported. Defaults to an empty list.
-    2.9. code_exec_results (list[CodeExecResult]): The code execution results. Currently not supported.
-        Defaults to an empty list.
-    2.10. mcp_calls (list[MCPCall]): The MCP calls. Currently not supported. Defaults to an empty list.
+    1. `str`: A text response.
+    2. `LMOutput`: A Pydantic model that may contain the following attributes:
+    2.1. response (str)
+    2.2. tool_calls (list[ToolCall])
+    2.3. structured_output (dict[str, Any] | BaseModel | None)
+    2.4. token_usage (TokenUsage | None)
+    2.5. duration (float | None)
+    2.6. finish_details (dict[str, Any])
+    2.7. reasoning (list[Reasoning])
     '''
     completion: Incomplete
-    def __init__(self, model_id: str, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None) -> None:
+    def __init__(self, model_id: str, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, simplify_events: bool = False) -> None:
         """Initializes a new instance of the LiteLLMLMInvoker class.
 
         Args:
@@ -246,4 +238,7 @@ class LiteLLMLMInvoker(OpenAICompatibleLMInvoker):
                 Defaults to None, in which case a default config with no retry and 30.0 seconds timeout will be used.
             reasoning_effort (ReasoningEffort | None, optional): The reasoning effort for reasoning models.
                 Defaults to None.
+            simplify_events (bool, optional): Temporary parameter to control the streamed events format.
+                When True, uses the simplified events format. When False, uses the legacy events format for
+                backward compatibility. Will be removed in v0.6. Defaults to False.
        """
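A sketch of the LiteLLM invoker with the new flag; the parameter names come from the new signature above, and the `provider/model` id is a placeholder:

```python
from gllm_inference.lm_invoker.litellm_lm_invoker import LiteLLMLMInvoker

lm_invoker = LiteLLMLMInvoker(
    model_id="openai/gpt-4o",  # hypothetical LiteLLM provider/model id
    simplify_events=True,      # new simplified streamed events
)
```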
gllm_inference/lm_invoker/lm_invoker.pyi
@@ -56,7 +56,7 @@ class BaseLMInvoker(ABC, metaclass=abc.ABCMeta):
     response_schema: Incomplete
     output_analytics: Incomplete
     retry_config: Incomplete
-    def __init__(self, model_id: ModelId, default_hyperparameters: dict[str, Any] | None = None, supported_attachments: set[str] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None) -> None:
+    def __init__(self, model_id: ModelId, default_hyperparameters: dict[str, Any] | None = None, supported_attachments: set[str] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, simplify_events: bool = False) -> None:
         """Initializes a new instance of the BaseLMInvoker class.
 
         Args:
@@ -73,6 +73,9 @@ class BaseLMInvoker(ABC, metaclass=abc.ABCMeta):
             output_analytics (bool, optional): Whether to output the invocation analytics. Defaults to False.
             retry_config (RetryConfig | None, optional): The retry configuration for the language model.
                 Defaults to None, in which case a default config with no retry and 30.0 seconds timeout will be used.
+            simplify_events (bool, optional): Temporary parameter to control the streamed events format.
+                When True, uses the simplified events format. When False, uses the legacy events format for
+                backward compatibility. Will be removed in v0.6. Defaults to False.
         """
     @property
     def model_id(self) -> str:
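Since `simplify_events` now lives on the base class, every subclass gains it by forwarding the keyword. A hypothetical subclass sketch (everything except the `BaseLMInvoker` parameter names and import paths is invented; abstract methods are omitted for brevity):

```python
from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker
from gllm_inference.schema import ModelId

class MyLMInvoker(BaseLMInvoker):
    # Hypothetical subclass: shows the new keyword being threaded through
    # super().__init__; the required abstract methods are not shown here.
    def __init__(self, model_id: ModelId, simplify_events: bool = False) -> None:
        super().__init__(model_id, simplify_events=simplify_events)
```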
gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi
@@ -171,9 +171,9 @@ class OpenAIChatCompletionsLMInvoker(BaseLMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -207,44 +207,35 @@ class OpenAIChatCompletionsLMInvoker(BaseLMInvoker):
     )
     ```
 
-    When streaming is enabled along with reasoning and the provider supports reasoning output, the reasoning token
-    will be streamed with the `EventType.DATA` event type.
-
     Streaming output example:
     ```python
-    {"type": "
-    {"type": "
-    {"type": "
-    {"type": "
+    {"type": "thinking_start", "value": "", ...}
+    {"type": "thinking", "value": "Let me think ", ...}
+    {"type": "thinking", "value": "about it...", ...}
+    {"type": "thinking_end", "value": "", ...}
     {"type": "response", "value": "Golden retriever ", ...}
     {"type": "response", "value": "is a good dog breed.", ...}
     ```
+    Note: By default, the thinking token will be streamed with the legacy `EventType.DATA` event type.
+    To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
+    LM invoker initialization. The legacy event format support will be removed in v0.6.
 
     Setting reasoning-related parameters for non-reasoning models will raise an error.
 
     Output types:
     The output of the `OpenAIChatCompletionsLMInvoker` can either be:
-    1. `str`:
-    2. `LMOutput`: A Pydantic model
-    2.1. response (str)
-    2.2. tool_calls (list[ToolCall])
-
-    2.
-
-    2.
-
-    2.5. duration (float | None): The duration of the invocation in seconds, if the `output_analytics`
-        parameter is set to `True`. Defaults to None.
-    2.6. finish_details (dict[str, Any] | None): The details about how the generation finished, if the
-        `output_analytics` parameter is set to `True`. Defaults to None.
-    2.7. reasoning (list[Reasoning]): The reasoning objects. Currently not supported. Defaults to an empty list.
-    2.8. citations (list[Chunk]): The citations. Currently not supported. Defaults to an empty list.
-    2.9. code_exec_results (list[CodeExecResult]): The code execution results. Currently not supported.
-        Defaults to an empty list.
-    2.10. mcp_calls (list[MCPCall]): The MCP calls. Currently not supported. Defaults to an empty list.
+    1. `str`: A text response.
+    2. `LMOutput`: A Pydantic model that may contain the following attributes:
+    2.1. response (str)
+    2.2. tool_calls (list[ToolCall])
+    2.3. structured_output (dict[str, Any] | BaseModel | None)
+    2.4. token_usage (TokenUsage | None)
+    2.5. duration (float | None)
+    2.6. finish_details (dict[str, Any])
+    2.7. reasoning (list[Reasoning])
     '''
     client_kwargs: Incomplete
-    def __init__(self, model_name: str, api_key: str | None = None, base_url: str = ..., model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None) -> None:
+    def __init__(self, model_name: str, api_key: str | None = None, base_url: str = ..., model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, simplify_events: bool = False) -> None:
         '''Initializes a new instance of the OpenAIChatCompletionsLMInvoker class.
 
         Args:
@@ -266,6 +257,9 @@ class OpenAIChatCompletionsLMInvoker(BaseLMInvoker):
             retry_config (RetryConfig | None, optional): The retry configuration for the language model.
                 Defaults to None, in which case a default config with no retry and 30.0 seconds timeout will be used.
             reasoning_effort (str | None, optional): The reasoning effort for the language model. Defaults to None.
+            simplify_events (bool, optional): Temporary parameter to control the streamed events format.
+                When True, uses the simplified events format. When False, uses the legacy events format for
+                backward compatibility. Will be removed in v0.6. Defaults to False.
         '''
     def set_response_schema(self, response_schema: ResponseSchema | None) -> None:
         """Sets the response schema for the OpenAI language model.
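A sketch of pointing this invoker at an OpenAI-compatible endpoint; the parameter names come from the new signature above, while the model name and URL are placeholders:

```python
from gllm_inference.lm_invoker.openai_chat_completions_lm_invoker import OpenAIChatCompletionsLMInvoker

lm_invoker = OpenAIChatCompletionsLMInvoker(
    model_name="my-model",                # hypothetical model name
    base_url="http://localhost:8000/v1",  # hypothetical OpenAI-compatible endpoint
    simplify_events=True,                 # new simplified streamed events
)
```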
gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi
@@ -25,7 +25,7 @@ class OpenAICompatibleLMInvoker(OpenAIChatCompletionsLMInvoker):
 
     This class is deprecated and will be removed in v0.6. Please use the `OpenAIChatCompletionsLMInvoker` class instead.
     """
-    def __init__(self, model_name: str, base_url: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None) -> None:
+    def __init__(self, model_name: str, base_url: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, simplify_events: bool = False) -> None:
         '''Initializes a new instance of the OpenAICompatibleLMInvoker class.
 
         Args:
@@ -46,4 +46,7 @@ class OpenAICompatibleLMInvoker(OpenAIChatCompletionsLMInvoker):
             retry_config (RetryConfig | None, optional): The retry configuration for the language model.
                 Defaults to None, in which case a default config with no retry and 30.0 seconds timeout will be used.
             reasoning_effort (str | None, optional): The reasoning effort for the language model. Defaults to None.
+            simplify_events (bool, optional): Temporary parameter to control the streamed events format.
+                When True, uses the simplified events format. When False, uses the legacy events format for
+                backward compatibility. Will be removed in v0.6. Defaults to False.
        '''
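Per the deprecation note above, a migration sketch from the deprecated class to its replacement; the signatures come from this diff (note `base_url` is a required positional argument on the old class but has a default on the new one), while the model name and URL are placeholders:

```python
from gllm_inference.lm_invoker.openai_chat_completions_lm_invoker import OpenAIChatCompletionsLMInvoker

# Before (deprecated, removal planned for v0.6):
# lm_invoker = OpenAICompatibleLMInvoker("my-model", "http://localhost:8000/v1")

# After:
lm_invoker = OpenAIChatCompletionsLMInvoker(
    model_name="my-model",
    base_url="http://localhost:8000/v1",
)
```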
gllm_inference/lm_invoker/openai_lm_invoker.pyi
@@ -176,9 +176,9 @@ class OpenAILMInvoker(BaseLMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -216,18 +216,18 @@ class OpenAILMInvoker(BaseLMInvoker):
     )
     ```
 
-    When streaming is enabled along with reasoning summary, the reasoning summary token will be streamed with the
-    `EventType.DATA` event type.
-
     Streaming output example:
     ```python
-    {"type": "
-    {"type": "
-    {"type": "
-    {"type": "
+    {"type": "thinking_start", "value": "", ...}
+    {"type": "thinking", "value": "Let me think ", ...}
+    {"type": "thinking", "value": "about it...", ...}
+    {"type": "thinking_end", "value": "", ...}
     {"type": "response", "value": "Golden retriever ", ...}
     {"type": "response", "value": "is a good dog breed.", ...}
     ```
+    Note: By default, the thinking token will be streamed with the legacy `EventType.DATA` event type.
+    To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
+    LM invoker initialization. The legacy event format support will be removed in v0.6.
 
     Setting reasoning-related parameters for non-reasoning models will raise an error.
 
@@ -262,14 +262,16 @@ class OpenAILMInvoker(BaseLMInvoker):
     )
     ```
 
-    When streaming is enabled, the MCP call activities will be streamed with the `EventType.DATA` event type.
     Streaming output example:
     ```python
-    {"type": "
-    {"type": "
+    {"type": "activity", "value": {"type": "mcp_list_tools", ...}, ...}
+    {"type": "activity", "value": {"type": "mcp_call", ...}, ...}
     {"type": "response", "value": "The result ", ...}
     {"type": "response", "value": "is 10.", ...}
     ```
+    Note: By default, the activity token will be streamed with the legacy `EventType.DATA` event type.
+    To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
+    LM invoker initialization. The legacy event format support will be removed in v0.6.
 
     Code interpreter:
     The code interpreter is a feature that allows the language model to write and run Python code in a
@@ -287,14 +289,8 @@ class OpenAILMInvoker(BaseLMInvoker):
     Messages example:
     ```python
     messages = [
-        Message(
-
-            contents=["You are a data analyst. Use the python tool to generate a file."],
-        ),
-        Message(
-            role=MessageRole.USER,
-            contents=["Show an histogram of the following data: [1, 2, 1, 4, 1, 2, 4, 2, 3, 1]"],
-        ),
+        Message.system("You are a data analyst. Use the python tool to generate a file."),
+        Message.user("Show an histogram of the following data: [1, 2, 1, 4, 1, 2, 4, 2, 3, 1]"),
     ]
     ```
 
@@ -315,16 +311,18 @@ class OpenAILMInvoker(BaseLMInvoker):
|
|
|
315
311
|
)
|
|
316
312
|
```
|
|
317
313
|
|
|
318
|
-
When streaming is enabled, the executed code will be streamed with the `EventType.DATA` event type.
|
|
319
314
|
Streaming output example:
|
|
320
315
|
```python
|
|
321
|
-
{"type": "
|
|
322
|
-
{"type": "
|
|
323
|
-
{"type": "
|
|
324
|
-
{"type": "
|
|
316
|
+
{"type": "code_start", "value": ""}\', ...}
|
|
317
|
+
{"type": "code", "value": "import matplotlib"}\', ...}
|
|
318
|
+
{"type": "code", "value": ".pyplot as plt..."}\', ...}
|
|
319
|
+
{"type": "code_end", "value": ""}\', ...}
|
|
325
320
|
{"type": "response", "value": "The histogram ", ...}
|
|
326
321
|
{"type": "response", "value": "is attached.", ...}
|
|
327
322
|
```
|
|
323
|
+
Note: By default, the code token will be streamed with the legacy `EventType.DATA` event type.
|
|
324
|
+
To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
|
|
325
|
+
LM invoker initialization. The legacy event format support will be removed in v0.6.
|
|
328
326
|
|
|
329
327
|
Web search:
|
|
330
328
|
The web search is a feature that allows the language model to search the web for relevant information.
|
|
@@ -359,40 +357,33 @@ class OpenAILMInvoker(BaseLMInvoker):
)
```

- When streaming is enabled, the web search activities will be streamed with the `EventType.DATA` event type.
Streaming output example:
```python
- {"type": "
+ {"type": "activity", "value": {"query": "search query"}, ...}
{"type": "response", "value": "The winner of the match ", ...}
{"type": "response", "value": "is team A ([Example title](https://www.example.com)).", ...}
```
+ Note: By default, the activity token will be streamed with the legacy `EventType.DATA` event type.
+ To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
+ LM invoker initialization. The legacy event format support will be removed in v0.6.

Output types:
The output of the `OpenAILMInvoker` can either be:
- 1. `str`:
- 2. `LMOutput`: A Pydantic model
- 2.1. response (str)
- 2.2. tool_calls (list[ToolCall])
-
- 2.
-
- 2.
-
- 2.
-
- 2.
- `output_analytics` parameter is set to `True`. Defaults to None.
- 2.7. reasoning (list[Reasoning]): The reasoning objects, if the `reasoning_summary` parameter is provided
- for reasoning models. Defaults to an empty list.
- 2.8. citations (list[Chunk]): The citations, if the web_search is enabled and the language model decides
- to cite the relevant sources. Defaults to an empty list.
- 2.9. code_exec_results (list[CodeExecResult]): The code execution results, if the code interpreter is
- enabled and the language model decides to execute any codes. Defaults to an empty list.
- 2.10. mcp_calls (list[MCPCall]): The MCP calls, if the MCP servers are provided and the language model
- decides to invoke MCP tools. Defaults to an empty list.
+ 1. `str`: A text response.
+ 2. `LMOutput`: A Pydantic model that may contain the following attributes:
+ 2.1. response (str)
+ 2.2. tool_calls (list[ToolCall])
+ 2.3. structured_output (dict[str, Any] | BaseModel | None)
+ 2.4. token_usage (TokenUsage | None)
+ 2.5. duration (float | None)
+ 2.6. finish_details (dict[str, Any])
+ 2.7. reasoning (list[Reasoning])
+ 2.8. citations (list[Chunk])
+ 2.9. code_exec_results (list[CodeExecResult])
+ 2.10. mcp_calls (list[MCPCall])
'''
client_kwargs: Incomplete
- def __init__(self, model_name: str, api_key: str | None = None, base_url: str = ..., model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, reasoning_summary: ReasoningSummary | None = None, mcp_servers: list[MCPServer] | None = None, code_interpreter: bool = False, web_search: bool = False) -> None:
+ def __init__(self, model_name: str, api_key: str | None = None, base_url: str = ..., model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, reasoning_summary: ReasoningSummary | None = None, mcp_servers: list[MCPServer] | None = None, code_interpreter: bool = False, web_search: bool = False, simplify_events: bool = False) -> None:
'''Initializes a new instance of the OpenAILMInvoker class.

Args:
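Because the return type is a union, downstream code typically branches on it, as in this sketch (the `LMOutput` import path comes from the RECORD listing below; the attribute names follow the list above):

```python
from gllm_inference.schema.lm_output import LMOutput

def extract_text(result: "str | LMOutput") -> str:
    """Return the text response for either output type."""
    if isinstance(result, str):
        return result
    # LMOutput carries the text in `response` alongside the optional
    # analytics attributes (token_usage, duration, finish_details, ...).
    return result.response
```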
@@ -421,6 +412,9 @@ class OpenAILMInvoker(BaseLMInvoker):
language model. Defaults to None.
code_interpreter (bool, optional): Whether to enable the code interpreter. Defaults to False.
web_search (bool, optional): Whether to enable the web search. Defaults to False.
+ simplify_events (bool, optional): Temporary parameter to control the streamed events format.
+ When True, uses the simplified events format. When False, uses the legacy events format for
+ backward compatibility. Will be removed in v0.6. Defaults to False.

Raises:
ValueError:
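A hypothetical initialization tying the new flag to the features it affects; the import path is assumed from the RECORD listing and "gpt-4o" is a placeholder model name, neither confirmed by this diff.

```python
from gllm_inference.lm_invoker.openai_lm_invoker import OpenAILMInvoker  # assumed path

lm_invoker = OpenAILMInvoker(
    model_name="gpt-4o",            # placeholder model name
    api_key="YOUR_OPENAI_API_KEY",
    code_interpreter=True,          # stream code events as documented above
    simplify_events=True,           # opt in to the simplified format before v0.6
)
```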
@@ -153,18 +153,18 @@ class XAILMInvoker(BaseLMInvoker):
)
```

- When streaming is enabled along with reasoning summary, the reasoning summary token will be streamed with the
- `EventType.DATA` event type.
-
Streaming output example:
```python
- {"type": "
- {"type": "
- {"type": "
- {"type": "
+ {"type": "thinking_start", "value": "", ...}
+ {"type": "thinking", "value": "Let me think ", ...}
+ {"type": "thinking", "value": "about it...", ...}
+ {"type": "thinking_end", "value": "", ...}
{"type": "response", "value": "Golden retriever ", ...}
{"type": "response", "value": "is a good dog breed.", ...}
```
+ Note: By default, the thinking token will be streamed with the legacy `EventType.DATA` event type.
+ To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
+ LM invoker initialization. The legacy event format support will be removed in v0.6.

Setting reasoning-related parameters for non-reasoning models will raise an error.

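The same filtering pattern applies to thinking events; this sketch groups the documented example events by type (a literal list again stands in for the real stream):

```python
from collections import defaultdict

# Group the simplified events by type; thinking_start/thinking_end carry
# empty values and only delimit the thinking phase.
events = [
    {"type": "thinking_start", "value": ""},
    {"type": "thinking", "value": "Let me think "},
    {"type": "thinking", "value": "about it..."},
    {"type": "thinking_end", "value": ""},
    {"type": "response", "value": "Golden retriever "},
    {"type": "response", "value": "is a good dog breed."},
]

parts: defaultdict[str, str] = defaultdict(str)
for event in events:
    parts[event["type"]] += event["value"]

print(parts["thinking"])  # Let me think about it...
print(parts["response"])  # Golden retriever is a good dog breed.
```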
@@ -194,9 +194,9 @@ class XAILMInvoker(BaseLMInvoker):

Retry config examples:
```python
- retry_config = RetryConfig(max_retries=0, timeout=
+ retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
- retry_config = RetryConfig(max_retries=5, timeout=
+ retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
```

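In practice the config is built once and handed to the invoker's `retry_config` parameter. A sketch; the import path is an assumption based on the `gllm_inference/schema/config.pyi` entry in the RECORD listing:

```python
from gllm_inference.schema.config import RetryConfig  # assumed path

# Bounded retries plus a per-request timeout, as in the examples above.
retry_config = RetryConfig(max_retries=3, timeout=30.0)
# Passed at construction time, e.g. XAILMInvoker(..., retry_config=retry_config).
```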
@@ -218,13 +218,13 @@ class XAILMInvoker(BaseLMInvoker):
```

When web search is enabled, the language model will search for relevant information and may cite the
- relevant sources (including from X platform). The citations will be stored as `Chunk` objects in the
- attribute in the output.
+ relevant sources (including from X platform). The citations will be stored as `Chunk` objects in the
+ `citations` attribute in the output.

Output example:
```python
LMOutput(
-     response="According to recent reports, the latest AI developments
+     response="According to recent reports, the latest AI developments... ([Source](https://example.com)).",
    citations=[
        Chunk(
            id="search_result_1",
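A sketch of reading those citations back out of the result; it relies only on the fields visible in this hunk (`response`, `citations`, and `Chunk.id`):

```python
def citation_ids(output) -> list[str]:
    """Collect cited source ids; `citations` is empty when web search is off."""
    return [chunk.id for chunk in output.citations]
```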
@@ -241,42 +241,23 @@ class XAILMInvoker(BaseLMInvoker):
)
```

- When streaming is enabled, the live search activities will be streamed with the `EventType.DATA` event type.
- This allows you to track the search process in real-time.
-
- Streaming output example:
- ```python
- {"type": "data", "value": \'{"data_type": "activity", "data_value": "{\\"query\\": \\"search query\\"}", ...}\', ...}
- {"type": "response", "value": "According to recent reports, ", ...}
- {"type": "response", "value": "the latest AI developments include...", ...}
- ```
-
Output types:
The output of the `XAILMInvoker` can either be:
- 1. `str`:
- 2. `LMOutput`: A Pydantic model
- 2.1. response (str)
- 2.2. tool_calls (list[ToolCall])
-
- 2.
-
- 2.
-
- 2.
- parameter is set to `True`. Defaults to None.
- 2.6. finish_details (dict[str, Any] | None): The details about how the generation finished, if the
- `output_analytics` parameter is set to `True`. Defaults to None.
- 2.7. reasoning (list[Reasoning]): The reasoning objects, if the `reasoning_effort` parameter is set.
- Defaults to an empty list.
- 2.8. citations (list[Chunk]): The citations, if the web_search is enabled and the language model decides
- to cite the relevant sources. Defaults to an empty list.
- 2.9. code_exec_results (list[CodeExecResult]): The code execution results. Currently not supported.
- Defaults to an empty list.
+ 1. `str`: A text response.
+ 2. `LMOutput`: A Pydantic model that may contain the following attributes:
+ 2.1. response (str)
+ 2.2. tool_calls (list[ToolCall])
+ 2.3. structured_output (dict[str, Any] | BaseModel | None)
+ 2.4. token_usage (TokenUsage | None)
+ 2.5. duration (float | None)
+ 2.6. finish_details (dict[str, Any])
+ 2.7. reasoning (list[Reasoning])
+ 2.8. citations (list[Chunk])
'''
reasoning_effort: Incomplete
web_search: Incomplete
client_params: Incomplete
- def __init__(self, model_name: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, web_search: bool = False) -> None:
+ def __init__(self, model_name: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, web_search: bool = False, simplify_events: bool = False) -> None:
"""Initializes a new instance of the XAILMInvoker class.

Args:
@@ -298,6 +279,9 @@ class XAILMInvoker(BaseLMInvoker):
reasoning_effort (ReasoningEffort | None, optional): The reasoning effort for reasoning models. Not allowed
for non-reasoning models. If None, the model will perform medium reasoning effort. Defaults to None.
web_search (bool, optional): Whether to enable the web search. Defaults to False.
+ simplify_events (bool, optional): Temporary parameter to control the streamed events format.
+ When True, uses the simplified events format. When False, uses the legacy events format for
+ backward compatibility. Will be removed in v0.6. Defaults to False.

Raises:
ValueError:
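For symmetry with the OpenAI example earlier, a hypothetical initialization of the updated class; the import path is assumed from the RECORD listing and "grok-3" is a placeholder model name:

```python
from gllm_inference.lm_invoker.xai_lm_invoker import XAILMInvoker  # assumed path

lm_invoker = XAILMInvoker(
    model_name="grok-3",        # placeholder model name
    api_key="YOUR_XAI_API_KEY",
    web_search=True,            # populates `citations` when sources are cited
    simplify_events=True,       # opt in to the simplified format before v0.6
)
```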
@@ -1,4 +1,5 @@
from gllm_core.schema import Chunk as Chunk
+ from gllm_inference.schema.attachment import Attachment as Attachment
from gllm_inference.schema.code_exec_result import CodeExecResult as CodeExecResult
from gllm_inference.schema.mcp import MCPCall as MCPCall
from gllm_inference.schema.reasoning import Reasoning as Reasoning
@@ -12,6 +13,8 @@ class LMOutput(BaseModel):

Attributes:
response (str): The text response. Defaults to an empty string.
+ attachments (list[Attachment]): The attachments, if the language model decides to output attachments.
+ Defaults to an empty list.
tool_calls (list[ToolCall]): The tool calls, if the language model decides to invoke tools.
Defaults to an empty list.
structured_output (dict[str, Any] | BaseModel | None): The structured output, if a response schema is defined

@@ -29,6 +32,7 @@ class LMOutput(BaseModel):
Defaults to an empty list.
"""
response: str
+ attachments: list[Attachment]
tool_calls: list[ToolCall]
structured_output: dict[str, Any] | BaseModel | None
token_usage: TokenUsage | None
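Downstream code can rely on the empty-list default when checking for generated files. A sketch using only the fields shown above (Attachment's own fields are not part of this diff, so attachments are just counted):

```python
# `attachments` defaults to an empty list, so the check needs no None guard.
def describe_output(output) -> str:
    note = f" (+{len(output.attachments)} attachment(s))" if output.attachments else ""
    return output.response + note
```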
gllm_inference.cp313-win_amd64.pyd
Binary file
@@ -1,6 +1,6 @@
Metadata-Version: 2.2
Name: gllm-inference-binary
- Version: 0.5.38
+ Version: 0.5.40
Summary: A library containing components related to model inferences in Gen AI applications.
Author-email: Henry Wicaksono <henry.wicaksono@gdplabs.id>, Resti Febrina <resti.febrina@gdplabs.id>
Requires-Python: <3.14,>=3.11
@@ -1,4 +1,4 @@
- gllm_inference.cp313-win_amd64.pyd,sha256=
+ gllm_inference.cp313-win_amd64.pyd,sha256=1dYQoCaBFcwcg-fqBMMF3NW3vAzdeJv3_ZRy9de9nAs,3557376
gllm_inference.pyi,sha256=CM8fddhFC2U0VGu9_JWrokO5YDc3B-eXx8pSjLYRlGY,4750
gllm_inference/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
gllm_inference/constants.pyi,sha256=1OBoHfeWfW9bXH9kStNEH__MGnGp--jLfyheAeQnogY,302
@@ -12,15 +12,15 @@ gllm_inference/catalog/catalog.pyi,sha256=eWPqgQKi-SJGHabi_XOTEKpAj96OSRypKsb5ZE
gllm_inference/catalog/lm_request_processor_catalog.pyi,sha256=FiveqPDkV58XbDO2znXL-Ix5tFbZwNiVnitlEa90YOY,5536
gllm_inference/catalog/prompt_builder_catalog.pyi,sha256=iViWB4SaezzjQY4UY1YxeoXUNxqxa2cTJGaD9JSx4Q8,3279
gllm_inference/em_invoker/__init__.pyi,sha256=pmbsjmsqXwfe4WPykMnrmasKrYuylJWnf2s0pbo0ioM,997
- gllm_inference/em_invoker/azure_openai_em_invoker.pyi,sha256=
- gllm_inference/em_invoker/bedrock_em_invoker.pyi,sha256=
+ gllm_inference/em_invoker/azure_openai_em_invoker.pyi,sha256=TXC5Kgf1eZqK2FHKAyeG3LB1SEsSEStnbk9bI1mjC5k,5049
+ gllm_inference/em_invoker/bedrock_em_invoker.pyi,sha256=kQETh2r-WR_H3APtt4QavmfwGOR3KB4k6USNYvFateY,5831
gllm_inference/em_invoker/em_invoker.pyi,sha256=YDYJ8TGScsz5Gg-OBnEENN1tI1RYvwoddypxUr6SAWw,5191
- gllm_inference/em_invoker/google_em_invoker.pyi,sha256=
+ gllm_inference/em_invoker/google_em_invoker.pyi,sha256=zZYjeLp9ncwIVM4UHqDJSVOFn1eXiaz9Ba24-_fCF2c,6953
gllm_inference/em_invoker/langchain_em_invoker.pyi,sha256=nhX6LynrjhfySEt_44OlLoSBd15hoz3giWyNM9CYLKY,3544
gllm_inference/em_invoker/openai_compatible_em_invoker.pyi,sha256=SbvCbOhdpkq6IyPhGd_IlxD8hbXDZID2rIehY6mJOIs,2923
- gllm_inference/em_invoker/openai_em_invoker.pyi,sha256=
- gllm_inference/em_invoker/twelevelabs_em_invoker.pyi,sha256=
- gllm_inference/em_invoker/voyage_em_invoker.pyi,sha256=
+ gllm_inference/em_invoker/openai_em_invoker.pyi,sha256=dwZr9rjrjm060HEnyaPR9-jFJpxSi7fWx7i9ZB4aEY4,6313
+ gllm_inference/em_invoker/twelevelabs_em_invoker.pyi,sha256=4E-xCtkkiry_tuMiI9jUk6l6iwy6iPQNxaq67AqHvjk,5448
+ gllm_inference/em_invoker/voyage_em_invoker.pyi,sha256=nlcyjYnd3JvKy8UCGzjfXQLR4UmQIJnRbnNwnDK3xng,5621
gllm_inference/em_invoker/langchain/__init__.pyi,sha256=aOTlRvS9aG1tBErjsmhe75s4Sq-g2z9ArfGqNW7QyEs,151
gllm_inference/em_invoker/langchain/em_invoker_embeddings.pyi,sha256=BBSDazMOckO9Aw17tC3LGUTPqLb01my1xUZLtKZlwJY,3388
gllm_inference/em_invoker/schema/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -36,18 +36,18 @@ gllm_inference/exceptions/error_parser.pyi,sha256=4aiJZhBzBOqlhdmpvaCvildGy7_Xxl
gllm_inference/exceptions/exceptions.pyi,sha256=6y3ECgHAStqMGgQv8Dv-Ui-5PDD07mSj6qaRZeSWea4,5857
gllm_inference/exceptions/provider_error_map.pyi,sha256=4AsAgbXAh91mxEW2YiomEuhBoeSNeAIo9WbT9WK8gQk,1233
gllm_inference/lm_invoker/__init__.pyi,sha256=jG1xc5fTOeIgeKKVYSnsMzQThKk9kTW38yO_MYtv540,1387
- gllm_inference/lm_invoker/anthropic_lm_invoker.pyi,sha256=
- gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=
- gllm_inference/lm_invoker/bedrock_lm_invoker.pyi,sha256=
- gllm_inference/lm_invoker/datasaur_lm_invoker.pyi,sha256=
- gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=
- gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=
- gllm_inference/lm_invoker/litellm_lm_invoker.pyi,sha256=
- gllm_inference/lm_invoker/lm_invoker.pyi,sha256=
- gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi,sha256=
- gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi,sha256=
- gllm_inference/lm_invoker/openai_lm_invoker.pyi,sha256=
- gllm_inference/lm_invoker/xai_lm_invoker.pyi,sha256=
+ gllm_inference/lm_invoker/anthropic_lm_invoker.pyi,sha256=JSgKUk9d1ZHlitv_ZjHlAk2hIW-J7u6yslVHflIeUro,16726
+ gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=FYfRNPG-oD4wIfitjTHnGib1uMZL7Pid0gbrRsymAHU,14601
+ gllm_inference/lm_invoker/bedrock_lm_invoker.pyi,sha256=dsNxj3ZfHxUplg6nBLgxVGooGYq1QP89gYzCnmRCz3g,11810
+ gllm_inference/lm_invoker/datasaur_lm_invoker.pyi,sha256=LR0EM4vTfufq9OWk8JVIwLyFeJFTguPNmPgJBUooSq4,8342
+ gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=aSmEgoYj_V72Nb6erDResphw9RaHfbE5C6PhqpMfEeQ,17674
+ gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=tJIxkFUKjLF-yz0niaDjN3L0QNCbn4sT8hmPKtERpog,12742
+ gllm_inference/lm_invoker/litellm_lm_invoker.pyi,sha256=IJxRUkmgXY8oQwS7tJoskO8fiESB7M4pyvpE64pyXDo,12648
+ gllm_inference/lm_invoker/lm_invoker.pyi,sha256=vUmMNEl7F__PavQJ42scoYGyWdEvZOw2Bwxhoqv_gKE,8659
+ gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi,sha256=uYJFgi4tJGab77232IC1gdoU9h9AqoClIUj6tM6O47s,15177
+ gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi,sha256=T9sShA_9fgEuaaAuT2gJZq_EYNbEhf3IkWwMCwfszY8,4244
+ gllm_inference/lm_invoker/openai_lm_invoker.pyi,sha256=JJ-EEoUZVU147UC0oU11EimWuaEhC9p5lBy-PVW60fM,23419
+ gllm_inference/lm_invoker/xai_lm_invoker.pyi,sha256=gyi12K7M9HkjNX6pU6NVv5Uq3-aHErixO-PVhHjioo8,14632
gllm_inference/lm_invoker/batch/__init__.pyi,sha256=vJOTHRJ83oq8Bq0UsMdID9_HW5JAxr06gUs4aPRZfEE,130
gllm_inference/lm_invoker/batch/batch_operations.pyi,sha256=o2U17M41RKVFW6j_oxy-SxU1JqUtVt75pKRxrqXzorE,5499
gllm_inference/lm_invoker/schema/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -103,7 +103,7 @@ gllm_inference/schema/config.pyi,sha256=NVmjQK6HipIE0dKSfx12hgIC0O-S1HEcAc-TWlXA
gllm_inference/schema/enums.pyi,sha256=U30RGvNFcNNJxTZZPt8vK7SFp3W4KSPVFxTZaiF1eLU,1375
gllm_inference/schema/events.pyi,sha256=ifF75efM1TaEjw4AQmPkoQJUSl8d3Gt9PsBhTwSGsJ4,4020
gllm_inference/schema/lm_input.pyi,sha256=HxQiZgY7zcXh_Dw8nK8LSeBTZEHMPZVwmPmnfgSsAbs,197
- gllm_inference/schema/lm_output.pyi,sha256=
+ gllm_inference/schema/lm_output.pyi,sha256=DIV8BiIOPaSnMKxzKzH_Mp7j7-MScWCvmllegJDLqFg,2479
gllm_inference/schema/mcp.pyi,sha256=4SgQ83pEowfWm2p-w9lupV4NayqqVBOy7SuYxIFeWRs,1045
gllm_inference/schema/message.pyi,sha256=jJV6A0ihEcun2OhzyMtNkiHnf7d6v5R-GdpTBGfJ0AQ,2272
gllm_inference/schema/model_id.pyi,sha256=NuaS4XlKDRJJezj45CEzn8reDDeII9XeRARmM5SZPqA,5408
@@ -117,7 +117,7 @@ gllm_inference/utils/io_utils.pyi,sha256=Eg7dvHWdXslTKdjh1j3dG50i7r35XG2zTmJ9XXv
gllm_inference/utils/langchain.pyi,sha256=4AwFiVAO0ZpdgmqeC4Pb5NJwBt8vVr0MSUqLeCdTscc,1194
gllm_inference/utils/validation.pyi,sha256=-RdMmb8afH7F7q4Ao7x6FbwaDfxUHn3hA3WiOgzB-3s,397
gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
- gllm_inference_binary-0.5.38.dist-info/METADATA,sha256=
- gllm_inference_binary-0.5.38.dist-info/WHEEL,sha256=
- gllm_inference_binary-0.5.38.dist-info/top_level.txt,sha256=
- gllm_inference_binary-0.5.38.dist-info/RECORD,,
+ gllm_inference_binary-0.5.40.dist-info/METADATA,sha256=nB6jb13Rpa3SqeBaMsTuF6mTdRMKSkBwzDzuSONeHJc,5770
+ gllm_inference_binary-0.5.40.dist-info/WHEEL,sha256=O_u6PJIQ2pIcyIInxVQ9r-yArMuUZbBIaF1kpYVkYxA,96
+ gllm_inference_binary-0.5.40.dist-info/top_level.txt,sha256=FpOjtN80F-qVNgbScXSEyqa0w09FYn6301iq6qt69IQ,15
+ gllm_inference_binary-0.5.40.dist-info/RECORD,,

{gllm_inference_binary-0.5.38.dist-info → gllm_inference_binary-0.5.40.dist-info}/WHEEL
RENAMED
File without changes

{gllm_inference_binary-0.5.38.dist-info → gllm_inference_binary-0.5.40.dist-info}/top_level.txt
RENAMED
File without changes