gllm-inference-binary 0.5.38__cp313-cp313-win_amd64.whl → 0.5.41__cp313-cp313-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gllm_inference/em_invoker/azure_openai_em_invoker.pyi +2 -2
- gllm_inference/em_invoker/bedrock_em_invoker.pyi +2 -2
- gllm_inference/em_invoker/google_em_invoker.pyi +2 -2
- gllm_inference/em_invoker/openai_em_invoker.pyi +2 -2
- gllm_inference/em_invoker/twelevelabs_em_invoker.pyi +2 -2
- gllm_inference/em_invoker/voyage_em_invoker.pyi +2 -2
- gllm_inference/lm_invoker/anthropic_lm_invoker.pyi +22 -28
- gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi +24 -29
- gllm_inference/lm_invoker/bedrock_lm_invoker.pyi +10 -20
- gllm_inference/lm_invoker/datasaur_lm_invoker.pyi +11 -21
- gllm_inference/lm_invoker/google_lm_invoker.pyi +46 -28
- gllm_inference/lm_invoker/langchain_lm_invoker.pyi +10 -20
- gllm_inference/lm_invoker/litellm_lm_invoker.pyi +25 -30
- gllm_inference/lm_invoker/lm_invoker.pyi +4 -1
- gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi +22 -28
- gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi +4 -1
- gllm_inference/lm_invoker/openai_lm_invoker.pyi +45 -50
- gllm_inference/lm_invoker/xai_lm_invoker.pyi +26 -42
- gllm_inference/schema/events.pyi +15 -15
- gllm_inference/schema/lm_output.pyi +4 -0
- gllm_inference.cp313-win_amd64.pyd +0 -0
- gllm_inference.pyi +1 -1
- {gllm_inference_binary-0.5.38.dist-info → gllm_inference_binary-0.5.41.dist-info}/METADATA +1 -1
- {gllm_inference_binary-0.5.38.dist-info → gllm_inference_binary-0.5.41.dist-info}/RECORD +26 -26
- {gllm_inference_binary-0.5.38.dist-info → gllm_inference_binary-0.5.41.dist-info}/WHEEL +0 -0
- {gllm_inference_binary-0.5.38.dist-info → gllm_inference_binary-0.5.41.dist-info}/top_level.txt +0 -0
@@ -57,9 +57,9 @@ class AzureOpenAIEMInvoker(OpenAIEMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
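The retry examples above repeat verbatim across every invoker in this diff. A minimal runnable sketch, assuming only the `RetryConfig` import that appears in the stub files below (`from gllm_core.utils.retry import RetryConfig`) and the `retry_config` parameter visible in the `__init__` signatures; the invoker in the comment is illustrative:

```python
# Minimal sketch of the retry configurations documented above.
# Assumes gllm_core.utils.retry.RetryConfig, the import shown in these stubs.
from gllm_core.utils.retry import RetryConfig

fail_fast = RetryConfig(max_retries=0, timeout=None)  # no retry, no timeout
bounded = RetryConfig(max_retries=5, timeout=10.0)    # 5 max retries, 10.0 seconds timeout

# Every invoker in this diff accepts the config the same way, e.g. (illustrative):
# em_invoker = AzureOpenAIEMInvoker(..., retry_config=bounded)
```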
@@ -67,9 +67,9 @@ class BedrockEMInvoker(BaseEMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -89,9 +89,9 @@ class GoogleEMInvoker(BaseEMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -85,9 +85,9 @@ class OpenAIEMInvoker(BaseEMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -71,9 +71,9 @@ class TwelveLabsEMInvoker(BaseEMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -74,9 +74,9 @@ class VoyageEMInvoker(BaseEMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -149,9 +149,9 @@ class AnthropicLMInvoker(BaseLMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -188,17 +188,18 @@ class AnthropicLMInvoker(BaseLMInvoker):
     )
     ```
 
-    When streaming is enabled, the thinking token will be streamed with the `EventType.DATA` event type.
-
     Streaming output example:
     ```python
-    {"type": "
-    {"type": "
-    {"type": "
-    {"type": "
+    {"type": "thinking_start", "value": "", ...}
+    {"type": "thinking", "value": "Let me think ", ...}
+    {"type": "thinking", "value": "about it...", ...}
+    {"type": "thinking_end", "value": "", ...}
     {"type": "response", "value": "Golden retriever ", ...}
     {"type": "response", "value": "is a good dog breed.", ...}
     ```
+    Note: By default, the thinking token will be streamed with the legacy `EventType.DATA` event type.
+    To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
+    LM invoker initialization. The legacy event format support will be removed in v0.6.
 
     Batch processing:
     The `AnthropicLMInvoker` supports batch processing, which allows the language model to process multiple
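A hedged sketch of opting in to the simplified streamed events described in the note above; the keyword names match the new `__init__` signature later in this diff, while the model name is a placeholder:

```python
# Hedged sketch: enable thinking and the new simplified event format.
# `simplify_events` is temporary and, per the docstring, goes away in v0.6.
lm_invoker = AnthropicLMInvoker(
    "claude-sonnet-4",      # placeholder model name, not taken from this diff
    thinking=True,
    simplify_events=True,   # stream thinking_start / thinking / thinking_end events
)
```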
@@ -265,30 +266,20 @@ class AnthropicLMInvoker(BaseLMInvoker):
 
     Output types:
     The output of the `AnthropicLMInvoker` can either be:
-    1. `str`:
-    2. `LMOutput`: A Pydantic model
-    2.1. response (str)
-    2.2. tool_calls (list[ToolCall])
-
-    2.
-
-    2.
-
-    2.5. duration (float | None): The duration of the invocation in seconds, if the `output_analytics`
-    parameter is set to `True`. Defaults to None.
-    2.6. finish_details (dict[str, Any]): The details about how the generation finished, if the
-    `output_analytics` parameter is set to `True`. Defaults to an empty dictionary.
-    2.7. reasoning (list[Reasoning]): The reasoning objects, if the `thinking` parameter is set to `True`.
-    Defaults to an empty list.
-    2.8. citations (list[Chunk]): The citations. Currently not supported. Defaults to an empty list.
-    2.9. code_exec_results (list[CodeExecResult]): The code execution results. Currently not supported.
-    Defaults to an empty list.
-    2.10. mcp_calls (list[MCPCall]): The MCP calls. Currently not supported. Defaults to an empty list.
+    1. `str`: A text response.
+    2. `LMOutput`: A Pydantic model that may contain the following attributes:
+    2.1. response (str)
+    2.2. tool_calls (list[ToolCall])
+    2.3. structured_output (dict[str, Any] | BaseModel | None)
+    2.4. token_usage (TokenUsage | None)
+    2.5. duration (float | None)
+    2.6. finish_details (dict[str, Any])
+    2.7. reasoning (list[Reasoning])
     '''
     client: Incomplete
     thinking: Incomplete
     thinking_budget: Incomplete
-    def __init__(self, model_name: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, thinking: bool = False, thinking_budget: int =
+    def __init__(self, model_name: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, thinking: bool = False, thinking_budget: int = ..., simplify_events: bool = False) -> None:
     """Initializes the AnthropicLmInvoker instance.
 
     Args:
@@ -309,6 +300,9 @@ class AnthropicLMInvoker(BaseLMInvoker):
         thinking (bool, optional): Whether to enable thinking. Only allowed for thinking models. Defaults to False.
         thinking_budget (int, optional): The tokens allocated for the thinking process. Must be greater than or
             equal to 1024. Only allowed for thinking models. Defaults to DEFAULT_THINKING_BUDGET.
+        simplify_events (bool, optional): Temporary parameter to control the streamed events format.
+            When True, uses the simplified events format. When False, uses the legacy events format for
+            backward compatibility. Will be removed in v0.6. Defaults to False.
 
     Raises:
         ValueError:
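The trimmed attribute list translates directly into consumer code. A hedged sketch of inspecting an `LMOutput`, using only the attributes enumerated in the updated docstring and the `await lm_invoker.invoke(...)` call style the docstrings themselves use:

```python
# Hedged sketch: consuming the LMOutput attributes listed above (2.1-2.7).
result = await lm_invoker.invoke("What is a good dog breed?")
if isinstance(result, str):
    print(result)              # plain text response
else:
    print(result.response)     # 2.1 response (str)
    print(result.tool_calls)   # 2.2 tool_calls (list[ToolCall])
    print(result.token_usage)  # 2.4 token_usage (TokenUsage | None)
    print(result.reasoning)    # 2.7 reasoning (list[Reasoning])
```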
@@ -152,9 +152,9 @@ class AzureOpenAILMInvoker(OpenAILMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -191,43 +191,35 @@ class AzureOpenAILMInvoker(OpenAILMInvoker):
     )
     ```
 
-    When streaming is enabled along with reasoning summary, the reasoning summary token will be streamed with the
-    `EventType.DATA` event type.
-
     Streaming output example:
     ```python
-    {"type": "
-    {"type": "
-    {"type": "
-    {"type": "
+    {"type": "thinking_start", "value": "", ...}
+    {"type": "thinking", "value": "Let me think ", ...}
+    {"type": "thinking", "value": "about it...", ...}
+    {"type": "thinking_end", "value": "", ...}
+    {"type": "response", "value": "Golden retriever ", ...}
+    {"type": "response", "value": "is a good dog breed.", ...}
     ```
+    Note: By default, the thinking token will be streamed with the legacy `EventType.DATA` event type.
+    To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
+    LM invoker initialization. The legacy event format support will be removed in v0.6.
 
     Setting reasoning-related parameters for non-reasoning models will raise an error.
 
     Output types:
     The output of the `AzureOpenAILMInvoker` can either be:
-    1. `str`:
-    2. `LMOutput`: A Pydantic model
-    2.1. response (str)
-    2.2. tool_calls (list[ToolCall])
-
-    2.
-
-    2.
-
-    2.5. duration (float | None): The duration of the invocation in seconds, if the `output_analytics`
-    parameter is set to `True`. Defaults to None.
-    2.6. finish_details (dict[str, Any] | None): The details about how the generation finished, if the
-    `output_analytics` parameter is set to `True`. Defaults to None.
-    2.7. reasoning (list[Reasoning]): The reasoning objects, if the `reasoning_summary` parameter is provided
-    for reasoning models. Defaults to an empty list.
-    2.8. citations (list[Chunk]): The citations. Currently not supported. Defaults to an empty list.
-    2.9. code_exec_results (list[CodeExecResult]): The code execution results. Currently not supported.
-    Defaults to an empty list.
-    2.10. mcp_calls (list[MCPCall]): The MCP calls. Currently not supported. Defaults to an empty list.
+    1. `str`: A text response.
+    2. `LMOutput`: A Pydantic model that may contain the following attributes:
+    2.1. response (str)
+    2.2. tool_calls (list[ToolCall])
+    2.3. structured_output (dict[str, Any] | BaseModel | None)
+    2.4. token_usage (TokenUsage | None)
+    2.5. duration (float | None)
+    2.6. finish_details (dict[str, Any] | None)
+    2.7. reasoning (list[Reasoning])
     '''
     client_kwargs: Incomplete
-    def __init__(self, azure_endpoint: str, azure_deployment: str, api_key: str | None = None, api_version: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, reasoning_summary: ReasoningSummary | None = None) -> None:
+    def __init__(self, azure_endpoint: str, azure_deployment: str, api_key: str | None = None, api_version: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, reasoning_summary: ReasoningSummary | None = None, simplify_events: bool = False) -> None:
     """Initializes a new instance of the AzureOpenAILMInvoker class.
 
     Args:
@@ -251,6 +243,9 @@ class AzureOpenAILMInvoker(OpenAILMInvoker):
             for non-reasoning models. If None, the model will perform medium reasoning effort. Defaults to None.
         reasoning_summary (ReasoningSummary | None, optional): The reasoning summary level for reasoning models.
             Not allowed for non-reasoning models. If None, no summary will be generated. Defaults to None.
+        simplify_events (bool, optional): Temporary parameter to control the streamed events format.
+            When True, uses the simplified events format. When False, uses the legacy events format for
+            backward compatibility. Will be removed in v0.6. Defaults to False.
 
     Raises:
         ValueError:
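A hedged sketch of the updated Azure constructor; the endpoint, deployment, and reasoning summary values are placeholders (assumed, not taken from this diff), while the keyword names come from the signature above:

```python
# Hedged sketch of the new AzureOpenAILMInvoker signature; all values are
# placeholders for illustration, only the keyword names come from this diff.
lm_invoker = AzureOpenAILMInvoker(
    azure_endpoint="https://example.openai.azure.com",  # placeholder endpoint
    azure_deployment="my-deployment",                   # placeholder deployment
    reasoning_summary="detailed",  # assumed ReasoningSummary value; reasoning models only
    simplify_events=True,          # opt in to the simplified event format
)
```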
@@ -149,9 +149,9 @@ class BedrockLMInvoker(BaseLMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -162,24 +162,14 @@ class BedrockLMInvoker(BaseLMInvoker):
 
     Output types:
     The output of the `BedrockLMInvoker` can either be:
-    1. `str`:
-    2. `LMOutput`: A Pydantic model
-    2.1. response (str)
-    2.2. tool_calls (list[ToolCall])
-
-    2.
-
-    2.
-    set to `True`. Defaults to None.
-    2.5. duration (float | None): The duration of the invocation in seconds, if the `output_analytics`
-    parameter is set to `True`. Defaults to None.
-    2.6. finish_details (dict[str, Any]): The details about how the generation finished, if the
-    `output_analytics` parameter is set to `True`. Defaults to an empty dictionary.
-    2.7. reasoning (list[Reasoning]): The reasoning objects. Currently not supported. Defaults to an empty list.
-    2.8. citations (list[Chunk]): The citations. Currently not supported. Defaults to an empty list.
-    2.9. code_exec_results (list[CodeExecResult]): The code execution results. Currently not supported.
-    Defaults to an empty list.
-    2.10. mcp_calls (list[MCPCall]): The MCP calls. Currently not supported. Defaults to an empty list.
+    1. `str`: A text response.
+    2. `LMOutput`: A Pydantic model that may contain the following attributes:
+    2.1. response (str)
+    2.2. tool_calls (list[ToolCall])
+    2.3. structured_output (dict[str, Any] | BaseModel | None)
+    2.4. token_usage (TokenUsage | None)
+    2.5. duration (float | None)
+    2.6. finish_details (dict[str, Any] | None)
     '''
     session: Incomplete
     client_kwargs: Incomplete
@@ -3,7 +3,7 @@ from gllm_core.event import EventEmitter as EventEmitter
 from gllm_core.schema.tool import Tool as Tool
 from gllm_core.utils.retry import RetryConfig as RetryConfig
 from gllm_inference.constants import DOCUMENT_MIME_TYPES as DOCUMENT_MIME_TYPES, INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
-from gllm_inference.lm_invoker.
+from gllm_inference.lm_invoker.openai_chat_completions_lm_invoker import OpenAIChatCompletionsLMInvoker as OpenAIChatCompletionsLMInvoker
 from gllm_inference.lm_invoker.schema.datasaur import InputType as InputType, Key as Key
 from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, Message as Message, ModelId as ModelId, ModelProvider as ModelProvider, ResponseSchema as ResponseSchema, ToolCall as ToolCall, ToolResult as ToolResult
 from langchain_core.tools import Tool as LangChainTool
@@ -11,7 +11,7 @@ from typing import Any
 
 SUPPORTED_ATTACHMENTS: Incomplete
 
-class DatasaurLMInvoker(
+class DatasaurLMInvoker(OpenAIChatCompletionsLMInvoker):
     '''A language model invoker to interact with Datasaur LLM Projects Deployment API.
 
     Attributes:
@@ -72,9 +72,9 @@ class DatasaurLMInvoker(OpenAICompatibleLMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -103,23 +103,13 @@ class DatasaurLMInvoker(OpenAICompatibleLMInvoker):
 
     Output types:
     The output of the `DatasaurLMInvoker` can either be:
-    1. `str`:
-    2. `LMOutput`: A Pydantic model
-    2.1. response (str)
-    2.2.
-    2.3.
-
-    2.
-    set to `True`. Defaults to None.
-    2.5. duration (float | None): The duration of the invocation in seconds, if the `output_analytics`
-    parameter is set to `True`. Defaults to None.
-    2.6. finish_details (dict[str, Any] | None): The details about how the generation finished, if the
-    `output_analytics` parameter is set to `True`. Defaults to None.
-    2.7. reasoning (list[Reasoning]): The reasoning objects. Currently not supported. Defaults to an empty list.
-    2.8. citations (list[Chunk]): The citations. Currently not supported. Defaults to an empty list.
-    2.9. code_exec_results (list[CodeExecResult]): The code execution results. Currently not supported.
-    Defaults to an empty list.
-    2.10. mcp_calls (list[MCPCall]): The MCP calls. Currently not supported. Defaults to an empty list.
+    1. `str`: A text response.
+    2. `LMOutput`: A Pydantic model that may contain the following attributes:
+    2.1. response (str)
+    2.2. token_usage (TokenUsage | None)
+    2.3. duration (float | None)
+    2.4. finish_details (dict[str, Any] | None)
+    2.5. citations (list[Chunk])
     '''
     client_kwargs: Incomplete
     citations: Incomplete
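The Datasaur invoker is the only one here whose output list gains `citations (list[Chunk])`, matching its `citations: Incomplete` attribute above. A hedged sketch of reading them, assuming the attribute behaves as documented:

```python
# Hedged sketch: reading the citations attribute documented above (2.5).
result = await lm_invoker.invoke("Summarize the indexed document.")
if not isinstance(result, str):
    for chunk in result.citations:  # list[Chunk]; empty when none are returned
        print(chunk)
```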
@@ -14,6 +14,7 @@ from typing import Any
 SUPPORTED_ATTACHMENTS: Incomplete
 DEFAULT_THINKING_BUDGET: int
 REQUIRE_THINKING_MODEL_PREFIX: Incomplete
+IMAGE_GENERATION_MODELS: Incomplete
 YOUTUBE_URL_PATTERN: Incomplete
 
 class GoogleLMInvoker(BaseLMInvoker):
@@ -30,6 +31,7 @@ class GoogleLMInvoker(BaseLMInvoker):
             structured response as defined by the schema. Supports both Pydantic BaseModel and JSON schema dictionary.
         output_analytics (bool): Whether to output the invocation analytics.
         retry_config (RetryConfig | None): The retry configuration for the language model.
+        generate_image (bool): Whether to generate image. Only allowed for image generation models.
         thinking (bool): Whether to enable thinking. Only allowed for thinking models.
         thinking_budget (int): The tokens allowed for thinking process. Only allowed for thinking models.
             If set to -1, the model will control the budget automatically.
@@ -80,6 +82,26 @@ class GoogleLMInvoker(BaseLMInvoker):
     result = await lm_invoker.invoke([text, image])
     ```
 
+    Image generation:
+    The `GoogleLMInvoker` supports image generation. This can be done by using an image generation model,
+    such as `gemini-2.5-flash-image`. Streaming is disabled for image generation models.
+    The generated image will be stored in the `attachments` attribute in the output.
+
+    Usage example:
+    ```python
+    lm_invoker = GoogleLMInvoker("gemini-2.5-flash-image")
+    result = await lm_invoker.invoke("Create a picture...")
+    result.attachments[0].write_to_file("path/to/local/image.png")
+    ```
+
+    Output example:
+    ```python
+    LMOutput(
+        response="Let me call the tools...",
+        attachments=[Attachment(filename="image.png", mime_type="image/png", data=b"...")],
+    )
+    ```
+
     Tool calling:
     Tool calling is a feature that allows the language model to call tools to perform tasks.
     Tools can be passed to the via the `tools` parameter as a list of `Tool` objects.
@@ -182,9 +204,9 @@ class GoogleLMInvoker(BaseLMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -216,17 +238,18 @@ class GoogleLMInvoker(BaseLMInvoker):
     )
     ```
 
-    When streaming is enabled, the thinking token will be streamed with the `EventType.DATA` event type.
-
     Streaming output example:
     ```python
-    {"type": "
-    {"type": "
-    {"type": "
-    {"type": "
+    {"type": "thinking_start", "value": "", ...}
+    {"type": "thinking", "value": "Let me think ", ...}
+    {"type": "thinking", "value": "about it...", ...}
+    {"type": "thinking_end", "value": "", ...}
     {"type": "response", "value": "Golden retriever ", ...}
     {"type": "response", "value": "is a good dog breed.", ...}
     ```
+    Note: By default, the thinking token will be streamed with the legacy `EventType.DATA` event type.
+    To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
+    LM invoker initialization. The legacy event format support will be removed in v0.6.
 
     When thinking is enabled, the amount of tokens allocated for the thinking process can be set via the
     `thinking_budget` parameter. The `thinking_budget`:
@@ -236,30 +259,22 @@ class GoogleLMInvoker(BaseLMInvoker):
 
     Output types:
     The output of the `GoogleLMInvoker` can either be:
-    1. `str`:
-    2. `LMOutput`: A Pydantic model
-    2.1. response (str)
-    2.2.
-
-    2.
-
-    2.
-
-    2.
-    parameter is set to `True`. Defaults to None.
-    2.6. finish_details (dict[str, Any] | None): The details about how the generation finished, if the
-    `output_analytics` parameter is set to `True`. Defaults to None.
-    2.7. reasoning (list[Reasoning]): The reasoning objects, if the `thinking` parameter is set to `True`.
-    Defaults to an empty list.
-    2.8. citations (list[Chunk]): The citations. Currently not supported. Defaults to an empty list.
-    2.9. code_exec_results (list[CodeExecResult]): The code execution results. Currently not supported.
-    Defaults to an empty list.
-    2.10. mcp_calls (list[MCPCall]): The MCP calls. Currently not supported. Defaults to an empty list.
+    1. `str`: A text response.
+    2. `LMOutput`: A Pydantic model that may contain the following attributes:
+    2.1. response (str)
+    2.2. attachments (list[Attachment])
+    2.3. tool_calls (list[ToolCall])
+    2.4. structured_output (dict[str, Any] | BaseModel | None)
+    2.5. token_usage (TokenUsage | None)
+    2.6. duration (float | None)
+    2.7. finish_details (dict[str, Any])
+    2.8. reasoning (list[Reasoning])
     '''
     client_params: Incomplete
+    generate_image: Incomplete
     thinking: Incomplete
     thinking_budget: Incomplete
-    def __init__(self, model_name: str, api_key: str | None = None, credentials_path: str | None = None, project_id: str | None = None, location: str = 'us-central1', model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, thinking: bool | None = None, thinking_budget: int =
+    def __init__(self, model_name: str, api_key: str | None = None, credentials_path: str | None = None, project_id: str | None = None, location: str = 'us-central1', model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, thinking: bool | None = None, thinking_budget: int = ..., simplify_events: bool = False) -> None:
     '''Initializes a new instance of the GoogleLMInvoker class.
 
     Args:
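A hedged sketch combining the Google-specific knobs from the new signature; the model name is a placeholder and the budget semantics follow the docstring above:

```python
# Hedged sketch of the new GoogleLMInvoker signature. thinking_budget=-1 lets
# the model manage its own budget, per the docstring above.
lm_invoker = GoogleLMInvoker(
    "gemini-2.5-flash",     # placeholder model name
    thinking=True,
    thinking_budget=-1,     # model controls the budget automatically
    simplify_events=True,   # simplified streamed events; legacy format removed in v0.6
)
```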
@@ -288,6 +303,9 @@ class GoogleLMInvoker(BaseLMInvoker):
             Defaults to True for Gemini 2.5 Pro models and False for other models.
         thinking_budget (int, optional): The tokens allowed for thinking process. Only allowed for thinking models.
             Defaults to -1, in which case the model will control the budget automatically.
+        simplify_events (bool, optional): Temporary parameter to control the streamed events format.
+            When True, uses the simplified events format. When False, uses the legacy events format for
+            backward compatibility. Will be removed in v0.6. Defaults to False.
 
     Note:
         If neither `api_key` nor `credentials_path` is provided, Google Gen AI will be used by default.
@@ -175,9 +175,9 @@ class LangChainLMInvoker(BaseLMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -188,24 +188,14 @@ class LangChainLMInvoker(BaseLMInvoker):
 
     Output types:
     The output of the `LangChainLMInvoker` can either be:
-    1. `str`:
-    2. `LMOutput`: A Pydantic model
-    2.1. response (str)
-    2.2. tool_calls (list[ToolCall])
-
-    2.
-
-    2.
-    set to `True`. Defaults to None.
-    2.5. duration (float | None): The duration of the invocation in seconds, if the `output_analytics`
-    parameter is set to `True`. Defaults to None.
-    2.6. finish_details (dict[str, Any] | None): The details about how the generation finished, if the
-    `output_analytics` parameter is set to `True`. Defaults to None.
-    2.7. reasoning (list[Reasoning]): The reasoning objects. Currently not supported. Defaults to an empty list.
-    2.8. citations (list[Chunk]): The citations. Currently not supported. Defaults to an empty list.
-    2.9. code_exec_results (list[CodeExecResult]): The code execution results. Currently not supported.
-    Defaults to an empty list.
-    2.10. mcp_calls (list[MCPCall]): The MCP calls. Currently not supported. Defaults to an empty list.
+    1. `str`: A text response.
+    2. `LMOutput`: A Pydantic model that may contain the following attributes:
+    2.1. response (str)
+    2.2. tool_calls (list[ToolCall])
+    2.3. structured_output (dict[str, Any] | BaseModel | None)
+    2.4. token_usage (TokenUsage | None)
+    2.5. duration (float | None)
+    2.6. finish_details (dict[str, Any])
     '''
     model: Incomplete
     def __init__(self, model: BaseChatModel | None = None, model_class_path: str | None = None, model_name: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None) -> None:
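The LangChain invoker signature above accepts either a ready `BaseChatModel` or a class path plus model name. A hedged sketch of the class-path form; the path and model name are placeholders, not values taken from this diff:

```python
# Hedged sketch of the LangChainLMInvoker signature shown above; the class
# path and model name are placeholders for illustration.
from gllm_core.utils.retry import RetryConfig

lm_invoker = LangChainLMInvoker(
    model_class_path="langchain_openai.ChatOpenAI",  # placeholder class path
    model_name="gpt-4o-mini",                        # placeholder model name
    retry_config=RetryConfig(max_retries=5, timeout=10.0),
)
```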