gllm_inference_binary-0.5.40-cp311-cp311-win_amd64.whl → gllm_inference_binary-0.5.66-cp311-cp311-win_amd64.whl
- gllm_inference/builder/_build_invoker.pyi +28 -0
- gllm_inference/builder/build_em_invoker.pyi +12 -16
- gllm_inference/builder/build_lm_invoker.pyi +65 -17
- gllm_inference/constants.pyi +3 -2
- gllm_inference/em_invoker/__init__.pyi +3 -1
- gllm_inference/em_invoker/bedrock_em_invoker.pyi +16 -4
- gllm_inference/em_invoker/cohere_em_invoker.pyi +127 -0
- gllm_inference/em_invoker/jina_em_invoker.pyi +103 -0
- gllm_inference/em_invoker/schema/bedrock.pyi +7 -0
- gllm_inference/em_invoker/schema/cohere.pyi +20 -0
- gllm_inference/em_invoker/schema/jina.pyi +29 -0
- gllm_inference/exceptions/provider_error_map.pyi +1 -0
- gllm_inference/lm_invoker/__init__.pyi +3 -1
- gllm_inference/lm_invoker/anthropic_lm_invoker.pyi +95 -109
- gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi +92 -109
- gllm_inference/lm_invoker/batch/batch_operations.pyi +2 -1
- gllm_inference/lm_invoker/bedrock_lm_invoker.pyi +52 -65
- gllm_inference/lm_invoker/datasaur_lm_invoker.pyi +36 -36
- gllm_inference/lm_invoker/google_lm_invoker.pyi +195 -110
- gllm_inference/lm_invoker/langchain_lm_invoker.pyi +52 -64
- gllm_inference/lm_invoker/litellm_lm_invoker.pyi +86 -106
- gllm_inference/lm_invoker/lm_invoker.pyi +20 -1
- gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi +87 -107
- gllm_inference/lm_invoker/openai_lm_invoker.pyi +237 -186
- gllm_inference/lm_invoker/portkey_lm_invoker.pyi +296 -0
- gllm_inference/lm_invoker/schema/google.pyi +12 -0
- gllm_inference/lm_invoker/schema/openai.pyi +22 -0
- gllm_inference/lm_invoker/schema/portkey.pyi +31 -0
- gllm_inference/lm_invoker/sea_lion_lm_invoker.pyi +48 -0
- gllm_inference/lm_invoker/xai_lm_invoker.pyi +94 -131
- gllm_inference/model/__init__.pyi +5 -1
- gllm_inference/model/em/cohere_em.pyi +17 -0
- gllm_inference/model/em/jina_em.pyi +22 -0
- gllm_inference/model/lm/anthropic_lm.pyi +2 -0
- gllm_inference/model/lm/google_lm.pyi +1 -0
- gllm_inference/model/lm/sea_lion_lm.pyi +16 -0
- gllm_inference/model/lm/xai_lm.pyi +19 -0
- gllm_inference/prompt_builder/format_strategy/__init__.pyi +4 -0
- gllm_inference/prompt_builder/format_strategy/format_strategy.pyi +55 -0
- gllm_inference/prompt_builder/format_strategy/jinja_format_strategy.pyi +45 -0
- gllm_inference/prompt_builder/format_strategy/string_format_strategy.pyi +20 -0
- gllm_inference/prompt_builder/prompt_builder.pyi +23 -6
- gllm_inference/schema/__init__.pyi +4 -3
- gllm_inference/schema/activity.pyi +13 -11
- gllm_inference/schema/attachment.pyi +20 -6
- gllm_inference/schema/enums.pyi +30 -1
- gllm_inference/schema/events.pyi +69 -73
- gllm_inference/schema/formatter.pyi +31 -0
- gllm_inference/schema/lm_output.pyi +245 -23
- gllm_inference/schema/model_id.pyi +27 -3
- gllm_inference/utils/validation.pyi +3 -0
- gllm_inference.cp311-win_amd64.pyd +0 -0
- gllm_inference.pyi +23 -13
- {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/METADATA +10 -6
- {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/RECORD +57 -40
- {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/WHEEL +0 -0
- {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/top_level.txt +0 -0
@@ -57,80 +57,116 @@ class LiteLLMLMInvoker(OpenAIChatCompletionsLMInvoker):
     result = await lm_invoker.invoke([text, image])
     ```
 
-
-
-
-
-    `tool_calls` attribute in the output.
-
-    Usage example:
-    ```python
-    lm_invoker = LiteLLMLMInvoker(..., tools=[tool_1, tool_2])
-    ```
+    Text output:
+    The `LiteLLMLMInvoker` generates text outputs by default.
+    Text outputs are stored in the `outputs` attribute of the `LMOutput` object and can be accessed
+    via the `texts` (all text outputs) or `text` (first text output) properties.
 
     Output example:
     ```python
-    LMOutput(
-        response="Let me call the tools...",
-        tool_calls=[
-            ToolCall(id="123", name="tool_1", args={"key": "value"}),
-            ToolCall(id="456", name="tool_2", args={"key": "value"}),
-        ]
-    )
+    LMOutput(outputs=[LMOutputItem(type="text", output="Hello, there!")])
     ```
 
     Structured output:
-
+    The `LiteLLMLMInvoker` can be configured to generate structured outputs.
     This feature can be enabled by providing a schema to the `response_schema` parameter.
 
-
-
-    For this reason, it is recommended to create the JSON schema using Pydantic\'s `model_json_schema` method.
+    Structured outputs are stored in the `outputs` attribute of the `LMOutput` object and can be accessed
+    via the `structureds` (all structured outputs) or `structured` (first structured output) properties.
 
-    The
-
+    The schema must either be one of the following:
+    1. A Pydantic BaseModel class
+       The structured output will be a Pydantic model.
+    2. A JSON schema dictionary
+       JSON dictionary schema must be compatible with Pydantic\'s JSON schema, especially for complex schemas.
+       Thus, it is recommended to create the JSON schema using Pydantic\'s `model_json_schema` method.
+       The structured output will be a dictionary.
 
-    When enabled, the structured output is stored in the `structured_output` attribute in the output.
-    1. If the schema is a JSON schema dictionary, the structured output is a dictionary.
-    2. If the schema is a Pydantic BaseModel class, the structured output is a Pydantic model.
-
-    # Example 1: Using a JSON schema dictionary
     Usage example:
     ```python
-
-
-
-
-
-
-
-
-        "type": "object",
-    }
-    lm_invoker = LiteLLMLMInvoker(..., response_schema=schema)
+    class Animal(BaseModel):
+        name: str
+        color: str
+
+    json_schema = Animal.model_json_schema()
+
+    lm_invoker = LiteLLMLMInvoker(..., response_schema=Animal)  # Using Pydantic BaseModel class
+    lm_invoker = LiteLLMLMInvoker(..., response_schema=json_schema)  # Using JSON schema dictionary
     ```
+
     Output example:
     ```python
-
+    # Using Pydantic BaseModel class outputs a Pydantic model
+    LMOutput(outputs=[LMOutputItem(type="structured", output=Animal(name="dog", color="white"))])
+
+    # Using JSON schema dictionary outputs a dictionary
+    LMOutput(outputs=[LMOutputItem(type="structured", output={"name": "dog", "color": "white"})])
     ```
 
-
+    When structured output is enabled, streaming is disabled.
+
+    Tool calling:
+    The `LiteLLMLMInvoker` can be configured to call tools to perform certain tasks.
+    This feature can be enabled by providing a list of `Tool` objects to the `tools` parameter.
+
+    Tool calls outputs are stored in the `outputs` attribute of the `LMOutput` object and
+    can be accessed via the `tool_calls` property.
+
     Usage example:
     ```python
-
-
-    color: str
+    lm_invoker = LiteLLMLMInvoker(..., tools=[tool_1, tool_2])
+    ```
 
-
+    Output example:
+    ```python
+    LMOutput(
+        outputs=[
+            LMOutputItem(type="text", output="I\'m using tools..."),
+            LMOutputItem(type="tool_call", output=ToolCall(id="123", name="tool_1", args={"key": "value"})),
+            LMOutputItem(type="tool_call", output=ToolCall(id="456", name="tool_2", args={"key": "value"})),
+        ]
+    )
     ```
+
+    Reasoning:
+    The `LiteLLMLMInvoker` performs step-by-step reasoning before generating a response when reasoning
+    models are used, such as GPT-5 models and o-series models.
+
+    The reasoning effort can be set via the `reasoning_effort` parameter, which guides the models on the amount
+    of reasoning tokens to generate. Available options include `minimal`, `low`, `medium`, and `high`.
+
+    Some models may also output the reasoning tokens. In this case, the reasoning tokens are stored in
+    the `outputs` attribute of the `LMOutput` object and can be accessed via the `thinkings` property.
+
     Output example:
     ```python
-    LMOutput(
+    LMOutput(
+        outputs=[
+            LMOutputItem(type="thinking", output=Reasoning(reasoning="I\'m thinking...", ...)),
+            LMOutputItem(type="text", output="Golden retriever is a good dog breed."),
+        ]
+    )
     ```
 
+    Streaming output example:
+    ```python
+    {"type": "thinking_start", "value": "", ...}
+    {"type": "thinking", "value": "I\'m ", ...}
+    {"type": "thinking", "value": "thinking...", ...}
+    {"type": "thinking_end", "value": "", ...}
+    {"type": "response", "value": "Golden retriever ", ...}
+    {"type": "response", "value": "is a good dog breed.", ...}
+    ```
+    Note: By default, the thinking token will be streamed with the legacy `EventType.DATA` event type.
+    To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
+    LM invoker initialization. The legacy event format support will be removed in v0.6.
+
+    Setting reasoning-related parameters for non-reasoning models will raise an error.
+
     Analytics tracking:
-
+    The `LiteLLMLMInvoker` can be configured to output additional information about the invocation.
     This feature can be enabled by setting the `output_analytics` parameter to `True`.
+
     When enabled, the following attributes will be stored in the output:
     1. `token_usage`: The token usage.
     2. `duration`: The duration in seconds.
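Taken together, the hunk above replaces the old `response`/`tool_calls`/`structured_output` attributes with a single `outputs` list plus typed accessor properties. Below is a minimal consumption sketch; the property names (`text`, `texts`, `tool_calls`, `thinkings`) and import paths come from this diff, while the model id and the invocation flow are illustrative assumptions, not examples documented by the package.

```python
# Sketch of the new `LMOutput.outputs` access pattern described in the docstring above.
import asyncio

from gllm_inference.lm_invoker.litellm_lm_invoker import LiteLLMLMInvoker
from gllm_inference.schema import LMOutput


async def main() -> None:
    lm_invoker = LiteLLMLMInvoker(model_id="openai/gpt-4o")  # assumed model id format
    result = await lm_invoker.invoke("Name a good dog breed.")

    # `invoke` is typed `str | LMOutput`, so guard before using the new properties.
    if isinstance(result, LMOutput):
        print(result.text)        # first text output
        print(result.texts)       # all text outputs
        print(result.tool_calls)  # tool-call items, when tools are configured
        print(result.thinkings)   # reasoning items, when the model emits them
    else:
        print(result)


asyncio.run(main())
```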
@@ -139,15 +175,14 @@ class LiteLLMLMInvoker(OpenAIChatCompletionsLMInvoker):
     Output example:
     ```python
     LMOutput(
-
+        outputs=[...],
         token_usage=TokenUsage(input_tokens=100, output_tokens=50),
         duration=0.729,
-        finish_details={"
+        finish_details={"stop_reason": "end_turn"},
     )
     ```
 
-    When streaming is enabled, token usage is not supported.
-    regardless of the value of the `output_analytics` parameter.
+    When streaming is enabled, token usage is not supported.
 
     Retry and timeout:
     The `LiteLLMLMInvoker` supports retry and timeout configuration.
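The analytics hunk above only swaps the old `response` attribute for `outputs=[...]` in the example; reading the fields at runtime would look roughly like this sketch. The attribute names come from the diff; the model id and the enclosing async context are assumptions.

```python
# Hedged sketch: enabling analytics and reading the fields named in the hunk above.
# Runs inside an async function; the model id is a placeholder.
lm_invoker = LiteLLMLMInvoker(model_id="openai/gpt-4o", output_analytics=True)
result = await lm_invoker.invoke("Summarize this sentence.")

if isinstance(result, LMOutput) and result.token_usage is not None:
    # Per the docstring, token usage is not supported when streaming is enabled.
    print(result.token_usage.input_tokens, result.token_usage.output_tokens)
    print(result.duration)        # seconds, e.g. 0.729
    print(result.finish_details)  # e.g. {"stop_reason": "end_turn"}
```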
@@ -157,8 +192,6 @@ class LiteLLMLMInvoker(OpenAIChatCompletionsLMInvoker):
     Retry config examples:
     ```python
     retry_config = RetryConfig(max_retries=0, timeout=None)  # No retry, no timeout
-    retry_config = RetryConfig(max_retries=0, timeout=10.0)  # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=None)  # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0)  # 5 max retries, 10.0 seconds timeout
     ```
 
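The two retained `RetryConfig` lines cover the extremes; wiring one into the invoker uses the `retry_config` parameter shown in the `__init__` signature below. A short sketch, with the model id assumed:

```python
# RetryConfig shapes copied from the docstring examples kept in this hunk.
from gllm_core.utils import RetryConfig

retry_config = RetryConfig(max_retries=5, timeout=10.0)  # 5 max retries, 10.0 seconds timeout
lm_invoker = LiteLLMLMInvoker(model_id="openai/gpt-4o", retry_config=retry_config)  # assumed model id
```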
@@ -166,59 +199,6 @@ class LiteLLMLMInvoker(OpenAIChatCompletionsLMInvoker):
     ```python
     lm_invoker = LiteLLMLMInvoker(..., retry_config=retry_config)
     ```
-
-    Reasoning:
-    Some language models support advanced reasoning capabilities. When using such reasoning-capable models,
-    you can configure how much reasoning the model should perform before generating a final response by setting
-    reasoning-related parameters.
-
-    The reasoning effort of reasoning models can be set via the `reasoning_effort` parameter. This parameter
-    will guide the models on how many reasoning tokens it should generate before creating a response to the prompt.
-    The reasoning effort is only supported by some language models.
-    Available options include:
-    1. "low": Favors speed and economical token usage.
-    2. "medium": Favors a balance between speed and reasoning accuracy.
-    3. "high": Favors more complete reasoning at the cost of more tokens generated and slower responses.
-    This may differ between models. When not set, the reasoning effort will be equivalent to None by default.
-
-    When using reasoning models, some providers might output the reasoning summary. These will be stored in the
-    `reasoning` attribute in the output.
-
-    Output example:
-    ```python
-    LMOutput(
-        response="Golden retriever is a good dog breed.",
-        reasoning=[Reasoning(id="", reasoning="Let me think about it...")],
-    )
-    ```
-
-    Streaming output example:
-    ```python
-    {"type": "thinking_start", "value": "", ...}
-    {"type": "thinking", "value": "Let me think ", ...}
-    {"type": "thinking", "value": "about it...", ...}
-    {"type": "thinking_end", "value": "", ...}
-    {"type": "response", "value": "Golden retriever ", ...}
-    {"type": "response", "value": "is a good dog breed.", ...}
-    ```
-    Note: By default, the thinking token will be streamed with the legacy `EventType.DATA` event type.
-    To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
-    LM invoker initialization. The legacy event format support will be removed in v0.6.
-
-    Setting reasoning-related parameters for non-reasoning models will raise an error.
-
-
-    Output types:
-    The output of the `LiteLLMLMInvoker` can either be:
-    1. `str`: A text response.
-    2. `LMOutput`: A Pydantic model that may contain the following attributes:
-        2.1. response (str)
-        2.2. tool_calls (list[ToolCall])
-        2.3. structured_output (dict[str, Any] | BaseModel | None)
-        2.4. token_usage (TokenUsage | None)
-        2.5. duration (float | None)
-        2.6. finish_details (dict[str, Any])
-        2.7. reasoning (list[Reasoning])
     '''
     completion: Incomplete
     def __init__(self, model_id: str, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, simplify_events: bool = False) -> None:
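The `__init__` signature above keeps `reasoning_effort` and `simplify_events` even though the legacy reasoning docs were removed. A hedged instantiation sketch; the parameter names are from the diff, while the model id and chosen values are assumptions:

```python
# Instantiation sketch matching the signature above; values are illustrative only.
lm_invoker = LiteLLMLMInvoker(
    model_id="openai/o3-mini",  # hypothetical reasoning-capable model
    reasoning_effort="low",     # per the new docstring: minimal, low, medium, or high
    simplify_events=True,       # opt in to the new event format (legacy removed in v0.6)
    output_analytics=True,
)
```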
@@ -7,7 +7,7 @@ from gllm_core.utils import RetryConfig
 from gllm_inference.constants import DOCUMENT_MIME_TYPES as DOCUMENT_MIME_TYPES, INVOKER_DEFAULT_TIMEOUT as INVOKER_DEFAULT_TIMEOUT
 from gllm_inference.exceptions import BaseInvokerError as BaseInvokerError, convert_to_base_invoker_error as convert_to_base_invoker_error
 from gllm_inference.lm_invoker.batch import BatchOperations as BatchOperations
-from gllm_inference.schema import
+from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, BatchStatus as BatchStatus, LMInput as LMInput, LMOutput as LMOutput, Message as Message, MessageContent as MessageContent, MessageRole as MessageRole, ModelId as ModelId, Reasoning as Reasoning, ResponseSchema as ResponseSchema, ToolCall as ToolCall, ToolResult as ToolResult
 from langchain_core.tools import Tool as LangChainTool
 from typing import Any
 
@@ -21,6 +21,7 @@ class Key:
     DATA_TYPE: str
     DATA_VALUE: str
     DEFAULT: str
+    DEFS: str
     DESCRIPTION: str
     FUNC: str
     ID: str
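The new `DEFS` key plausibly names the `$defs` section that Pydantic v2's `model_json_schema()` emits when a schema references nested models; that reading is an inference from this hunk, not something the diff states. For reference, this is where `$defs` appears:

```python
# Pydantic v2 places nested model schemas under "$defs" in model_json_schema().
from pydantic import BaseModel


class Owner(BaseModel):
    name: str


class Animal(BaseModel):
    name: str
    owner: Owner  # nested model forces a "$defs" entry


schema = Animal.model_json_schema()
print("$defs" in schema)         # True
print(schema["$defs"]["Owner"])  # the nested model's schema
```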
@@ -132,6 +133,24 @@ class BaseLMInvoker(ABC, metaclass=abc.ABCMeta):
         This method clears the response schema for the language model by calling the `set_response_schema` method with
         None.
         """
+    async def count_input_tokens(self, messages: LMInput) -> int:
+        """Counts the input tokens for an invocation request inputs.
+
+        This method counts the input tokens for an invocation request inputs. This method is useful for:
+        1. Estimating the cost of an invocation request before invoking the language model.
+        2. Checking if the invocation request is too large to be processed by the language model.
+
+        Args:
+            messages (LMInput): The input messages for the language model.
+                1. If a list of Message objects is provided, it is used as is.
+                2. If a list of MessageContent or a string is provided, it is converted into a user message.
+
+        Returns:
+            int: The number of input tokens for the invocation request.
+
+        Raises:
+            TimeoutError: If the invocation times out.
+        """
     async def invoke(self, messages: LMInput, hyperparameters: dict[str, Any] | None = None, event_emitter: EventEmitter | None = None) -> str | LMOutput:
         """Invokes the language model.
 
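A short usage sketch for the new `count_input_tokens` method documented above, run inside an async context; the context-window limit is an assumed figure, not one the diff specifies.

```python
# Pre-flight check with the new BaseLMInvoker.count_input_tokens method.
MAX_CONTEXT_TOKENS = 128_000  # assumption: the target model's context window

prompt = "Summarize the attached report."
n_tokens = await lm_invoker.count_input_tokens(prompt)  # may raise TimeoutError
if n_tokens > MAX_CONTEXT_TOKENS:
    raise ValueError(f"Prompt too large: {n_tokens} input tokens")
result = await lm_invoker.invoke(prompt)
```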
@@ -1,12 +1,11 @@
 from _typeshed import Incomplete
 from gllm_core.event import EventEmitter as EventEmitter
 from gllm_core.schema.tool import Tool as Tool
-from gllm_core.utils
+from gllm_core.utils import RetryConfig as RetryConfig
 from gllm_inference.constants import INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES, OPENAI_DEFAULT_URL as OPENAI_DEFAULT_URL
 from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
 from gllm_inference.lm_invoker.schema.openai_chat_completions import InputType as InputType, Key as Key, ReasoningEffort as ReasoningEffort
 from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, Message as Message, MessageRole as MessageRole, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, ThinkingEvent as ThinkingEvent, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
-from gllm_inference.utils import validate_string_enum as validate_string_enum
 from langchain_core.tools import Tool as LangChainTool
 from typing import Any
 
@@ -72,80 +71,116 @@ class OpenAIChatCompletionsLMInvoker(BaseLMInvoker):
     result = await lm_invoker.invoke([text, image])
     ```
 
-
-
-
-
-    `tool_calls` attribute in the output.
-
-    Usage example:
-    ```python
-    lm_invoker = OpenAIChatCompletionsLMInvoker(..., tools=[tool_1, tool_2])
-    ```
+    Text output:
+    The `OpenAIChatCompletionsLMInvoker` generates text outputs by default.
+    Text outputs are stored in the `outputs` attribute of the `LMOutput` object and can be accessed
+    via the `texts` (all text outputs) or `text` (first text output) properties.
 
     Output example:
     ```python
-    LMOutput(
-        response="Let me call the tools...",
-        tool_calls=[
-            ToolCall(id="123", name="tool_1", args={"key": "value"}),
-            ToolCall(id="456", name="tool_2", args={"key": "value"}),
-        ]
-    )
+    LMOutput(outputs=[LMOutputItem(type="text", output="Hello, there!")])
     ```
 
     Structured output:
-
+    The `OpenAIChatCompletionsLMInvoker` can be configured to generate structured outputs.
     This feature can be enabled by providing a schema to the `response_schema` parameter.
 
-
-
-    For this reason, it is recommended to create the JSON schema using Pydantic\'s `model_json_schema` method.
+    Structured outputs are stored in the `outputs` attribute of the `LMOutput` object and can be accessed
+    via the `structureds` (all structured outputs) or `structured` (first structured output) properties.
 
-    The
-
+    The schema must either be one of the following:
+    1. A Pydantic BaseModel class
+       The structured output will be a Pydantic model.
+    2. A JSON schema dictionary
+       JSON dictionary schema must be compatible with Pydantic\'s JSON schema, especially for complex schemas.
+       Thus, it is recommended to create the JSON schema using Pydantic\'s `model_json_schema` method.
+       The structured output will be a dictionary.
 
-    When enabled, the structured output is stored in the `structured_output` attribute in the output.
-    1. If the schema is a JSON schema dictionary, the structured output is a dictionary.
-    2. If the schema is a Pydantic BaseModel class, the structured output is a Pydantic model.
-
-    # Example 1: Using a JSON schema dictionary
     Usage example:
     ```python
-
-
-
-
-
-
-
-
-        "type": "object",
-    }
-    lm_invoker = OpenAIChatCompletionsLMInvoker(..., response_schema=schema)
+    class Animal(BaseModel):
+        name: str
+        color: str
+
+    json_schema = Animal.model_json_schema()
+
+    lm_invoker = OpenAIChatCompletionsLMInvoker(..., response_schema=Animal)  # Using Pydantic BaseModel class
+    lm_invoker = OpenAIChatCompletionsLMInvoker(..., response_schema=json_schema)  # Using JSON schema dictionary
     ```
+
     Output example:
     ```python
-
+    # Using Pydantic BaseModel class outputs a Pydantic model
+    LMOutput(outputs=[LMOutputItem(type="structured", output=Animal(name="dog", color="white"))])
+
+    # Using JSON schema dictionary outputs a dictionary
+    LMOutput(outputs=[LMOutputItem(type="structured", output={"name": "dog", "color": "white"})])
     ```
 
-
+    When structured output is enabled, streaming is disabled.
+
+    Tool calling:
+    The `OpenAIChatCompletionsLMInvoker` can be configured to call tools to perform certain tasks.
+    This feature can be enabled by providing a list of `Tool` objects to the `tools` parameter.
+
+    Tool calls outputs are stored in the `outputs` attribute of the `LMOutput` object and
+    can be accessed via the `tool_calls` property.
+
     Usage example:
     ```python
-
-
-    color: str
+    lm_invoker = OpenAIChatCompletionsLMInvoker(..., tools=[tool_1, tool_2])
+    ```
 
-
+    Output example:
+    ```python
+    LMOutput(
+        outputs=[
+            LMOutputItem(type="text", output="I\'m using tools..."),
+            LMOutputItem(type="tool_call", output=ToolCall(id="123", name="tool_1", args={"key": "value"})),
+            LMOutputItem(type="tool_call", output=ToolCall(id="456", name="tool_2", args={"key": "value"})),
+        ]
+    )
     ```
+
+    Reasoning:
+    The `OpenAILMInvoker` performs step-by-step reasoning before generating a response when reasoning
+    models are used, such as GPT-5 models and o-series models.
+
+    The reasoning effort can be set via the `reasoning_effort` parameter, which guides the models on the amount
+    of reasoning tokens to generate. Available options include `minimal`, `low`, `medium`, and `high`.
+
+    Some models may also output the reasoning tokens. In this case, the reasoning tokens are stored in
+    the `outputs` attribute of the `LMOutput` object and can be accessed via the `thinkings` property.
+
     Output example:
     ```python
-    LMOutput(
+    LMOutput(
+        outputs=[
+            LMOutputItem(type="thinking", output=Reasoning(reasoning="I\'m thinking...", ...)),
+            LMOutputItem(type="text", output="Golden retriever is a good dog breed."),
+        ]
+    )
+    ```
+
+    Streaming output example:
+    ```python
+    {"type": "thinking_start", "value": "", ...}
+    {"type": "thinking", "value": "I\'m ", ...}
+    {"type": "thinking", "value": "thinking...", ...}
+    {"type": "thinking_end", "value": "", ...}
+    {"type": "response", "value": "Golden retriever ", ...}
+    {"type": "response", "value": "is a good dog breed.", ...}
     ```
+    Note: By default, the thinking token will be streamed with the legacy `EventType.DATA` event type.
+    To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
+    LM invoker initialization. The legacy event format support will be removed in v0.6.
+
+    Setting reasoning-related parameters for non-reasoning models will raise an error.
 
     Analytics tracking:
-
+    The `OpenAIChatCompletionsLMInvoker` can be configured to output additional information about the invocation.
     This feature can be enabled by setting the `output_analytics` parameter to `True`.
+
     When enabled, the following attributes will be stored in the output:
     1. `token_usage`: The token usage.
     2. `duration`: The duration in seconds.
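Since this docstring mirrors the LiteLLM one almost line for line, here is just the structured-output round trip it describes, assembled into one hedged sketch. The `response_schema=Animal` pattern and the `structured` property are from the docstring; the model name, prompt, and async context are assumptions. Note that streaming is disabled in this mode per the docstring.

```python
# Structured-output round trip per the docstring above, inside an async function.
from pydantic import BaseModel

from gllm_inference.lm_invoker.openai_chat_completions_lm_invoker import OpenAIChatCompletionsLMInvoker
from gllm_inference.schema import LMOutput


class Animal(BaseModel):
    name: str
    color: str


lm_invoker = OpenAIChatCompletionsLMInvoker(model_name="gpt-4o", response_schema=Animal)  # assumed model
result = await lm_invoker.invoke("Describe a dog as JSON.")
if isinstance(result, LMOutput):
    animal = result.structured  # first structured output; an Animal instance per the docstring
```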
@@ -154,15 +189,14 @@ class OpenAIChatCompletionsLMInvoker(BaseLMInvoker):
     Output example:
     ```python
     LMOutput(
-
+        outputs=[...],
         token_usage=TokenUsage(input_tokens=100, output_tokens=50),
         duration=0.729,
-        finish_details={"
+        finish_details={"stop_reason": "end_turn"},
     )
     ```
 
-    When streaming is enabled, token usage is not supported.
-    regardless of the value of the `output_analytics` parameter.
+    When streaming is enabled, token usage is not supported.
 
     Retry and timeout:
     The `OpenAIChatCompletionsLMInvoker` supports retry and timeout configuration.
@@ -172,8 +206,6 @@ class OpenAIChatCompletionsLMInvoker(BaseLMInvoker):
     Retry config examples:
     ```python
     retry_config = RetryConfig(max_retries=0, timeout=None)  # No retry, no timeout
-    retry_config = RetryConfig(max_retries=0, timeout=10.0)  # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=None)  # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0)  # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -181,58 +213,6 @@ class OpenAIChatCompletionsLMInvoker(BaseLMInvoker):
     ```python
     lm_invoker = OpenAIChatCompletionsLMInvoker(..., retry_config=retry_config)
     ```
-
-    Reasoning:
-    Some language models support advanced reasoning capabilities. When using such reasoning-capable models,
-    you can configure how much reasoning the model should perform before generating a final response by setting
-    reasoning-related parameters.
-
-    The reasoning effort of reasoning models can be set via the `reasoning_effort` parameter. This parameter
-    will guide the models on how many reasoning tokens it should generate before creating a response to the prompt.
-    The reasoning effort is only supported by some language models.
-    Available options include:
-    1. "low": Favors speed and economical token usage.
-    2. "medium": Favors a balance between speed and reasoning accuracy.
-    3. "high": Favors more complete reasoning at the cost of more tokens generated and slower responses.
-    This may differ between models. When not set, the reasoning effort will be equivalent to None by default.
-
-    When using reasoning models, some providers might output the reasoning summary. These will be stored in the
-    `reasoning` attribute in the output.
-
-    Output example:
-    ```python
-    LMOutput(
-        response="Golden retriever is a good dog breed.",
-        reasoning=[Reasoning(id="", reasoning="Let me think about it...")],
-    )
-    ```
-
-    Streaming output example:
-    ```python
-    {"type": "thinking_start", "value": "", ...}
-    {"type": "thinking", "value": "Let me think ", ...}
-    {"type": "thinking", "value": "about it...", ...}
-    {"type": "thinking_end", "value": "", ...}
-    {"type": "response", "value": "Golden retriever ", ...}
-    {"type": "response", "value": "is a good dog breed.", ...}
-    ```
-    Note: By default, the thinking token will be streamed with the legacy `EventType.DATA` event type.
-    To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
-    LM invoker initialization. The legacy event format support will be removed in v0.6.
-
-    Setting reasoning-related parameters for non-reasoning models will raise an error.
-
-    Output types:
-    The output of the `OpenAIChatCompletionsLMInvoker` can either be:
-    1. `str`: A text response.
-    2. `LMOutput`: A Pydantic model that may contain the following attributes:
-        2.1. response (str)
-        2.2. tool_calls (list[ToolCall])
-        2.3. structured_output (dict[str, Any] | BaseModel | None)
-        2.4. token_usage (TokenUsage | None)
-        2.5. duration (float | None)
-        2.6. finish_details (dict[str, Any])
-        2.7. reasoning (list[Reasoning])
     '''
     client_kwargs: Incomplete
     def __init__(self, model_name: str, api_key: str | None = None, base_url: str = ..., model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, simplify_events: bool = False) -> None:
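Closing out, the `OpenAIChatCompletionsLMInvoker.__init__` signature above exposes `api_key` and `base_url` alongside the new flags. A hedged sketch; only the parameter names come from the diff, while the model name, environment variable, and commented endpoint are illustrative:

```python
# Instantiation sketch for the signature above; values are illustrative only.
import os

lm_invoker = OpenAIChatCompletionsLMInvoker(
    model_name="gpt-4o-mini",                  # assumed model name
    api_key=os.environ.get("OPENAI_API_KEY"),  # assumed key source
    # base_url defaults to OPENAI_DEFAULT_URL; an OpenAI-compatible server could
    # be targeted instead, e.g. base_url="http://localhost:8000/v1" (assumption).
    simplify_events=True,
)
```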