gllm-inference-binary 0.5.40__cp311-cp311-win_amd64.whl → 0.5.66__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. gllm_inference/builder/_build_invoker.pyi +28 -0
  2. gllm_inference/builder/build_em_invoker.pyi +12 -16
  3. gllm_inference/builder/build_lm_invoker.pyi +65 -17
  4. gllm_inference/constants.pyi +3 -2
  5. gllm_inference/em_invoker/__init__.pyi +3 -1
  6. gllm_inference/em_invoker/bedrock_em_invoker.pyi +16 -4
  7. gllm_inference/em_invoker/cohere_em_invoker.pyi +127 -0
  8. gllm_inference/em_invoker/jina_em_invoker.pyi +103 -0
  9. gllm_inference/em_invoker/schema/bedrock.pyi +7 -0
  10. gllm_inference/em_invoker/schema/cohere.pyi +20 -0
  11. gllm_inference/em_invoker/schema/jina.pyi +29 -0
  12. gllm_inference/exceptions/provider_error_map.pyi +1 -0
  13. gllm_inference/lm_invoker/__init__.pyi +3 -1
  14. gllm_inference/lm_invoker/anthropic_lm_invoker.pyi +95 -109
  15. gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi +92 -109
  16. gllm_inference/lm_invoker/batch/batch_operations.pyi +2 -1
  17. gllm_inference/lm_invoker/bedrock_lm_invoker.pyi +52 -65
  18. gllm_inference/lm_invoker/datasaur_lm_invoker.pyi +36 -36
  19. gllm_inference/lm_invoker/google_lm_invoker.pyi +195 -110
  20. gllm_inference/lm_invoker/langchain_lm_invoker.pyi +52 -64
  21. gllm_inference/lm_invoker/litellm_lm_invoker.pyi +86 -106
  22. gllm_inference/lm_invoker/lm_invoker.pyi +20 -1
  23. gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi +87 -107
  24. gllm_inference/lm_invoker/openai_lm_invoker.pyi +237 -186
  25. gllm_inference/lm_invoker/portkey_lm_invoker.pyi +296 -0
  26. gllm_inference/lm_invoker/schema/google.pyi +12 -0
  27. gllm_inference/lm_invoker/schema/openai.pyi +22 -0
  28. gllm_inference/lm_invoker/schema/portkey.pyi +31 -0
  29. gllm_inference/lm_invoker/sea_lion_lm_invoker.pyi +48 -0
  30. gllm_inference/lm_invoker/xai_lm_invoker.pyi +94 -131
  31. gllm_inference/model/__init__.pyi +5 -1
  32. gllm_inference/model/em/cohere_em.pyi +17 -0
  33. gllm_inference/model/em/jina_em.pyi +22 -0
  34. gllm_inference/model/lm/anthropic_lm.pyi +2 -0
  35. gllm_inference/model/lm/google_lm.pyi +1 -0
  36. gllm_inference/model/lm/sea_lion_lm.pyi +16 -0
  37. gllm_inference/model/lm/xai_lm.pyi +19 -0
  38. gllm_inference/prompt_builder/format_strategy/__init__.pyi +4 -0
  39. gllm_inference/prompt_builder/format_strategy/format_strategy.pyi +55 -0
  40. gllm_inference/prompt_builder/format_strategy/jinja_format_strategy.pyi +45 -0
  41. gllm_inference/prompt_builder/format_strategy/string_format_strategy.pyi +20 -0
  42. gllm_inference/prompt_builder/prompt_builder.pyi +23 -6
  43. gllm_inference/schema/__init__.pyi +4 -3
  44. gllm_inference/schema/activity.pyi +13 -11
  45. gllm_inference/schema/attachment.pyi +20 -6
  46. gllm_inference/schema/enums.pyi +30 -1
  47. gllm_inference/schema/events.pyi +69 -73
  48. gllm_inference/schema/formatter.pyi +31 -0
  49. gllm_inference/schema/lm_output.pyi +245 -23
  50. gllm_inference/schema/model_id.pyi +27 -3
  51. gllm_inference/utils/validation.pyi +3 -0
  52. gllm_inference.cp311-win_amd64.pyd +0 -0
  53. gllm_inference.pyi +23 -13
  54. {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/METADATA +10 -6
  55. {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/RECORD +57 -40
  56. {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/WHEEL +0 -0
  57. {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/top_level.txt +0 -0
@@ -1,12 +1,14 @@
  from _typeshed import Incomplete
+ from anthropic.types import ContentBlockStopEvent as ContentBlockStopEvent, Message as Message, RawContentBlockDeltaEvent as RawContentBlockDeltaEvent, RawContentBlockStartEvent as RawContentBlockStartEvent
  from gllm_core.event import EventEmitter as EventEmitter
  from gllm_core.schema.tool import Tool as Tool
  from gllm_core.utils.retry import RetryConfig as RetryConfig
  from gllm_inference.constants import INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
  from gllm_inference.lm_invoker.schema.anthropic import InputType as InputType, Key as Key, OutputType as OutputType
- from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, BatchStatus as BatchStatus, LMInput as LMInput, LMOutput as LMOutput, Message as Message, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, ThinkingEvent as ThinkingEvent, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
+ from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, BatchStatus as BatchStatus, LMInput as LMInput, LMOutput as LMOutput, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, ThinkingEvent as ThinkingEvent, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
  from langchain_core.tools import Tool as LangChainTool
+ from pydantic import BaseModel as BaseModel
  from typing import Any

  SUPPORTED_ATTACHMENTS: Incomplete
@@ -49,84 +51,123 @@ class AnthropicLMInvoker(BaseLMInvoker):
  result = await lm_invoker.invoke([text, image])
  ```

- Tool calling:
- Tool calling is a feature that allows the language model to call tools to perform tasks.
- Tools can be passed to the via the `tools` parameter as a list of `Tool` objects.
- When tools are provided and the model decides to call a tool, the tool calls are stored in the
- `tool_calls` attribute in the output.
+ Text output:
+ The `AnthropicLMInvoker` generates text outputs by default.
+ Text outputs are stored in the `outputs` attribute of the `LMOutput` object and can be accessed
+ via the `texts` (all text outputs) or `text` (first text output) properties.
+
+ Output example:
+ ```python
+ LMOutput(outputs=[LMOutputItem(type="text", output="Hello, there!")])
+ ```
+
+ Structured output:
+ The `AnthropicLMInvoker` can be configured to generate structured outputs.
+ This feature can be enabled by providing a schema to the `response_schema` parameter.
+
+ Structured outputs are stored in the `outputs` attribute of the `LMOutput` object and can be accessed
+ via the `structureds` (all structured outputs) or `structured` (first structured output) properties.
+
+ The schema must be one of the following:
+ 1. A Pydantic BaseModel class
+    The structured output will be a Pydantic model.
+ 2. A JSON schema dictionary
+    The JSON schema dictionary must be compatible with Pydantic\'s JSON schema, especially for complex schemas.
+    Thus, it is recommended to create the JSON schema using Pydantic\'s `model_json_schema` method.
+    The structured output will be a dictionary.

  Usage example:
  ```python
- lm_invoker = AnthropicLMInvoker(..., tools=[tool_1, tool_2])
+ class Animal(BaseModel):
+     name: str
+     color: str
+
+ json_schema = Animal.model_json_schema()
+
+ lm_invoker = AnthropicLMInvoker(..., response_schema=Animal) # Using Pydantic BaseModel class
+ lm_invoker = AnthropicLMInvoker(..., response_schema=json_schema) # Using JSON schema dictionary
  ```

  Output example:
  ```python
- LMOutput(
-     response="Let me call the tools...",
-     tool_calls=[
-         ToolCall(id="123", name="tool_1", args={"key": "value"}),
-         ToolCall(id="456", name="tool_2", args={"key": "value"}),
-     ]
- )
- ```
+ # Using Pydantic BaseModel class outputs a Pydantic model
+ LMOutput(outputs=[LMOutputItem(type="structured", output=Animal(name="dog", color="white"))])

- Structured output:
- Structured output is a feature that allows the language model to output a structured response.
- This feature can be enabled by providing a schema to the `response_schema` parameter.
+ # Using JSON schema dictionary outputs a dictionary
+ LMOutput(outputs=[LMOutputItem(type="structured", output={"name": "dog", "color": "white"})])
+ ```

- The schema must be either a JSON schema dictionary or a Pydantic BaseModel class.
- If JSON schema is used, it must be compatible with Pydantic\'s JSON schema, especially for complex schemas.
- For this reason, it is recommended to create the JSON schema using Pydantic\'s `model_json_schema` method.
+ Structured output is not compatible with tool calling or thinking.
+ When structured output is enabled, streaming is disabled.

- Structured output is achieved by providing the schema name in the `tool_choice` parameter. This forces
- the model to call the provided schema as a tool. Thus, structured output is not compatible with:
- 1. Tool calling, since the tool calling is reserved to force the model to call the provided schema as a tool.
- 2. Thinking, since thinking is not allowed when a tool use is forced through the `tool_choice` parameter.
- The language model also doesn\'t need to stream anything when structured output is enabled. Thus, standard
- invocation will be performed regardless of whether the `event_emitter` parameter is provided or not.
+ Tool calling:
+ The `AnthropicLMInvoker` can be configured to call tools to perform certain tasks.
+ This feature can be enabled by providing a list of `Tool` objects to the `tools` parameter.

- When enabled, the structured output is stored in the `structured_output` attribute in the output.
- 1. If the schema is a JSON schema dictionary, the structured output is a dictionary.
- 2. If the schema is a Pydantic BaseModel class, the structured output is a Pydantic model.
+ Tool call outputs are stored in the `outputs` attribute of the `LMOutput` object and
+ can be accessed via the `tool_calls` property.

- # Example 1: Using a JSON schema dictionary
  Usage example:
  ```python
- schema = {
-     "title": "Animal",
-     "description": "A description of an animal.",
-     "properties": {
-         "color": {"title": "Color", "type": "string"},
-         "name": {"title": "Name", "type": "string"},
-     },
-     "required": ["name", "color"],
-     "type": "object",
- }
- lm_invoker = AnthropicLMInvoker(..., response_schema=schema)
+ lm_invoker = AnthropicLMInvoker(..., tools=[tool_1, tool_2])
  ```
+
  Output example:
  ```python
- LMOutput(structured_output={"name": "Golden retriever", "color": "Golden"})
+ LMOutput(
+     outputs=[
+         LMOutputItem(type="text", output="I\'m using tools..."),
+         LMOutputItem(type="tool_call", output=ToolCall(id="123", name="tool_1", args={"key": "value"})),
+         LMOutputItem(type="tool_call", output=ToolCall(id="456", name="tool_2", args={"key": "value"})),
+     ]
+ )
  ```

- # Example 2: Using a Pydantic BaseModel class
+ Thinking:
+ The `AnthropicLMInvoker` can be configured to perform a step-by-step thinking process before answering.
+ This feature can be enabled by setting the `thinking` parameter to `True`.
+
+ Thinking outputs are stored in the `outputs` attribute of the `LMOutput` object
+ and can be accessed via the `thinkings` property.
+
  Usage example:
  ```python
- class Animal(BaseModel):
-     name: str
-     color: str
-
- lm_invoker = AnthropicLMInvoker(..., response_schema=Animal)
+ lm_invoker = AnthropicLMInvoker(..., thinking=True, thinking_budget=1024)
  ```
+
  Output example:
  ```python
- LMOutput(structured_output=Animal(name="Golden retriever", color="Golden"))
+ LMOutput(
+     outputs=[
+         LMOutputItem(type="thinking", output=Reasoning(type="thinking", reasoning="I\'m thinking...", ...)),
+         LMOutputItem(type="text", output="Golden retriever is a good dog breed."),
+     ]
+ )
+ ```
+
+ Streaming output example:
+ ```python
+ {"type": "thinking_start", "value": "", ...}
+ {"type": "thinking", "value": "I\'m ", ...}
+ {"type": "thinking", "value": "thinking...", ...}
+ {"type": "thinking_end", "value": "", ...}
+ {"type": "response", "value": "Golden retriever ", ...}
+ {"type": "response", "value": "is a good dog breed.", ...}
  ```
+ Note: By default, the thinking tokens will be streamed with the legacy `EventType.DATA` event type.
+ To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
+ LM invoker initialization. Support for the legacy event format will be removed in v0.6.
+
+ The number of tokens allocated for the thinking process can be set via the `thinking_budget` parameter.
+ For more information, please refer to the following documentation:
+ https://docs.claude.com/en/docs/build-with-claude/extended-thinking#working-with-thinking-budgets.
+
+ Thinking is only available for certain models, starting from Claude Sonnet 3.7.

  Analytics tracking:
- Analytics tracking is a feature that allows the module to output additional information about the invocation.
+ The `AnthropicLMInvoker` can be configured to output additional information about the invocation.
  This feature can be enabled by setting the `output_analytics` parameter to `True`.
+
  When enabled, the following attributes will be stored in the output:
  1. `token_usage`: The token usage.
  2. `duration`: The duration in seconds.
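
> The docstring above implies a short end-to-end flow for structured output. The sketch below is illustrative only: the import path, the `model_name` and `api_key` constructor arguments, and passing a plain string to `invoke` are assumptions, while `response_schema` and the `structured` property come straight from the docstring.

```python
# Illustrative sketch only. Assumed: the import path, the `model_name` and
# `api_key` constructor arguments, and that `invoke` accepts a plain string.
# Documented in the diff above: `response_schema` and the `structured` property.
import asyncio

from pydantic import BaseModel

from gllm_inference.lm_invoker import AnthropicLMInvoker


class Animal(BaseModel):
    name: str
    color: str


async def main() -> None:
    lm_invoker = AnthropicLMInvoker(model_name="claude-sonnet-4", api_key="...", response_schema=Animal)
    result = await lm_invoker.invoke("Describe a golden retriever.")
    animal = result.structured  # First structured output, an Animal instance.
    print(animal.name, animal.color)


asyncio.run(main())
```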
@@ -135,7 +176,7 @@ class AnthropicLMInvoker(BaseLMInvoker):
  Output example:
  ```python
  LMOutput(
-     response="Golden retriever is a good dog breed.",
+     outputs=[...],
      token_usage=TokenUsage(input_tokens=100, output_tokens=50),
      duration=0.729,
      finish_details={"stop_reason": "end_turn"},
@@ -150,8 +191,6 @@ class AnthropicLMInvoker(BaseLMInvoker):
  Retry config examples:
  ```python
  retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
- retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
- retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
  retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
  ```

@@ -160,47 +199,6 @@ class AnthropicLMInvoker(BaseLMInvoker):
  lm_invoker = AnthropicLMInvoker(..., retry_config=retry_config)
  ```

- Thinking:
- Thinking is a feature that allows the language model to have enhanced reasoning capabilities for complex tasks,
- while also providing transparency into its step-by-step thought process before it delivers its final answer.
- This feature is only available for certain models, starting from Claude 3.7 Sonnet.
- It can be enabled by setting the `thinking` parameter to `True`.
-
- When thinking is enabled, the amount of tokens allocated for the thinking process can be set via the
- `thinking_budget` parameter. The `thinking_budget`:
- 1. Must be greater than or equal to 1024.
- 2. Must be less than the `max_tokens` hyperparameter, as the `thinking_budget` is allocated from the
-    `max_tokens`. For example, if `max_tokens=2048` and `thinking_budget=1024`, the language model will
-    allocate at most 1024 tokens for thinking and the remaining 1024 tokens for generating the response.
-
- When enabled, the reasoning is stored in the `reasoning` attribute in the output.
-
- Usage example:
- ```python
- lm_invoker = AnthropicLMInvoker(..., thinking=True, thinking_budget=1024)
- ```
-
- Output example:
- ```python
- LMOutput(
-     response="Golden retriever is a good dog breed.",
-     reasoning=[Reasoning(type="thinking", reasoning="Let me think about it...", signature="x")],
- )
- ```
-
- Streaming output example:
- ```python
- {"type": "thinking_start", "value": "", ...}
- {"type": "thinking", "value": "Let me think "\', ...}
- {"type": "thinking", "value": "about it..."}\', ...}
- {"type": "thinking_end", "value": ""}\', ...}
- {"type": "response", "value": "Golden retriever ", ...}
- {"type": "response", "value": "is a good dog breed.", ...}
- ```
- Note: By default, the thinking token will be streamed with the legacy `EventType.DATA` event type.
- To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
- LM invoker initialization. The legacy event format support will be removed in v0.6.
-
  Batch processing:
  The `AnthropicLMInvoker` supports batch processing, which allows the language model to process multiple
  requests in a single call. Batch processing is supported through the `batch` attribute.
@@ -214,7 +212,7 @@ class AnthropicLMInvoker(BaseLMInvoker):
  Output example:
  ```python
  {
-     "request_1": LMOutput(response="The sky is blue."),
+     "request_1": LMOutput(outputs=[LMOutputItem(type="text", output="The sky is blue.")]),
      "request_2": LMOutput(finish_details={"type": "error", "error": {"message": "...", ...}, ...}),
  }
  ```
@@ -240,7 +238,7 @@ class AnthropicLMInvoker(BaseLMInvoker):
  Output example:
  ```python
  {
-     "request_1": LMOutput(response="The sky is blue."),
+     "request_1": LMOutput(outputs=[LMOutputItem(type="text", output="The sky is blue.")]),
      "request_2": LMOutput(finish_details={"type": "error", "error": {"message": "...", ...}, ...}),
  }
  ```
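
> A hedged sketch of consuming the `dict[str, LMOutput]` mapping shown in the output example above. Treating a `finish_details` entry with `"type": "error"` as a failed request mirrors that example but is an assumption, not documented API behavior.

```python
# Sketch only: iterates the batch-result mapping shown above.
# The error heuristic on `finish_details` is an assumption for illustration.
from gllm_inference.schema import LMOutput


def summarize_batch(results: dict[str, LMOutput]) -> None:
    for request_id, output in results.items():
        finish = output.finish_details or {}
        if finish.get("type") == "error":
            print(f"{request_id}: failed -> {finish.get('error')}")
        else:
            print(f"{request_id}: {output.text}")
```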
@@ -263,18 +261,6 @@ class AnthropicLMInvoker(BaseLMInvoker):
  ```python
  await lm_invoker.batch.cancel(batch_id)
  ```
-
- Output types:
- The output of the `AnthropicLMInvoker` can either be:
- 1. `str`: A text response.
- 2. `LMOutput`: A Pydantic model that may contain the following attributes:
-    2.1. response (str)
-    2.2. tool_calls (list[ToolCall])
-    2.3. structured_output (dict[str, Any] | BaseModel | None)
-    2.4. token_usage (TokenUsage | None)
-    2.5. duration (float | None)
-    2.6. finish_details (dict[str, Any])
-    2.7. reasoning (list[Reasoning])
  '''
  client: Incomplete
  thinking: Incomplete
@@ -51,11 +51,60 @@ class AzureOpenAILMInvoker(OpenAILMInvoker):
  result = await lm_invoker.invoke([text, image])
  ```

+ Text output:
+ The `AzureOpenAILMInvoker` generates text outputs by default.
+ Text outputs are stored in the `outputs` attribute of the `LMOutput` object and can be accessed
+ via the `texts` (all text outputs) or `text` (first text output) properties.
+
+ Output example:
+ ```python
+ LMOutput(outputs=[LMOutputItem(type="text", output="Hello, there!")])
+ ```
+
+ Structured output:
+ The `AzureOpenAILMInvoker` can be configured to generate structured outputs.
+ This feature can be enabled by providing a schema to the `response_schema` parameter.
+
+ Structured outputs are stored in the `outputs` attribute of the `LMOutput` object and can be accessed
+ via the `structureds` (all structured outputs) or `structured` (first structured output) properties.
+
+ The schema must be one of the following:
+ 1. A Pydantic BaseModel class
+    The structured output will be a Pydantic model.
+ 2. A JSON schema dictionary
+    The JSON schema dictionary must be compatible with Pydantic\'s JSON schema, especially for complex schemas.
+    Thus, it is recommended to create the JSON schema using Pydantic\'s `model_json_schema` method.
+    The structured output will be a dictionary.
+
+ Usage example:
+ ```python
+ class Animal(BaseModel):
+     name: str
+     color: str
+
+ json_schema = Animal.model_json_schema()
+
+ lm_invoker = AzureOpenAILMInvoker(..., response_schema=Animal) # Using Pydantic BaseModel class
+ lm_invoker = AzureOpenAILMInvoker(..., response_schema=json_schema) # Using JSON schema dictionary
+ ```
+
+ Output example:
+ ```python
+ # Using Pydantic BaseModel class outputs a Pydantic model
+ LMOutput(outputs=[LMOutputItem(type="structured", output=Animal(name="dog", color="white"))])
+
+ # Using JSON schema dictionary outputs a dictionary
+ LMOutput(outputs=[LMOutputItem(type="structured", output={"name": "dog", "color": "white"})])
+ ```
+
+ When structured output is enabled, streaming is disabled.
+
  Tool calling:
- Tool calling is a feature that allows the language model to call tools to perform tasks.
- Tools can be passed to the via the `tools` parameter as a list of `Tool` objects.
- When tools are provided and the model decides to call a tool, the tool calls are stored in the
- `tool_calls` attribute in the output.
+ The `AzureOpenAILMInvoker` can be configured to call tools to perform certain tasks.
+ This feature can be enabled by providing a list of `Tool` objects to the `tools` parameter.
+
+ Tool call outputs are stored in the `outputs` attribute of the `LMOutput` object and
+ can be accessed via the `tool_calls` property.

  Usage example:
  ```python
@@ -65,66 +114,62 @@ class AzureOpenAILMInvoker(OpenAILMInvoker):
  Output example:
  ```python
  LMOutput(
-     response="Let me call the tools...",
-     tool_calls=[
-         ToolCall(id="123", name="tool_1", args={"key": "value"}),
-         ToolCall(id="456", name="tool_2", args={"key": "value"}),
+     outputs=[
+         LMOutputItem(type="text", output="I\'m using tools..."),
+         LMOutputItem(type="tool_call", output=ToolCall(id="123", name="tool_1", args={"key": "value"})),
+         LMOutputItem(type="tool_call", output=ToolCall(id="456", name="tool_2", args={"key": "value"})),
      ]
  )
  ```

- Structured output:
- Structured output is a feature that allows the language model to output a structured response.
- This feature can be enabled by providing a schema to the `response_schema` parameter.
+ Reasoning:
+ The `AzureOpenAILMInvoker` performs step-by-step reasoning before generating a response when reasoning
+ models are used, such as GPT-5 models and o-series models.

- The schema must be either a JSON schema dictionary or a Pydantic BaseModel class.
- If JSON schema is used, it must be compatible with Pydantic\'s JSON schema, especially for complex schemas.
- For this reason, it is recommended to create the JSON schema using Pydantic\'s `model_json_schema` method.
+ The reasoning effort can be set via the `reasoning_effort` parameter, which guides the models on the number
+ of reasoning tokens to generate. Available options include `minimal`, `low`, `medium`, and `high`.

- The language model also doesn\'t need to stream anything when structured output is enabled. Thus, standard
- invocation will be performed regardless of whether the `event_emitter` parameter is provided or not.
+ While the raw reasoning tokens are not available, a summary of the reasoning tokens can still be generated.
+ This can be done by passing the desired summary level via the `reasoning_summary` parameter.
+ Available options include `auto` and `detailed`.

- When enabled, the structured output is stored in the `structured_output` attribute in the output.
- 1. If the schema is a JSON schema dictionary, the structured output is a dictionary.
- 2. If the schema is a Pydantic BaseModel class, the structured output is a Pydantic model.
+ Reasoning summaries are stored in the `outputs` attribute of the `LMOutput` object
+ and can be accessed via the `thinkings` property.

- # Example 1: Using a JSON schema dictionary
  Usage example:
  ```python
- schema = {
-     "title": "Animal",
-     "description": "A description of an animal.",
-     "properties": {
-         "color": {"title": "Color", "type": "string"},
-         "name": {"title": "Name", "type": "string"},
-     },
-     "required": ["name", "color"],
-     "type": "object",
- }
- lm_invoker = AzureOpenAILMInvoker(..., response_schema=schema)
+ lm_invoker = AzureOpenAILMInvoker(..., reasoning_effort="high", reasoning_summary="detailed")
  ```
+
  Output example:
  ```python
- LMOutput(structured_output={"name": "Golden retriever", "color": "Golden"})
+ LMOutput(
+     outputs=[
+         LMOutputItem(type="thinking", output=Reasoning(type="thinking", reasoning="I\'m thinking...", ...)),
+         LMOutputItem(type="text", output="Golden retriever is a good dog breed."),
+     ]
+ )
  ```

- # Example 2: Using a Pydantic BaseModel class
- Usage example:
- ```python
- class Animal(BaseModel):
-     name: str
-     color: str
-
- lm_invoker = AzureOpenAILMInvoker(..., response_schema=Animal)
- ```
- Output example:
+ Streaming output example:
  ```python
- LMOutput(structured_output=Animal(name="Golden retriever", color="Golden"))
+ {"type": "thinking_start", "value": "", ...}
+ {"type": "thinking", "value": "I\'m ", ...}
+ {"type": "thinking", "value": "thinking...", ...}
+ {"type": "thinking_end", "value": "", ...}
+ {"type": "response", "value": "Golden retriever ", ...}
+ {"type": "response", "value": "is a good dog breed.", ...}
  ```
+ Note: By default, the thinking tokens will be streamed with the legacy `EventType.DATA` event type.
+ To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
+ LM invoker initialization. Support for the legacy event format will be removed in v0.6.
+
+ Reasoning summary is not compatible with tool calling.

  Analytics tracking:
- Analytics tracking is a feature that allows the module to output additional information about the invocation.
+ The `AzureOpenAILMInvoker` can be configured to output additional information about the invocation.
  This feature can be enabled by setting the `output_analytics` parameter to `True`.
+
  When enabled, the following attributes will be stored in the output:
  1. `token_usage`: The token usage.
  2. `duration`: The duration in seconds.
@@ -133,15 +178,10 @@ class AzureOpenAILMInvoker(OpenAILMInvoker):
  Output example:
  ```python
  LMOutput(
-     response="Golden retriever is a good dog breed.",
-     token_usage=TokenUsage(
-         input_tokens=1500,
-         output_tokens=200,
-         input_token_details=InputTokenDetails(cached_tokens=1200, uncached_tokens=300),
-         output_token_details=OutputTokenDetails(reasoning_tokens=180, response_tokens=20),
-     ),
+     outputs=[...],
+     token_usage=TokenUsage(input_tokens=100, output_tokens=50),
      duration=0.729,
-     finish_details={"status": "completed", "incomplete_details": {"reason": None}},
+     finish_details={"stop_reason": "end_turn"},
  )
  ```

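> For reference, a minimal sketch of reading those analytics attributes off an `LMOutput`. It assumes the invoker was created with `output_analytics=True`, as described above; the attribute names come from the docstring.

```python
# Sketch only: reads the analytics attributes listed above from an `LMOutput`.
# Assumes the invoker was constructed with `output_analytics=True`.
from gllm_inference.schema import LMOutput


def log_analytics(result: LMOutput) -> None:
    if result.token_usage is not None:
        print(f"tokens: {result.token_usage.input_tokens} in / {result.token_usage.output_tokens} out")
    if result.duration is not None:
        print(f"duration: {result.duration:.3f}s")
    print(f"finish details: {result.finish_details}")
```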
@@ -153,8 +193,6 @@ class AzureOpenAILMInvoker(OpenAILMInvoker):
  Retry config examples:
  ```python
  retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
- retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
- retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
  retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
  ```

@@ -162,61 +200,6 @@ class AzureOpenAILMInvoker(OpenAILMInvoker):
  ```python
  lm_invoker = AzureOpenAILMInvoker(..., retry_config=retry_config)
  ```
-
- Reasoning:
- Azure OpenAI\'s GPT-5 models and o-series models are classified as reasoning models. Reasoning models think
- before they answer, producing a long internal chain of thought before responding to the user. Reasoning models
- excel in complex problem solving, coding, scientific reasoning, and multi-step planning for agentic workflows.
-
- The reasoning effort of reasoning models can be set via the `reasoning_effort` parameter. This parameter
- will guide the models on how many reasoning tokens it should generate before creating a response.
- Available options include:
- 1. "minimal": Favors the least amount of reasoning, only supported for GPT-5 models onwards.
- 2. "low": Favors speed and economical token usage.
- 3. "medium": Favors a balance between speed and reasoning accuracy.
- 4. "high": Favors more complete reasoning at the cost of more tokens generated and slower responses.
-
- Azure OpenAI doesn\'t expose the raw reasoning tokens. However, the summary of the reasoning tokens can still be
- generated. The summary level can be set via the `reasoning_summary` parameter. Available options include:
- 1. "auto": The model decides the summary level automatically.
- 2. "detailed": The model will generate a detailed summary of the reasoning tokens.
- Reasoning summary is not compatible with tool calling.
- When enabled, the reasoning summary will be stored in the `reasoning` attribute in the output.
-
- Output example:
- ```python
- LMOutput(
-     response="Golden retriever is a good dog breed.",
-     reasoning=[Reasoning(id="x", reasoning="Let me think about it...")],
- )
- ```
-
- Streaming output example:
- ```python
- {"type": "thinking_start", "value": ""}\', ...}
- {"type": "thinking", "value": "Let me think "}\', ...}
- {"type": "thinking", "value": "about it..."}\', ...}
- {"type": "thinking_end", "value": ""}\', ...}
- {"type": "response", "value": "Golden retriever ", ...}
- {"type": "response", "value": "is a good dog breed.", ...}
- ```
- Note: By default, the thinking token will be streamed with the legacy `EventType.DATA` event type.
- To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
- LM invoker initialization. The legacy event format support will be removed in v0.6.
-
- Setting reasoning-related parameters for non-reasoning models will raise an error.
-
- Output types:
- The output of the `AzureOpenAILMInvoker` can either be:
- 1. `str`: A text response.
- 2. `LMOutput`: A Pydantic model that may contain the following attributes:
-    2.1. response (str)
-    2.2. tool_calls (list[ToolCall])
-    2.3. structured_output (dict[str, Any] | BaseModel | None)
-    2.4. token_usage (TokenUsage | None)
-    2.5. duration (float | None)
-    2.6. finish_details (dict[str, Any] | None)
-    2.7. reasoning (list[Reasoning])
  '''
  client_kwargs: Incomplete
  def __init__(self, azure_endpoint: str, azure_deployment: str, api_key: str | None = None, api_version: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, reasoning_summary: ReasoningSummary | None = None, simplify_events: bool = False) -> None:
@@ -104,11 +104,12 @@ class BatchOperations:
  Returns:
      BatchStatus: The status of the batch job.
  """
- async def retrieve(self, batch_id: str) -> dict[str, LMOutput]:
+ async def retrieve(self, batch_id: str, **kwargs: Any) -> dict[str, LMOutput]:
  """Retrieves the results of a batch job.

  Args:
      batch_id (str): The ID of the batch job to get the results of.
+     **kwargs (Any): Additional keyword arguments.

  Returns:
      dict[str, LMOutput]: The results of the batch job.
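
> Since `retrieve` now accepts `**kwargs`, a thin wrapper can forward provider-specific options. The sketch below is an assumption-laden illustration: the signature is typed above, but which keyword arguments are meaningful is provider-dependent and not specified in this diff.

```python
# Sketch only: forwards provider-specific options through the new `**kwargs`
# parameter typed above. The `lm_invoker` argument is any initialized invoker
# exposing the documented `batch` attribute.
from typing import Any

from gllm_inference.schema import LMOutput


async def fetch_batch_results(lm_invoker: Any, batch_id: str, **kwargs: Any) -> dict[str, LMOutput]:
    return await lm_invoker.batch.retrieve(batch_id, **kwargs)
```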