gllm-inference-binary 0.5.52__cp311-cp311-win_amd64.whl → 0.5.54__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -102,49 +102,138 @@ class PortkeyLMInvoker(OpenAIChatCompletionsLMInvoker):
  result = await lm_invoker.invoke([text, image])
  ```
 
+ Text output:
+ The `PortkeyLMInvoker` generates text outputs by default.
+ Text outputs are stored in the `outputs` attribute of the `LMOutput` object and can be accessed
+ via the `texts` (all text outputs) or `text` (first text output) properties.
+
+ Output example:
+ ```python
+ LMOutput(outputs=[LMOutputItem(type="text", output="Hello, there!")])
+ ```
+
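A minimal sketch of how these accessors might be used; the prompt string is illustrative and the invoker configuration is elided:

```python
lm_invoker = PortkeyLMInvoker(...)  # configuration elided

result = await lm_invoker.invoke("Say hello.")  # illustrative prompt
print(result.text)   # first text output, e.g. "Hello, there!"
print(result.texts)  # list of all text outputs
```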
+ Structured output:
+ The `PortkeyLMInvoker` can be configured to generate structured outputs.
+ This feature can be enabled by providing a schema to the `response_schema` parameter.
+
+ Structured outputs are stored in the `outputs` attribute of the `LMOutput` object and can be accessed
+ via the `structureds` (all structured outputs) or `structured` (first structured output) properties.
+
+ The schema must be one of the following:
+ 1. A Pydantic BaseModel class
+    The structured output will be a Pydantic model.
+ 2. A JSON schema dictionary
+    The JSON schema dictionary must be compatible with Pydantic\'s JSON schema, especially for complex
+    schemas. Thus, it is recommended to create the JSON schema using Pydantic\'s `model_json_schema`
+    method. The structured output will be a dictionary.
+
+ Usage example:
+ ```python
+ class Animal(BaseModel):
+     name: str
+     color: str
+
+ json_schema = Animal.model_json_schema()
+
+ lm_invoker = PortkeyLMInvoker(..., response_schema=Animal)       # Using a Pydantic BaseModel class
+ lm_invoker = PortkeyLMInvoker(..., response_schema=json_schema)  # Using a JSON schema dictionary
+ ```
+
+ Output example:
+ ```python
+ # Using a Pydantic BaseModel class outputs a Pydantic model
+ LMOutput(outputs=[LMOutputItem(type="structured", output=Animal(name="dog", color="white"))])
+
+ # Using a JSON schema dictionary outputs a dictionary
+ LMOutput(outputs=[LMOutputItem(type="structured", output={"name": "dog", "color": "white"})])
+ ```
+
+ When structured output is enabled, streaming is disabled.
+
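A hedged sketch of reading the structured result back out, reusing the `Animal` schema above; the prompt is illustrative:

```python
lm_invoker = PortkeyLMInvoker(..., response_schema=Animal)

result = await lm_invoker.invoke("Describe a dog.")  # illustrative prompt
animal = result.structured        # first structured output: an Animal instance
print(animal.name, animal.color)  # e.g. "dog white"
```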
  Tool calling:
- Tools can be provided via the `tools` parameter to enable tool invocation.
+ The `PortkeyLMInvoker` can be configured to call tools to perform certain tasks.
+ This feature can be enabled by providing a list of `Tool` objects to the `tools` parameter.
+
+ Tool call outputs are stored in the `outputs` attribute of the `LMOutput` object and
+ can be accessed via the `tool_calls` property.
 
+ Usage example:
  ```python
  lm_invoker = PortkeyLMInvoker(..., tools=[tool_1, tool_2])
  ```
+
  Output example:
  ```python
  LMOutput(
-     response="Let me call the tools...",
-     tool_calls=[
-         ToolCall(id="123", name="tool_1", args={"key": "value"}),
+     outputs=[
+         LMOutputItem(type="text", output="I\'m using tools..."),
+         LMOutputItem(type="tool_call", output=ToolCall(id="123", name="tool_1", args={"key": "value"})),
+         LMOutputItem(type="tool_call", output=ToolCall(id="456", name="tool_2", args={"key": "value"})),
      ]
  )
  ```
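A sketch of dispatching the returned tool calls; the `tool_calls` property and the `ToolCall` fields are documented above, while `my_tool_functions` is a hypothetical name-to-callable registry:

```python
result = await lm_invoker.invoke("What's the weather like?")  # illustrative prompt

for tool_call in result.tool_calls:
    handler = my_tool_functions[tool_call.name]  # hypothetical registry
    tool_result = handler(**tool_call.args)      # execute the requested tool
```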
 
- Structured output:
- The `response_schema` parameter enables structured responses (Pydantic BaseModel or JSON schema).
+ Thinking:
+ The `PortkeyLMInvoker` can be configured to perform a step-by-step thinking process before answering.
+ This feature can be enabled by setting the `thinking` parameter to `True`.
 
+ Thinking outputs are stored in the `outputs` attribute of the `LMOutput` object
+ and can be accessed via the `thinkings` property.
+
+ Usage example:
  ```python
- class Animal(BaseModel):
-     name: str
-     color: str
- lm_invoker = PortkeyLMInvoker(..., response_schema=Animal)
+ lm_invoker = PortkeyLMInvoker(..., thinking=True, thinking_budget=1024)
  ```
+
  Output example:
  ```python
- LMOutput(structured_output=Animal(name="Golden retriever", color="Golden"))
+ LMOutput(
+     outputs=[
+         LMOutputItem(type="thinking", output=Reasoning(type="thinking", reasoning="I\'m thinking...", ...)),
+         LMOutputItem(type="text", output="Golden retriever is a good dog breed."),
+     ]
+ )
  ```
 
+ Streaming output example:
+ ```python
+ {"type": "thinking_start", "value": "", ...}
+ {"type": "thinking", "value": "I\'m ", ...}
+ {"type": "thinking", "value": "thinking...", ...}
+ {"type": "thinking_end", "value": "", ...}
+ {"type": "response", "value": "Golden retriever ", ...}
+ {"type": "response", "value": "is a good dog breed.", ...}
+ ```
+ Note: By default, the thinking tokens will be streamed with the legacy `EventType.DATA` event type.
+ To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
+ LM invoker initialization. The legacy event format support will be removed in v0.6.
+
+ The amount of tokens allocated for the thinking process can be set via the `thinking_budget` parameter.
+ For more information, please refer to the following documentation:
+ https://portkey.ai/docs/product/ai-gateway/multimodal-capabilities/thinking-mode.
+
+ Thinking is only available for certain models, depending on their capabilities.
+
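A sketch of inspecting the thinking trace alongside the final answer, using the `thinkings` and `text` accessors documented above; the prompt is illustrative:

```python
lm_invoker = PortkeyLMInvoker(..., thinking=True, thinking_budget=1024)

result = await lm_invoker.invoke("Which dog breed suits a family?")  # illustrative prompt
for reasoning in result.thinkings:
    print(reasoning.reasoning)  # the step-by-step thinking trace
print(result.text)              # the final answer
```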
  Analytics tracking:
- When `output_analytics=True`, the invoker includes token usage, duration, and finish details.
+ The `PortkeyLMInvoker` can be configured to output additional information about the invocation.
+ This feature can be enabled by setting the `output_analytics` parameter to `True`.
+
+ When enabled, the following attributes will be stored in the output:
+ 1. `token_usage`: The token usage.
+ 2. `duration`: The duration in seconds.
+ 3. `finish_details`: The details about how the generation finished.
 
+ Output example:
  ```python
  LMOutput(
-     response="Golden retriever is a good dog breed.",
+     outputs=[...],
      token_usage=TokenUsage(input_tokens=100, output_tokens=50),
      duration=0.729,
-     finish_details={"finish_reason": "stop"},
+     finish_details={"stop_reason": "end_turn"},
  )
  ```
 
- **Note:** When streaming is enabled, token usage analytics are not supported and will be `None`.
+ When streaming is enabled, token usage is not supported.
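A sketch of reading those analytics attributes off the result; the prompt and printed values are illustrative:

```python
lm_invoker = PortkeyLMInvoker(..., output_analytics=True)

result = await lm_invoker.invoke("Hi!")  # illustrative prompt
if result.token_usage is not None:       # None when streaming, per the note above
    print(result.token_usage.input_tokens, result.token_usage.output_tokens)
print(result.duration)        # seconds, e.g. 0.729
print(result.finish_details)  # e.g. {"stop_reason": "end_turn"}
```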
 
  Retry and timeout:
  The `PortkeyLMInvoker` supports retry and timeout configuration.
@@ -154,8 +243,6 @@ class PortkeyLMInvoker(OpenAIChatCompletionsLMInvoker):
  Retry config examples:
  ```python
  retry_config = RetryConfig(max_retries=0, timeout=None)  # No retry, no timeout
- retry_config = RetryConfig(max_retries=0, timeout=10.0)  # No retry, 10.0 seconds timeout
- retry_config = RetryConfig(max_retries=5, timeout=None)  # 5 max retries, no timeout
  retry_config = RetryConfig(max_retries=5, timeout=10.0)  # 5 max retries, 10.0 seconds timeout
  ```
 
@@ -163,57 +250,6 @@ class PortkeyLMInvoker(OpenAIChatCompletionsLMInvoker):
  ```python
  lm_invoker = PortkeyLMInvoker(..., retry_config=retry_config)
  ```
-
- Thinking:
- The `thinking` parameter enables enhanced reasoning capability for supported models.
- Thinking mode allocates additional "reasoning tokens" up to `thinking_budget` (minimum 1024).
- When enabled, the model\'s reasoning trace is stored in the `reasoning` attribute.
-
- ```python
- lm_invoker = PortkeyLMInvoker(..., thinking=True, thinking_budget=1024)
- ```
- Output example:
- ```python
- LMOutput(
-     response="Golden retriever is a good dog breed.",
-     reasoning=[Reasoning(reasoning="Let me think about it...")],
- )
- ```
-
- Streaming output example:
- ```python
- {"type": "thinking_start", "value": ""}
- {"type": "thinking", "value": "Let me think "}
- {"type": "thinking", "value": "about it..."}
- {"type": "thinking_end", "value": ""}
- {"type": "response", "value": "Golden retriever "}
- {"type": "response", "value": "is a good dog breed."}
- ```
-
- Note: By default, the thinking token will be streamed with the legacy `EventType.DATA` event type.
- To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
- LM invoker initialization. The legacy event format support will be removed in v0.6.
-
- When thinking is enabled, the amount of tokens allocated for the thinking process can be set via the
- `thinking_budget` parameter. The `thinking_budget`:
- 1. Must be a positive integer.
- 2. Must be at least 1024.
- 3. Must be less than or equal to the model\'s maximum context length.
- For more information, please refer to https://portkey.ai/docs/product/ai-gateway/multimodal-capabilities/thinking-mode
-
- Setting reasoning-related parameters for non-reasoning models will raise an error.
-
- Output types:
- The output of the `PortkeyLMInvoker` can either be:
- 1. `str`: A simple text response.
- 2. `LMOutput`: A structured response model that may contain:
-    2.1. response (str)
-    2.2. tool_calls (list[ToolCall])
-    2.3. structured_output (dict[str, Any] | BaseModel | None)
-    2.4. token_usage (TokenUsage | None)
-    2.5. duration (float | None)
-    2.6. finish_details (dict[str, Any] | None)
-    2.7. reasoning (list[Reasoning])
  '''
  model_kwargs: Incomplete
  thinking: Incomplete
@@ -50,115 +50,108 @@ class XAILMInvoker(BaseLMInvoker):
  result = await lm_invoker.invoke([text, image])
  ```
 
- Tool calling:
- Tool calling is a feature that allows the language model to call tools to perform tasks.
- Tools can be passed via the `tools` parameter as a list of `Tool` objects.
- When tools are provided and the model decides to call a tool, the tool calls are stored in the
- `tool_calls` attribute in the output.
-
- Usage example:
- ```python
- lm_invoker = XAILMInvoker(..., tools=[tool_1, tool_2])
- ```
+ Text output:
+ The `XAILMInvoker` generates text outputs by default.
+ Text outputs are stored in the `outputs` attribute of the `LMOutput` object and can be accessed
+ via the `texts` (all text outputs) or `text` (first text output) properties.
 
  Output example:
  ```python
- LMOutput(
-     response="Let me call the tools...",
-     tool_calls=[
-         ToolCall(id="123", name="tool_1", args={"key": "value"}),
-         ToolCall(id="456", name="tool_2", args={"key": "value"}),
-     ]
- )
+ LMOutput(outputs=[LMOutputItem(type="text", output="Hello, there!")])
  ```
 
  Structured output:
- Structured output is a feature that allows the language model to output a structured response.
+ The `XAILMInvoker` can be configured to generate structured outputs.
  This feature can be enabled by providing a schema to the `response_schema` parameter.
 
- The schema must be either a JSON schema dictionary or a Pydantic BaseModel class.
- If JSON schema is used, it must be compatible with Pydantic\'s JSON schema, especially for complex schemas.
- For this reason, it is recommended to create the JSON schema using Pydantic\'s `model_json_schema` method.
+ Structured outputs are stored in the `outputs` attribute of the `LMOutput` object and can be accessed
+ via the `structureds` (all structured outputs) or `structured` (first structured output) properties.
 
- The language model also doesn\'t need to stream anything when structured output is enabled. Thus, standard
- invocation will be performed regardless of whether the `event_emitter` parameter is provided or not.
+ The schema must be one of the following:
+ 1. A Pydantic BaseModel class
+    The structured output will be a Pydantic model.
+ 2. A JSON schema dictionary
+    The JSON schema dictionary must be compatible with Pydantic\'s JSON schema, especially for complex
+    schemas. Thus, it is recommended to create the JSON schema using Pydantic\'s `model_json_schema`
+    method. The structured output will be a dictionary.
 
- When enabled, the structured output is stored in the `structured_output` attribute in the output.
- 1. If the schema is a JSON schema dictionary, the structured output is a dictionary.
- 2. If the schema is a Pydantic BaseModel class, the structured output is a Pydantic model.
-
- # Example 1: Using a JSON schema dictionary
  Usage example:
  ```python
- schema = {
-     "title": "Animal",
-     "description": "A description of an animal.",
-     "properties": {
-         "color": {"title": "Color", "type": "string"},
-         "name": {"title": "Name", "type": "string"},
-     },
-     "required": ["name", "color"],
-     "type": "object",
- }
- lm_invoker = XAILMInvoker(..., response_schema=schema)
+ class Animal(BaseModel):
+     name: str
+     color: str
+
+ json_schema = Animal.model_json_schema()
+
+ lm_invoker = XAILMInvoker(..., response_schema=Animal)       # Using a Pydantic BaseModel class
+ lm_invoker = XAILMInvoker(..., response_schema=json_schema)  # Using a JSON schema dictionary
  ```
+
  Output example:
  ```python
- LMOutput(structured_output={"name": "Golden retriever", "color": "Golden"})
+ # Using a Pydantic BaseModel class outputs a Pydantic model
+ LMOutput(outputs=[LMOutputItem(type="structured", output=Animal(name="dog", color="white"))])
+
+ # Using a JSON schema dictionary outputs a dictionary
+ LMOutput(outputs=[LMOutputItem(type="structured", output={"name": "dog", "color": "white"})])
  ```
 
- # Example 2: Using a Pydantic BaseModel class
+ When structured output is enabled, streaming is disabled.
+
+ Tool calling:
+ The `XAILMInvoker` can be configured to call tools to perform certain tasks.
+ This feature can be enabled by providing a list of `Tool` objects to the `tools` parameter.
+
+ Tool call outputs are stored in the `outputs` attribute of the `LMOutput` object and
+ can be accessed via the `tool_calls` property.
+
  Usage example:
  ```python
- class Animal(BaseModel):
-     name: str
-     color: str
-
- lm_invoker = XAILMInvoker(..., response_schema=Animal)
+ lm_invoker = XAILMInvoker(..., tools=[tool_1, tool_2])
  ```
+
  Output example:
  ```python
- LMOutput(structured_output=Animal(name="Golden retriever", color="Golden"))
+ LMOutput(
+     outputs=[
+         LMOutputItem(type="text", output="I\'m using tools..."),
+         LMOutputItem(type="tool_call", output=ToolCall(id="123", name="tool_1", args={"key": "value"})),
+         LMOutputItem(type="tool_call", output=ToolCall(id="456", name="tool_2", args={"key": "value"})),
+     ]
+ )
  ```
 
  Reasoning:
- Reasoning effort is a feature specific to xAI\'s reasoning models that allows you to control the level
- of reasoning performed by the model. This feature can be enabled by setting the `reasoning_effort` parameter.
- Valid values are "low" and "high".
+ The `XAILMInvoker` performs step-by-step reasoning before generating a response when reasoning
+ models, such as `grok-3-mini`, are used.
+
+ For some models, the reasoning effort can be set via the `reasoning_effort` parameter, which guides
+ the model on how many reasoning tokens to generate. Available options include `low` and `high`.
 
- Please note that Grok 4 does not have a `reasoning_effort` parameter. If a `reasoning_effort` is provided,
- the request will return an error.
+ Some models may also output the reasoning tokens. In this case, the reasoning tokens are stored in
+ the `outputs` attribute of the `LMOutput` object and can be accessed via the `thinkings` property.
 
  Usage example:
  ```python
- lm_invoker = XAILMInvoker(
-     model_name="grok-3",
-     reasoning_effort="high"  # Enable high reasoning effort
- )
+ lm_invoker = XAILMInvoker(model_name="grok-3-mini", reasoning_effort="low")
  ```
 
- When reasoning effort is enabled, the model\'s internal reasoning process is captured and stored in the
- `reasoning` attribute in the output.
-
  Output example:
  ```python
  LMOutput(
-     response="The answer is 42",
-     reasoning=[
-         Reasoning(
-             id="reasoning_1",
-             reasoning="First, I need to understand the question. The user is asking about..."
-         )
+     outputs=[
+         LMOutputItem(type="thinking", output=Reasoning(reasoning="I\'m thinking...", ...)),
+         LMOutputItem(type="text", output="Golden retriever is a good dog breed."),
      ]
  )
  ```
 
  Streaming output example:
- ```python
- {"type": "thinking_start", "value": ""}\', ...}
- {"type": "thinking", "value": "Let me think "}\', ...}
- {"type": "thinking", "value": "about it..."}\', ...}
- {"type": "thinking_end", "value": ""}\', ...}
+ ```python
+ {"type": "thinking_start", "value": "", ...}
+ {"type": "thinking", "value": "I\'m ", ...}
+ {"type": "thinking", "value": "thinking...", ...}
+ {"type": "thinking_end", "value": "", ...}
  {"type": "response", "value": "Golden retriever ", ...}
  {"type": "response", "value": "is a good dog breed.", ...}
  ```
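Since the previous version of this docstring (removed below) noted that some models, such as Grok 4, reject the `reasoning_effort` parameter, a defensive sketch might gate it on the model name; the allow-list here is illustrative, not an API of the package:

```python
# Hypothetical allow-list; consult xAI's model documentation for which models
# actually accept `reasoning_effort` (the old docstring notes Grok 4 does not).
EFFORT_CAPABLE_MODELS = {"grok-3-mini"}

def make_invoker(model_name: str) -> XAILMInvoker:
    # Only pass `reasoning_effort` to models known to support it.
    kwargs = {"reasoning_effort": "low"} if model_name in EFFORT_CAPABLE_MODELS else {}
    return XAILMInvoker(model_name=model_name, **kwargs)
```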
@@ -166,26 +159,48 @@ class XAILMInvoker(BaseLMInvoker):
  To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
  LM invoker initialization. The legacy event format support will be removed in v0.6.
 
- Setting reasoning-related parameters for non-reasoning models will raise an error.
+ Web Search:
+ The `XAILMInvoker` can be configured to search the web for relevant information.
+ This feature can be enabled by setting the `web_search` parameter to `True`.
+
+ Web search citations are stored in the `outputs` attribute of the `LMOutput` object and
+ can be accessed via the `citations` property.
+
+ Usage example:
+ ```python
+ lm_invoker = XAILMInvoker(..., web_search=True)
+ ```
+
+ Output example:
+ ```python
+ LMOutput(
+     outputs=[
+         LMOutputItem(type="citation", output=Chunk(id="123", content="...", metadata={...}, score=None)),
+         LMOutputItem(type="text", output="According to recent reports... ([Source](https://example.com))."),
+     ],
+ )
+ ```
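A sketch of listing the citation chunks via the `citations` property documented above; the metadata keys follow the example in the previous version of this docstring (visible later in this diff), so treat them as indicative:

```python
lm_invoker = XAILMInvoker(..., web_search=True)

result = await lm_invoker.invoke("What are the latest AI developments?")  # illustrative prompt
for chunk in result.citations:
    print(chunk.metadata.get("title"), chunk.metadata.get("url"))  # keys may vary by source
print(result.text)
```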
 
  Analytics tracking:
- Analytics tracking is a feature that allows the module to output additional information about the invocation.
+ The `XAILMInvoker` can be configured to output additional information about the invocation.
  This feature can be enabled by setting the `output_analytics` parameter to `True`.
+
  When enabled, the following attributes will be stored in the output:
  1. `token_usage`: The token usage.
- 2. `finish_details`: The details about how the generation finished.
+ 2. `duration`: The duration in seconds.
+ 3. `finish_details`: The details about how the generation finished.
 
  Output example:
  ```python
  LMOutput(
-     response="Golden retriever is a good dog breed.",
+     outputs=[...],
      token_usage=TokenUsage(input_tokens=100, output_tokens=50),
-     finish_details={"finish_reason": "stop"},
+     duration=0.729,
+     finish_details={"stop_reason": "end_turn"},
  )
  ```
 
- When streaming is enabled, token usage is not supported. Therefore, the `token_usage` attribute will be `None`
- regardless of the value of the `output_analytics` parameter.
+ When streaming is enabled, token usage is not supported.
 
  Retry and timeout:
  The `XAILMInvoker` supports retry and timeout configuration.
@@ -195,8 +210,6 @@ class XAILMInvoker(BaseLMInvoker):
  Retry config examples:
  ```python
  retry_config = RetryConfig(max_retries=0, timeout=None)  # No retry, no timeout
- retry_config = RetryConfig(max_retries=0, timeout=10.0)  # No retry, 10.0 seconds timeout
- retry_config = RetryConfig(max_retries=5, timeout=None)  # 5 max retries, no timeout
  retry_config = RetryConfig(max_retries=5, timeout=10.0)  # 5 max retries, 10.0 seconds timeout
  ```
 
@@ -204,55 +217,6 @@ class XAILMInvoker(BaseLMInvoker):
  ```python
  lm_invoker = XAILMInvoker(..., retry_config=retry_config)
  ```
-
- Web Search:
- The web search is a feature that allows the language model to search the web for relevant information.
- This feature can be enabled by setting the `web_search` parameter to `True`.
-
- Usage example:
- ```python
- lm_invoker = XAILMInvoker(
-     model_name="grok-3",
-     web_search=True
- )
- ```
-
- When web search is enabled, the language model will search for relevant information and may cite the
- relevant sources (including from the X platform). The citations will be stored as `Chunk` objects in the
- `citations` attribute in the output.
-
- Output example:
- ```python
- LMOutput(
-     response="According to recent reports, the latest AI developments... ([Source](https://example.com)).",
-     citations=[
-         Chunk(
-             id="search_result_1",
-             content="Latest AI developments report",
-             metadata={
-                 "start_index": 164,
-                 "end_index": 275,
-                 "title": "Example title",
-                 "url": "https://www.example.com",
-                 "type": "url_citation",
-             },
-         ),
-     ],
- )
- ```
-
- Output types:
- The output of the `XAILMInvoker` can either be:
- 1. `str`: A text response.
- 2. `LMOutput`: A Pydantic model that may contain the following attributes:
-    2.1. response (str)
-    2.2. tool_calls (list[ToolCall])
-    2.3. structured_output (dict[str, Any] | BaseModel | None)
-    2.4. token_usage (TokenUsage | None)
-    2.5. duration (float | None)
-    2.6. finish_details (dict[str, Any])
-    2.7. reasoning (list[Reasoning])
-    2.8. citations (list[Chunk])
  '''
  reasoning_effort: Incomplete
  web_search: Incomplete
@@ -2,10 +2,10 @@ from gllm_inference.schema.activity import Activity as Activity, MCPCallActivity
  from gllm_inference.schema.attachment import Attachment as Attachment
  from gllm_inference.schema.code_exec_result import CodeExecResult as CodeExecResult
  from gllm_inference.schema.config import TruncationConfig as TruncationConfig
- from gllm_inference.schema.enums import AttachmentType as AttachmentType, BatchStatus as BatchStatus, EmitDataType as EmitDataType, JinjaEnvType as JinjaEnvType, LMEventType as LMEventType, LMEventTypeSuffix as LMEventTypeSuffix, MessageRole as MessageRole, TruncateSide as TruncateSide
+ from gllm_inference.schema.enums import AttachmentType as AttachmentType, BatchStatus as BatchStatus, EmitDataType as EmitDataType, JinjaEnvType as JinjaEnvType, LMEventType as LMEventType, LMEventTypeSuffix as LMEventTypeSuffix, LMOutputType as LMOutputType, MessageRole as MessageRole, TruncateSide as TruncateSide
  from gllm_inference.schema.events import ActivityEvent as ActivityEvent, CodeEvent as CodeEvent, ThinkingEvent as ThinkingEvent
  from gllm_inference.schema.lm_input import LMInput as LMInput
- from gllm_inference.schema.lm_output import LMOutput as LMOutput
+ from gllm_inference.schema.lm_output import LMOutput as LMOutput, LMOutputData as LMOutputData, LMOutputItem as LMOutputItem
  from gllm_inference.schema.mcp import MCPCall as MCPCall, MCPServer as MCPServer
  from gllm_inference.schema.message import Message as Message
  from gllm_inference.schema.model_id import ModelId as ModelId, ModelProvider as ModelProvider
@@ -15,4 +15,4 @@ from gllm_inference.schema.tool_call import ToolCall as ToolCall
  from gllm_inference.schema.tool_result import ToolResult as ToolResult
  from gllm_inference.schema.type_alias import EMContent as EMContent, MessageContent as MessageContent, ResponseSchema as ResponseSchema, Vector as Vector
 
- __all__ = ['Activity', 'ActivityEvent', 'Attachment', 'AttachmentType', 'BatchStatus', 'CodeEvent', 'CodeExecResult', 'EMContent', 'EmitDataType', 'LMEventType', 'LMEventTypeSuffix', 'InputTokenDetails', 'JinjaEnvType', 'LMInput', 'LMOutput', 'MCPCall', 'MCPCallActivity', 'MCPListToolsActivity', 'MCPServer', 'Message', 'MessageContent', 'MessageRole', 'ModelId', 'ModelProvider', 'OutputTokenDetails', 'Reasoning', 'ThinkingEvent', 'ResponseSchema', 'TokenUsage', 'ToolCall', 'ToolResult', 'TruncateSide', 'TruncationConfig', 'Vector', 'WebSearchActivity']
+ __all__ = ['Activity', 'ActivityEvent', 'Attachment', 'AttachmentType', 'BatchStatus', 'CodeEvent', 'CodeExecResult', 'EMContent', 'EmitDataType', 'InputTokenDetails', 'JinjaEnvType', 'LMEventType', 'LMEventTypeSuffix', 'LMInput', 'LMOutput', 'LMOutputItem', 'LMOutputData', 'LMOutputType', 'MCPCall', 'MCPCallActivity', 'MCPListToolsActivity', 'MCPServer', 'Message', 'MessageContent', 'MessageRole', 'ModelId', 'ModelProvider', 'OutputTokenDetails', 'Reasoning', 'ResponseSchema', 'ThinkingEvent', 'TokenUsage', 'ToolCall', 'ToolResult', 'TruncateSide', 'TruncationConfig', 'Vector', 'WebSearchActivity']
@@ -1,7 +1,7 @@
  from _typeshed import Incomplete
- from gllm_inference.constants import HEX_REPR_LENGTH as HEX_REPR_LENGTH
  from pydantic import BaseModel
 
+ HEX_REPR_LENGTH: int
  logger: Incomplete
 
  class Attachment(BaseModel):
@@ -35,6 +35,17 @@ class EmitDataType(StrEnum):
      THINKING_START = 'thinking_start'
      THINKING_END = 'thinking_end'
 
+ class LMOutputType(StrEnum):
+     """Defines valid types for language model outputs."""
+     TEXT = 'text'
+     STRUCTURED = 'structured'
+     ATTACHMENT = 'attachment'
+     TOOL_CALL = 'tool_call'
+     THINKING = 'thinking'
+     CITATION = 'citation'
+     CODE_EXEC_RESULT = 'code_exec_result'
+     MCP_CALL = 'mcp_call'
+
  class ActivityType(StrEnum):
      """Defines valid activity types."""
      FIND_IN_PAGE = 'find_in_page'
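A sketch of branching on `LMOutputType` when walking `LMOutput.outputs`; the enum values come from the stub above, and iterating `outputs` with `item.type` / `item.output` follows the `LMOutputItem` examples in the docstrings earlier in this diff:

```python
from gllm_inference.schema import LMOutputItem, LMOutputType

def describe(items: list[LMOutputItem]) -> None:
    # LMOutputType is a StrEnum, so comparison against the string `type`
    # values shown in the docstring examples works directly.
    for item in items:
        if item.type == LMOutputType.TEXT:
            print("text:", item.output)
        elif item.type == LMOutputType.TOOL_CALL:
            print("tool call:", item.output.name)  # ToolCall fields per the examples above
        elif item.type == LMOutputType.THINKING:
            print("thinking trace captured")
```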