literun 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information in this diff is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- literun/__init__.py +33 -0
- literun/agent.py +411 -0
- literun/args_schema.py +87 -0
- literun/constants.py +21 -0
- literun/events.py +110 -0
- literun/items.py +106 -0
- literun/llm.py +156 -0
- literun/prompt_message.py +136 -0
- literun/prompt_template.py +181 -0
- literun/results.py +51 -0
- literun/tool.py +145 -0
- literun/utils.py +73 -0
- literun-0.1.0.dist-info/METADATA +242 -0
- literun-0.1.0.dist-info/RECORD +17 -0
- literun-0.1.0.dist-info/WHEEL +5 -0
- literun-0.1.0.dist-info/licenses/LICENSE +21 -0
- literun-0.1.0.dist-info/top_level.txt +1 -0
literun/__init__.py
ADDED
@@ -0,0 +1,33 @@
+"""Literun package initialization."""
+
+from __future__ import annotations
+
+from .agent import Agent
+from .llm import ChatOpenAI
+from .tool import Tool, ToolRuntime
+from .args_schema import ArgsSchema
+from .prompt_template import PromptTemplate
+from .prompt_message import PromptMessage
+from .constants import Role, ContentType
+from .items import RunItem
+from .events import StreamEvent
+from .results import RunResult, RunResultStreaming
+
+
+__all__ = [
+    "Agent",
+    "ChatOpenAI",
+    "Tool",
+    "ToolRuntime",
+    "ArgsSchema",
+    "PromptTemplate",
+    "PromptMessage",
+    "Role",
+    "ContentType",
+    "RunItem",
+    "StreamEvent",
+    "RunResult",
+    "RunResultStreaming",
+]
+
+__version__ = "0.1.0"
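
The package exposes its whole public API at the top level. For orientation, a minimal wiring sketch; the `ChatOpenAI` constructor arguments are an assumption here, since `llm.py`'s signature is not reproduced in this diff:

    from literun import Agent, ChatOpenAI

    # Hypothetical constructor arguments; only the class name is confirmed by this diff.
    llm = ChatOpenAI(model="gpt-4.1-mini")
    agent = Agent(llm=llm, system_prompt="You are a helpful assistant.")

    result = agent.invoke(user_input="Hello!")
    print(result.final_output)
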
literun/agent.py
ADDED
@@ -0,0 +1,411 @@
+"""Agent runtime implementation."""
+
+from __future__ import annotations
+
+import json
+from typing import Any, Dict, Iterable, Optional, Iterator, List, Union
+
+from .tool import Tool
+from .llm import ChatOpenAI
+from .prompt_template import PromptTemplate
+from .results import RunResult, RunResultStreaming
+from .items import (
+    RunItem,
+    MessageOutputItem,
+    ToolCallItem,
+    ToolCallOutputItem,
+    ReasoningItem,
+    ResponseFunctionToolCallOutput,
+)
+from .events import (
+    StreamEvent,
+    ResponseFunctionCallOutputItemAddedEvent,
+    ResponseFunctionCallOutputItemDoneEvent,
+)
+
+
+class Agent:
+    """A minimal agent runtime built on the OpenAI Responses API."""
+
+    def __init__(
+        self,
+        *,
+        llm: ChatOpenAI,
+        system_prompt: Optional[str] = None,
+        tools: Optional[List[Tool]] = None,
+        tool_choice: str = "auto",  # "auto", "none", "required"
+        parallel_tool_calls: bool = True,
+        max_iterations: int = 10,
+    ) -> None:
+        """Initialize the Agent.
+
+        Args:
+            llm: The OpenAI language model instance to use.
+            system_prompt: The system instructions for the agent.
+            tools: An optional list of Tool instances to register.
+            tool_choice: Strategy for selecting tools during execution.
+                One of "auto", "none", or "required".
+            parallel_tool_calls: Whether to call tools in parallel.
+            max_iterations: Maximum number of iterations for the agent loop. Must be >= 1.
+
+        Raises:
+            ValueError: If max_iterations is less than 1.
+        """
+        if max_iterations < 1:
+            raise ValueError("max_iterations must be >= 1")
+
+        self.llm = llm
+        self.system_prompt = system_prompt
+        self.tool_choice = tool_choice
+        self.parallel_tool_calls = parallel_tool_calls
+        self.max_iterations = max_iterations
+        self.tools: Dict[str, Tool] = self.add_tools(tools)
+
+    def add_tools(
+        self,
+        tools: Optional[Iterable[Tool]],
+    ) -> Dict[str, Tool]:
+        """Register a set of tools for the agent.
+
+        Args:
+            tools: An optional iterable of Tool instances to register.
+
+        Returns:
+            Dict[str, Tool]: A mapping from tool names to their Tool instances.
+
+        Raises:
+            ValueError: If there are duplicate tool names.
+        """
+        tool_map: Dict[str, Tool] = {}
+        for tool in tools or []:
+            if tool.name in tool_map:
+                raise ValueError(f"Duplicate tool name: {tool.name}")
+            tool_map[tool.name] = tool
+        return tool_map
+
+    def add_tool(self, tool: Tool) -> None:
+        """Add a single tool at runtime.
+
+        This method mutates agent state. Intended for advanced/dynamic use cases.
+
+        Args:
+            tool: The tool instance to register.
+
+        Raises:
+            ValueError: If a tool with the same name is already registered.
+        """
+        if tool.name in self.tools:
+            raise ValueError(f"Tool '{tool.name}' already registered")
+        self.tools[tool.name] = tool
+
+    def _convert_to_openai_tools(self) -> List[Dict[str, Any]]:
+        """Convert all registered tools to the OpenAI tool schema format.
+
+        Returns:
+            List[Dict[str, Any]]: A list of tools in OpenAI-compatible dictionary format.
+        """
+        return [tool.to_openai_tool() for tool in self.tools.values()]
+
+    def _execute_tool(
+        self,
+        name: str,
+        arguments: Union[str, Dict[str, Any]],
+        runtime_context: Optional[Dict[str, Any]] = None,
+    ) -> str:
+        """Execute a registered tool safely with provided arguments.
+
+        Handles parsing of arguments (from a JSON string or dict) and catches execution errors.
+
+        Args:
+            name: The name of the tool to execute.
+            arguments: Arguments to pass to the tool, either as a JSON string or dict.
+            runtime_context: Optional runtime context to pass to tool arguments of type ``ToolRuntime``.
+
+        Returns:
+            str: The output of the tool execution, or an error message if execution fails.
+        """
+        tool = self.tools.get(name)
+        if not tool:
+            return f"Error: Tool '{name}' not found"
+
+        try:
+            if isinstance(arguments, str):
+                args = json.loads(arguments)
+            else:
+                args = arguments
+
+            result = tool.execute(args, runtime_context)
+            return str(result)
+        except Exception as e:
+            return f"Error executing tool '{name}': {e}"
+
+    def _build_prompt(
+        self, user_input: str, prompt_template: Optional[PromptTemplate] = None
+    ) -> PromptTemplate:
+        """Construct the conversation state for a new agent turn.
+
+        Args:
+            user_input: The user's input text.
+            prompt_template: Optional template to initialize the conversation history.
+                If None, a new ``PromptTemplate`` is created, and the system prompt is added if available.
+
+        Returns:
+            ``PromptTemplate``: The fully constructed prompt containing system, user, and previous messages.
+        """
+        if prompt_template is not None:
+            prompt = prompt_template.copy()
+        else:
+            prompt = PromptTemplate()
+            if self.system_prompt:
+                prompt.add_system(self.system_prompt)
+
+        prompt.add_user(user_input)
+        return prompt
+
+    def invoke(
+        self,
+        *,
+        user_input: str,
+        prompt_template: Optional[PromptTemplate] = None,
+        runtime_context: Optional[Dict[str, Any]] = None,
+    ) -> RunResult:
+        """Run the agent synchronously.
+
+        This method executes the agent loop, calling the language model and any
+        registered tools until a final output is produced or the maximum number
+        of iterations is reached. Each step in the execution is recorded in
+        the returned ``RunResult``.
+
+        Args:
+            user_input: The input text from the user.
+            prompt_template: Optional template to initialize conversation history.
+            runtime_context: Optional runtime context dictionary to pass to tools.
+
+        Returns:
+            ``RunResult``: Contains the original input, all items generated
+            during execution (messages, tool calls, reasoning), and the final output.
+
+        Raises:
+            ValueError: If ``user_input`` is empty.
+            RuntimeError: If the agent exceeds ``max_iterations`` without completing.
+        """
+        if not user_input:
+            raise ValueError("user_input cannot be empty")
+
+        prompt = self._build_prompt(user_input, prompt_template)
+        all_items: List[RunItem] = []
+
+        for _ in range(self.max_iterations):
+            response = self.llm.chat(
+                messages=prompt.to_openai_input(),
+                stream=False,
+                tools=self._convert_to_openai_tools() if self.tools else None,
+                tool_choice=self.tool_choice,
+                parallel_tool_calls=self.parallel_tool_calls,
+            )
+
+            tool_calls: Dict[str, Dict[str, Any]] = {}
+            final_output_text: str = ""
+
+            # Process each output item from the OpenAI response
+            for item in response.output:
+                if item.type == "reasoning":
+                    all_items.append(
+                        ReasoningItem(
+                            role="assistant",
+                            content=item.content,
+                            raw_item=item,
+                            type="reasoning_item",
+                        )
+                    )
+
+                elif item.type == "function_call":
+                    tool_calls[item.id] = {
+                        "call_id": item.call_id,
+                        "name": item.name,
+                        "arguments": item.arguments,
+                    }
+                    all_items.append(
+                        ToolCallItem(
+                            role="assistant",
+                            content="",
+                            raw_item=item,
+                            type="tool_call_item",
+                        )
+                    )
+
+                elif item.type == "message":
+                    text_parts = [
+                        c.text for c in item.content if c.type == "output_text"
+                    ]
+                    final_output_text = "".join(text_parts)
+                    all_items.append(
+                        MessageOutputItem(
+                            role="assistant",
+                            content=final_output_text,
+                            raw_item=item,
+                            type="message_output_item",
+                        )
+                    )
+
+            if not tool_calls:
+                return RunResult(
+                    input=user_input,
+                    new_items=all_items,
+                    final_output=final_output_text,
+                )
+
+            # Update history with the assistant's text (critical for context)
+            if final_output_text:
+                prompt.add_assistant(final_output_text)
+
+            for tc in tool_calls.values():
+                call_id = tc["call_id"]
+                name = tc["name"]
+                arguments_str = tc["arguments"]
+
+                prompt.add_tool_call(
+                    name=name,
+                    arguments=arguments_str,
+                    call_id=call_id,
+                )
+
+                tool_output = self._execute_tool(name, arguments_str, runtime_context)
+
+                prompt.add_tool_output(call_id=call_id, output=tool_output)
+
+                all_items.append(
+                    ToolCallOutputItem(
+                        role="tool",
+                        content=tool_output,
+                        raw_item=ResponseFunctionToolCallOutput(
+                            call_id=call_id,
+                            output=tool_output,
+                            name=name,
+                            type="function_call_output",
+                            status="completed",
+                        ),
+                        type="tool_call_output_item",
+                    )
+                )
+
+        raise RuntimeError(f"Agent exceeded max iterations ({self.max_iterations})")
+
+    def stream(
+        self,
+        *,
+        user_input: str,
+        prompt_template: Optional[PromptTemplate] = None,
+        runtime_context: Optional[Dict[str, Any]] = None,
+    ) -> Iterator[RunResultStreaming]:
+        """Run the agent with streaming output.
+
+        This method streams response events from the agent as they occur,
+        including messages, tool calls, and tool outputs. It allows
+        real-time processing of the agent's reasoning and tool execution.
+
+        Args:
+            user_input: The input text from the user.
+            prompt_template: Optional template to initialize conversation history.
+            runtime_context: Optional runtime context dictionary to pass to tools.
+
+        Yields:
+            ``RunResultStreaming``: Streaming events containing the current input,
+            the event from the LLM or tool, and the accumulated final output.
+
+        Raises:
+            ValueError: If ``user_input`` is empty.
+            RuntimeError: If the agent exceeds ``max_iterations``.
+        """
+        if not user_input:
+            raise ValueError("user_input cannot be empty")
+
+        prompt = self._build_prompt(user_input, prompt_template)
+
+        for _ in range(self.max_iterations):
+            response_stream = self.llm.chat(
+                messages=prompt.to_openai_input(),
+                stream=True,
+                tools=self._convert_to_openai_tools() if self.tools else None,
+                tool_choice=self.tool_choice,
+                parallel_tool_calls=self.parallel_tool_calls,
+            )
+
+            tool_calls: Dict[str, Dict[str, Any]] = {}
+            final_output_text: str = ""
+
+            for event in response_stream:
+                yield RunResultStreaming(
+                    input=user_input,
+                    event=event,
+                    final_output=final_output_text,
+                )
+
+                if event.type == "response.output_item.done":
+                    if event.item.type == "message":
+                        for content_part in event.item.content:
+                            if content_part.type == "output_text":
+                                final_output_text += content_part.text
+
+                    elif event.item.type == "function_call":
+                        tool_calls[event.item.id] = {
+                            "call_id": event.item.call_id,
+                            "name": event.item.name,
+                            "arguments": event.item.arguments,
+                        }
+
+            if not tool_calls:
+                return
+
+            # Update history with the assistant's text (critical for context)
+            if final_output_text:
+                prompt.add_assistant(final_output_text)
+
+            for tc in tool_calls.values():
+                call_id = tc["call_id"]
+                name = tc["name"]
+                arguments_str = tc["arguments"]
+
+                prompt.add_tool_call(
+                    name=name, arguments=arguments_str, call_id=call_id
+                )
+
+                yield RunResultStreaming(
+                    input=user_input,
+                    event=ResponseFunctionCallOutputItemAddedEvent(
+                        type="response.function_call_output_item.added",
+                        item=ResponseFunctionToolCallOutput(
+                            call_id=call_id,
+                            output="",
+                            name=name,
+                            type="function_call_output",
+                            status="in_progress",
+                        ),
+                        output_index=None,
+                        sequence_number=None,
+                    ),
+                    final_output=final_output_text,
+                )
+
+                tool_output = self._execute_tool(name, arguments_str, runtime_context)
+
+                prompt.add_tool_output(call_id=call_id, output=tool_output)
+
+                yield RunResultStreaming(
+                    input=user_input,
+                    event=ResponseFunctionCallOutputItemDoneEvent(
+                        type="response.function_call_output_item.done",
+                        item=ResponseFunctionToolCallOutput(
+                            call_id=call_id,
+                            output=tool_output,
+                            name=name,
+                            type="function_call_output",
+                            status="completed",
+                        ),
+                        output_index=None,
+                        sequence_number=None,
+                    ),
+                    final_output=final_output_text,
+                )
+
+        raise RuntimeError(f"Agent exceeded max iterations ({self.max_iterations})")
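
To make the two entry points concrete, a minimal consumer sketch, assuming an `agent` constructed as in the earlier sketch. The delta event type string is the standard OpenAI Responses streaming name; the tool-output branch only fires when tools are registered:

    # Blocking run: the loop resolves tool calls internally and returns once.
    result = agent.invoke(user_input="What is 2 + 2?")
    for item in result.new_items:
        print(item.type)  # e.g. tool_call_item, tool_call_output_item, message_output_item
    print(result.final_output)

    # Streaming run: one RunResultStreaming per event, including the
    # package's own function_call_output events emitted between LLM turns.
    for chunk in agent.stream(user_input="What is 2 + 2?"):
        event = chunk.event
        if event.type == "response.output_text.delta":
            print(event.delta, end="", flush=True)   # incremental assistant text
        elif event.type == "response.function_call_output_item.done":
            print(f"\n[{event.item.name} -> {event.item.output}]")
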
literun/args_schema.py
ADDED
@@ -0,0 +1,87 @@
+"""Schema definition for tool arguments."""
+
+from __future__ import annotations
+
+from typing import Any, List, Optional, Type, Dict
+
+
+class ArgsSchema:
+    """Represents an argument for a tool."""
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        type: Type,
+        description: str,
+        enum: Optional[List[Any]] = None,
+    ):
+        """Initialize an ArgsSchema.
+
+        Args:
+            name: The name of the argument.
+            type: The Python type of the argument (e.g., str, int, float, bool).
+            description: A description of the argument for documentation purposes.
+            enum: Optional list of allowed values for the argument.
+        """
+        self.name = name
+        self.type_ = type
+        self.description = description
+        self.enum = enum
+
+    # JSON schema representation
+    def to_json_schema(self) -> Dict[str, Any]:
+        """Convert the argument to a JSON Schema representation.
+
+        Returns:
+            Dict[str, Any]: A dictionary representing the argument in JSON Schema format.
+        """
+        schema = {
+            "type": self._json_type(),
+            "description": self.description,
+        }
+        if self.enum:
+            schema["enum"] = self.enum
+        return schema
+
+    # Runtime validation / coercion
+    def validate_and_cast(self, value: Any) -> Any:
+        """Validate a value against the argument's type and cast it.
+
+        Args:
+            value: The value to validate and cast.
+
+        Returns:
+            Any: The value cast to the argument's Python type.
+
+        Raises:
+            ValueError: If the value is missing or cannot be cast to the expected type.
+        """
+        if value is None:
+            raise ValueError(f"Missing required argument '{self.name}'")
+        try:
+            return self.type_(value)
+        except Exception as e:
+            raise ValueError(
+                f"Invalid value for '{self.name}': expected {self.type_.__name__}"
+            ) from e
+
+    # Helpers
+    def _json_type(self) -> str:
+        """Get the JSON Schema type corresponding to the Python type.
+
+        Returns:
+            str: The JSON type string (e.g., "string", "integer", "number", "boolean").
+
+        Raises:
+            ValueError: If the Python type is unsupported.
+        """
+        if self.type_ is str:
+            return "string"
+        if self.type_ is int:
+            return "integer"
+        if self.type_ is float:
+            return "number"
+        if self.type_ is bool:
+            return "boolean"
+        raise ValueError(f"Unsupported type: {self.type_}")
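
A short illustration of the mapping and the coercion behavior, using only what this file defines:

    from literun import ArgsSchema

    unit = ArgsSchema(
        name="unit",
        type=str,
        description="Temperature unit.",
        enum=["celsius", "fahrenheit"],
    )
    print(unit.to_json_schema())
    # {'type': 'string', 'description': 'Temperature unit.', 'enum': ['celsius', 'fahrenheit']}

    count = ArgsSchema(name="count", type=int, description="How many results.")
    print(count.validate_and_cast("3"))  # -> 3, cast from a JSON-decoded string

One caveat worth noting: coercion delegates to the plain Python constructor, so casting the string "false" with `type=bool` yields `True` (any non-empty string is truthy); boolean arguments should arrive as actual JSON booleans.
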
literun/constants.py
ADDED
@@ -0,0 +1,21 @@
+"""Constants and Enums for the literun package."""
+
+from __future__ import annotations
+
+from enum import Enum
+
+
+class Role(str, Enum):
+    SYSTEM = "system"
+    USER = "user"
+    ASSISTANT = "assistant"
+    DEVELOPER = "developer"
+    TOOL = "tool"
+
+
+class ContentType(str, Enum):
+    INPUT_TEXT = "input_text"
+    OUTPUT_TEXT = "output_text"
+    MESSAGE = "message"
+    FUNCTION_CALL = "function_call"
+    FUNCTION_CALL_OUTPUT = "function_call_output"
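
Because both enums subclass `str`, members compare equal to their literal values and JSON-encode as plain strings, which is what lets them be used interchangeably with raw role/type strings in message dicts:

    import json
    from literun import Role, ContentType

    assert Role.USER == "user"                        # str subclass: equal to its value
    assert ContentType.OUTPUT_TEXT.value == "output_text"
    print(json.dumps({"role": Role.ASSISTANT}))       # {"role": "assistant"}
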
literun/events.py
ADDED
@@ -0,0 +1,110 @@
+"""Response events for streaming agent execution."""
+
+from __future__ import annotations
+
+from typing import Union, Literal, TypeAlias
+from pydantic import BaseModel
+
+from .items import ResponseFunctionToolCallOutput
+from openai.types.responses import (
+    ResponseErrorEvent,
+    ResponseFailedEvent,
+    ResponseQueuedEvent,
+    ResponseCreatedEvent,
+    ResponseCompletedEvent,
+    ResponseTextDoneEvent,
+    ResponseIncompleteEvent,
+    ResponseTextDeltaEvent,
+    ResponseInProgressEvent,
+    ResponseRefusalDoneEvent,
+    ResponseRefusalDeltaEvent,
+    ResponseOutputItemDoneEvent,
+    ResponseContentPartDoneEvent,
+    ResponseOutputItemAddedEvent,
+    ResponseContentPartAddedEvent,
+    ResponseReasoningTextDoneEvent,
+    ResponseReasoningTextDeltaEvent,
+    ResponseWebSearchCallCompletedEvent,
+    ResponseWebSearchCallSearchingEvent,
+    ResponseReasoningSummaryPartDoneEvent,
+    ResponseReasoningSummaryTextDoneEvent,
+    ResponseWebSearchCallInProgressEvent,
+    ResponseFunctionCallArgumentsDoneEvent,
+    ResponseOutputTextAnnotationAddedEvent,
+    ResponseReasoningSummaryPartAddedEvent,
+    ResponseReasoningSummaryTextDeltaEvent,
+    ResponseFunctionCallArgumentsDeltaEvent,
+)
+
+
+ResponseStreamEvent: TypeAlias = Union[
+    ResponseCompletedEvent,
+    ResponseContentPartAddedEvent,
+    ResponseContentPartDoneEvent,
+    ResponseCreatedEvent,
+    ResponseErrorEvent,
+    ResponseFunctionCallArgumentsDeltaEvent,
+    ResponseFunctionCallArgumentsDoneEvent,
+    ResponseInProgressEvent,
+    ResponseFailedEvent,
+    ResponseIncompleteEvent,
+    ResponseOutputItemAddedEvent,
+    ResponseOutputItemDoneEvent,
+    ResponseReasoningSummaryPartAddedEvent,
+    ResponseReasoningSummaryPartDoneEvent,
+    ResponseReasoningSummaryTextDeltaEvent,
+    ResponseReasoningSummaryTextDoneEvent,
+    ResponseReasoningTextDeltaEvent,
+    ResponseReasoningTextDoneEvent,
+    ResponseRefusalDeltaEvent,
+    ResponseRefusalDoneEvent,
+    ResponseTextDeltaEvent,
+    ResponseTextDoneEvent,
+    ResponseWebSearchCallCompletedEvent,
+    ResponseWebSearchCallInProgressEvent,
+    ResponseWebSearchCallSearchingEvent,
+    ResponseOutputTextAnnotationAddedEvent,
+    ResponseQueuedEvent,
+]
+
+
+# Custom internal events
+class ResponseFunctionCallOutputItemAddedEvent(BaseModel):
+    """Emitted when a function tool call output item is created."""
+
+    item: ResponseFunctionToolCallOutput
+    """The output item that was added."""
+
+    output_index: None
+    """The index of the output item that was added. Always `None` for function tool call output."""
+
+    sequence_number: None
+    """The sequence number of this event. Always `None` for function tool call output."""
+
+    type: Literal["response.function_call_output_item.added"]
+    """The type of the event. Always `response.function_call_output_item.added`."""
+
+
+class ResponseFunctionCallOutputItemDoneEvent(BaseModel):
+    """Emitted when a function tool call output item is marked done."""
+
+    item: ResponseFunctionToolCallOutput
+    """The output item that was marked done."""
+
+    output_index: None
+    """The index of the output item that was marked done. Always `None` for function tool call output."""
+
+    sequence_number: None
+    """The sequence number of this event. Always `None` for function tool call output."""
+
+    type: Literal["response.function_call_output_item.done"]
+    """The type of the event. Always `response.function_call_output_item.done`."""
+
+
+TResponseStreamEvent = ResponseStreamEvent
+
+StreamEvent: TypeAlias = Union[
+    TResponseStreamEvent,
+    ResponseFunctionCallOutputItemAddedEvent,
+    ResponseFunctionCallOutputItemDoneEvent,
+]
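
A sketch of how a consumer might narrow the `StreamEvent` union, using only names defined above. The two package-local events carry the synthesized tool output item; everything else is a pass-through from `openai.types.responses`:

    from literun import StreamEvent
    from literun.events import (
        ResponseFunctionCallOutputItemAddedEvent,
        ResponseFunctionCallOutputItemDoneEvent,
    )

    def describe(event: StreamEvent) -> str:
        # isinstance narrows to the literun-specific pydantic models.
        if isinstance(event, ResponseFunctionCallOutputItemAddedEvent):
            return f"tool call {event.item.call_id} started"
        if isinstance(event, ResponseFunctionCallOutputItemDoneEvent):
            return f"tool call {event.item.call_id} returned: {event.item.output}"
        return event.type  # every member of the union carries a `type` string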