literun 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- literun/__init__.py +1 -1
- literun/agent.py +78 -331
- literun/args_schema.py +18 -24
- literun/constants.py +18 -13
- literun/events.py +35 -35
- literun/items.py +12 -17
- literun/llm.py +160 -68
- literun/prompt_message.py +35 -48
- literun/prompt_template.py +14 -17
- literun/results.py +3 -5
- literun/runner.py +342 -0
- literun/tool.py +29 -41
- literun-0.1.1.dist-info/METADATA +187 -0
- literun-0.1.1.dist-info/RECORD +17 -0
- {literun-0.1.0.dist-info → literun-0.1.1.dist-info}/WHEEL +1 -2
- literun-0.1.0.dist-info/METADATA +0 -242
- literun-0.1.0.dist-info/RECORD +0 -17
- literun-0.1.0.dist-info/top_level.txt +0 -1
- {literun-0.1.0.dist-info → literun-0.1.1.dist-info}/licenses/LICENSE +0 -0
literun/items.py
CHANGED

@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import
+from typing import Literal, TypeAlias
 from pydantic import BaseModel
 
 from openai.types.responses import (
@@ -20,7 +20,7 @@ class ResponseFunctionToolCallOutput(BaseModel):
     call_id: str
     """The unique ID of the function tool call generated by the model."""
 
-    output:
+    output: str | ResponseInputText
     """The output from the function call generated by your code."""
 
     name: str
@@ -29,10 +29,10 @@ class ResponseFunctionToolCallOutput(BaseModel):
     type: Literal["function_call_output"]
     """The type of the function tool call output. Always `function_call_output`."""
 
-    id:
+    id: str | None = None
     """The unique ID of the function tool call output."""
 
-    status:
+    status: Literal["in_progress", "completed", "incomplete"] | None = None
     """The status of the item."""
 
 
@@ -44,7 +44,7 @@ class MessageOutputItem(BaseModel):
     content: str = ""
     """The text content of the message. Always `assistant`."""
 
-    raw_item:
+    raw_item: ResponseOutputMessage | None = None
    """The raw response output message."""
 
     type: Literal["message_output_item"] = "message_output_item"
@@ -59,9 +59,7 @@ class ToolCallItem(BaseModel):
     content: str = ""
     """The content of the tool call. Usually empty."""
 
-    raw_item:
-        None
-    )
+    raw_item: ResponseFunctionToolCall | ResponseFunctionWebSearch | None = None
     """The raw tool call item."""
 
     type: Literal["tool_call_item"] = "tool_call_item"
@@ -76,7 +74,7 @@ class ToolCallOutputItem(BaseModel):
     content: str = ""
     """The output from the function call generated by your code."""
 
-    raw_item:
+    raw_item: ResponseFunctionToolCallOutput | None = None
     """The raw tool call output item."""
 
     type: Literal["tool_call_output_item"] = "tool_call_output_item"
@@ -88,19 +86,16 @@ class ReasoningItem(BaseModel):
 
     role: Literal["assistant"] = "assistant"
 
-    content:
+    content: str | None = None
     """The reasoning content."""
 
-    raw_item:
+    raw_item: ResponseReasoningItem | None = None
     """The raw reasoning item."""
 
     type: Literal["reasoning_item"] = "reasoning_item"
     """The type of the reasoning item. Always `reasoning_item`."""
 
 
-RunItem: TypeAlias =
-    MessageOutputItem
-
-    ToolCallOutputItem,
-    ReasoningItem,
-]
+RunItem: TypeAlias = (
+    MessageOutputItem | ToolCallItem | ToolCallOutputItem | ReasoningItem
+)
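Taken together, each run item is now a plain pydantic model carrying a Literal `type` discriminator, and `RunItem` is a PEP 604 union of the four. A minimal sketch of consuming a `RunItem` (the helper below is illustrative, not part of the package; it uses only fields visible in this diff):

from literun.items import RunItem

def render(item: RunItem) -> str:
    # Narrow on the Literal `type` field that every item model carries.
    if item.type == "message_output_item":
        return f"assistant: {item.content}"
    if item.type == "reasoning_item":
        return f"reasoning: {item.content or ''}"
    if item.type == "tool_call_item":
        return f"tool call, raw_item={item.raw_item!r}"
    return f"tool output: {item.content}"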
literun/llm.py
CHANGED

@@ -2,67 +2,133 @@
 
 from __future__ import annotations
 
-from typing import Any,
+from typing import Any, Iterator
+from pydantic import BaseModel, Field, PrivateAttr, model_validator
 from openai import OpenAI
+from openai.types.responses import Response
 
 from .tool import Tool
-
-
-
+from .prompt_template import PromptTemplate
+from .events import ResponseStreamEvent
+from .constants import (
+    Verbosity,
+    TextFormat,
+    ReasoningEffort,
+    DEFAULT_TIMEOUT,
+    DEFAULT_MAX_RETRIES,
+    DEFAULT_OPENAI_MODEL,
+)
+
+
+class ChatOpenAI(BaseModel):
     """Stateless wrapper for a configured OpenAI model.
 
     Provides a unified interface to call the OpenAI Responses API, optionally
     binding tools and streaming outputs.
+
+    Args:
+        model: The model name to use.
+        temperature: Sampling temperature.
+        api_key: OpenAI API key.
+        organization: OpenAI organization ID.
+        project: OpenAI project ID.
+        base_url: Custom base URL for OpenAI API.
+        max_output_tokens: Maximum number of tokens in the output.
+        timeout: Request timeout in seconds.
+        max_retries: Number of retries for failed requests.
+        reasoning_effort: Level of reasoning effort for the model.
+            Options: "none", "low", "medium", "high".
+        verbosity: Level of verbosity in model responses.
+            Options: "low", "medium", "high".
+        text_format: Format of the text output.
+            Options: "text", "json_object", "json_schema".
+        store: Whether to store model responses with OpenAI.
+        model_kwargs: Additional model parameters.
     """
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    model: str = DEFAULT_OPENAI_MODEL
+    temperature: float | None = None
+    api_key: str | None = None
+    organization: str | None = None
+    project: str | None = None
+    base_url: str | None = None
+    max_output_tokens: int | None = None
+    timeout: float | None = DEFAULT_TIMEOUT
+    max_retries: int = DEFAULT_MAX_RETRIES
+    reasoning_effort: ReasoningEffort | None = None
+    verbosity: Verbosity | None = None
+    text_format: TextFormat | None = None
+    store: bool = False
+    model_kwargs: dict[str, Any] = Field(default_factory=dict)
+
+    _client: OpenAI = PrivateAttr()
+    _tools: list[Tool] | None = PrivateAttr(default=None)
+    _tool_choice: str | None = PrivateAttr(default=None)
+    _parallel_tool_calls: bool | None = PrivateAttr(default=None)
+
+    @model_validator(mode="after")
+    def _validate_temperature(self) -> ChatOpenAI:
+        """Validate temperature and reasoning parameters."""
+        model_lower = self.model.lower()
+
+        # 1. For o-series models, default to temperature=1 if not provided
+        if model_lower.startswith("o") and self.temperature is None:
+            import warnings
+
+            warnings.warn(
+                "o-series models require temperature=1 and no temperature was provided. "
+                "Setting default temperature=1 for o-series models.",
+                UserWarning,
+            )
+            self.temperature = 1
 
-
-
-
-
-
-
-
-
-
-        store: Whether to store model responses.
-        **kwargs: Additional model parameters.
-        """
-        self.model = model
-        self.temperature = temperature
-        self.max_output_tokens = max_output_tokens
-        self.model_kwargs = kwargs
-        self.client = (
-            OpenAI(api_key=api_key, base_url=base_url)
-            if base_url
-            else OpenAI(api_key=api_key)
+        # 2. For gpt-5 models, handle temperature restrictions
+        # (Assuming gpt-5-chat and non-reasoning gpt-5 support arbitrary temps)
+        is_gpt5 = model_lower.startswith("gpt-5")
+        is_chat = "chat" in model_lower
+
+        # Check reasoning effort from field or model_kwargs
+        effort_kwarg = (self.model_kwargs.get("reasoning") or {}).get("effort")
+        has_reasoning = (self.reasoning_effort and self.reasoning_effort != "none") or (
+            effort_kwarg and effort_kwarg != "none"
         )
-
-
-
-
+
+        if is_gpt5 and not is_chat and has_reasoning:
+            if self.temperature is not None and self.temperature != 1:
+                import warnings
+
+                warnings.warn(
+                    "Invalid temperature for gpt-5 with reasoning. Using default temperature.",
+                    UserWarning,
+                )
+                self.temperature = None
+
+        return self
+
+    @model_validator(mode="after")
+    def _initialize_client(self) -> ChatOpenAI:
+        """Initialize the OpenAI client."""
+        self._client = OpenAI(
+            api_key=self.api_key,
+            base_url=self.base_url,
+            organization=self.organization,
+            project=self.project,
+            timeout=self.timeout,
+            max_retries=self.max_retries,
+        )
+        return self
+
+    @property
+    def client(self) -> OpenAI:
+        """Access the OpenAI client."""
+        return self._client
 
     def bind_tools(
         self,
         *,
-        tools:
-        tool_choice:
-        parallel_tool_calls:
+        tools: list[Tool],
+        tool_choice: str | None = None,
+        parallel_tool_calls: bool | None = None,
    ) -> ChatOpenAI:
         """Bind tools to the LLM instance.
 
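The new `_validate_temperature` validator fires at construction time, so the o-series defaulting is directly observable. A small sketch, assuming the model name is illustrative and `OPENAI_API_KEY` is set in the environment so the client validator can initialize:

import warnings

from literun.llm import ChatOpenAI

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    llm = ChatOpenAI(model="o3-mini")  # no temperature supplied

print(llm.temperature)     # 1, defaulted by _validate_temperature
print(caught[0].category)  # <class 'UserWarning'>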
@@ -82,40 +148,55 @@ class ChatOpenAI:
     def chat(
         self,
         *,
-        messages:
+        messages: PromptTemplate | list[dict[str, Any]],
         stream: bool = False,
-        tools:
-        tool_choice:
-        parallel_tool_calls:
-    ) ->
+        tools: list[Tool] | None = None,
+        tool_choice: str | None = None,
+        parallel_tool_calls: bool | None = None,
+    ) -> Response | Iterator[ResponseStreamEvent]:
         """Call the model with the given messages.
 
         Args:
-            messages:
+            messages: PromptTemplate or list of messages in OpenAI format.
             stream: Whether to stream the output.
-            tools: Optional list of
+            tools: Optional list of Tool instances.
             tool_choice: Optional tool selection strategy.
             parallel_tool_calls: Whether to allow parallel tool calls.
 
         Returns:
-
+            Response | Iterator[ResponseStreamEvent]: The OpenAI Responses API response object (or stream).
         """
+        if isinstance(messages, PromptTemplate):
+            input_ = messages.to_openai_input()
+        else:
+            input_ = messages
+
         params = {
             "model": self.model,
-            "
+            "temperature": self.temperature,
+            "max_output_tokens": self.max_output_tokens,
+            "input": input_,
             "stream": stream,
             "store": self.store,
             **self.model_kwargs,
         }
-        if self.
-            params["
-
-
+        if self.reasoning_effort is not None:
+            params["reasoning"] = {"effort": self.reasoning_effort}
+
+        text_options = {}
+        if self.verbosity is not None:
+            text_options["verbosity"] = self.verbosity
+        if self.text_format is not None:
+            text_options["type"] = self.text_format
+        if text_options:
+            params["text"] = {"format": text_options}
 
         # Tools resolution
-
-
+        active_tools = tools if tools is not None else self._tools
+        current_tools = (
+            self._convert_to_openai_tools(active_tools) if active_tools else None
         )
+
         if current_tools:
             params["tools"] = current_tools
             params["tool_choice"] = tool_choice or self._tool_choice
@@ -127,30 +208,41 @@ class ChatOpenAI:
 
         return self.client.responses.create(**params)
 
-    def invoke(self, messages:
+    def invoke(self, messages: list[dict[str, Any]] | PromptTemplate) -> Response:
         """Synchronously call the model.
 
         Args:
-            messages:
+            messages: PromptTemplate or list of messages in OpenAI format.
 
         Returns:
-
+            Response: The OpenAI Responses API response object.
         """
         return self.chat(messages=messages, stream=False)
 
     def stream(
         self,
         *,
-        messages:
-    ) -> Iterator[
+        messages: list[dict[str, Any]] | PromptTemplate,
+    ) -> Iterator[ResponseStreamEvent]:
         """Stream the model response.
 
         Args:
-            messages:
+            messages: PromptTemplate or list of messages in OpenAI format.
 
         Yields:
-
+            ResponseStreamEvent: Streamed response events from the OpenAI Responses API.
         """
         response = self.chat(messages=messages, stream=True)
         for event in response:
             yield event
+
+    @staticmethod
+    def _convert_to_openai_tools(tools: list[Tool]) -> list[dict[str, Any]] | None:
+        """Convert all registered tools to the OpenAI tool schema format.
+
+        Returns:
+            List[Dict[str, Any]]: A list of tools in OpenAI-compatible dictionary format.
+        """
+        if not tools:
+            return None
+        return [tool.to_openai_tool() for tool in tools]
literun/prompt_message.py
CHANGED

@@ -2,69 +2,55 @@
 
 from __future__ import annotations
 
-from typing import Any, Dict
+from typing import Any, Dict
+from pydantic import BaseModel, model_validator
 
 from .constants import Role, ContentType
 
 
-class PromptMessage:
+class PromptMessage(BaseModel):
     """Domain representation of a single semantic message in a conversation.
 
     This class is the only place that knows how to convert a semantic
     message into an OpenAI-compatible message dictionary. It enforces
     invariants depending on the message type.
-    """
-
-    def __init__(
-        self,
-        *,
-        role: Optional[Role] = None,
-        content_type: ContentType,
-        text: Optional[str] = None,
-        name: Optional[str] = None,
-        arguments: Optional[str] = None,
-        call_id: Optional[str] = None,
-        output: Optional[str] = None,
-    ) -> None:
-        """Initialize a PromptMessage.
-
-        Args:
-            role: The role of the message sender (e.g., USER, ASSISTANT). Required for text messages.
-            content_type: The type of content (e.g., INPUT_TEXT, FUNCTION_CALL).
-            text: The text content of the message (required for text messages).
-            name: The name of the tool (for function calls).
-            arguments: The arguments for the tool as a JSON string (for function calls).
-            call_id: The ID of the tool call.
-            output: The output of the tool execution (for FUNCTION_CALL_OUTPUT messages).
 
-
-
-
-
-
-
-
-
-
-
+    Args:
+        role: The role of the message sender. Required for text messages.
+            Options: "system", "user", "assistant", "developer", "tool"
+        text: The text content of the message (required for text messages).
+        name: The name of the tool (for function calls).
+        arguments: The arguments for the tool as a JSON string (for function calls).
+        call_id: The ID of the tool call.
+        output: The output of the tool execution (for function call output messages).
+        content_type: The type of content.
+            Options: "input_text", "output_text", "message", "function_call", "function_call_output"
+    """
 
-
+    role: Role | None = None
+    text: str | None = None
+    name: str | None = None
+    arguments: str | None = None
+    call_id: str | None = None
+    output: str | None = None
+    content_type: ContentType
 
-
+    @model_validator(mode="after")
+    def _validate_invariants(self) -> PromptMessage:
         """Enforce invariants so that invalid messages are never constructed.
 
         Raises:
             ValueError: If required fields are missing for the given content_type.
         """
         # Text messages (system / user / assistant)
-        if self.content_type in (
+        if self.content_type in ("input_text", "output_text"):
             if self.role is None:
                 raise ValueError("role is required for text messages")
             if not isinstance(self.text, str):
                 raise ValueError("text is required for text messages")
 
         # Tool call (model -> agent)
-        elif self.content_type ==
+        elif self.content_type == "function_call":
             if not self.name:
                 raise ValueError("name is required for FUNCTION_CALL")
             if not isinstance(self.arguments, str):
@@ -73,14 +59,15 @@ class PromptMessage:
             raise ValueError("call_id is required for FUNCTION_CALL")
 
         # Tool output (agent -> model)
-        elif self.content_type ==
+        elif self.content_type == "function_call_output":
             if not self.call_id:
                 raise ValueError("call_id is required for FUNCTION_CALL_OUTPUT")
             if not isinstance(self.output, str):
                 raise ValueError("output must be a string")
-
         else:
             raise ValueError(f"Unsupported content_type: {self.content_type}")
+
+        return self
 
     def to_openai_message(self) -> Dict[str, Any]:
         """Convert the PromptMessage to an OpenAI-compatible message dictionary.
@@ -93,30 +80,30 @@ class PromptMessage:
             RuntimeError: If the message state is invalid (should not occur).
         """
         # System / User / Assistant messages
-        if self.content_type in (
+        if self.content_type in ("input_text", "output_text"):
             return {
-                "role": self.role
+                "role": self.role,
                 "content": [
                     {
-                        "type": self.content_type
+                        "type": self.content_type,
                         "text": self.text,
                     }
                 ],
             }
 
         # Tool call (model -> agent)
-        if self.content_type ==
+        if self.content_type == "function_call":
             return {
-                "type": self.content_type
+                "type": self.content_type,
                 "name": self.name,
                 "arguments": self.arguments,
                 "call_id": self.call_id,
             }
 
         # Tool output (agent -> model)
-        if self.content_type ==
+        if self.content_type == "function_call_output":
             return {
-                "type": self.content_type
+                "type": self.content_type,
                 "call_id": self.call_id,
                 "output": self.output,
             }
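Because `PromptMessage` is now a pydantic model, `_validate_invariants` fires at construction, and pydantic surfaces the validator's ValueError as a ValidationError. A brief sketch using only fields shown in this diff:

from pydantic import ValidationError

from literun.prompt_message import PromptMessage

msg = PromptMessage(role="user", content_type="input_text", text="Hello")
print(msg.to_openai_message())
# {'role': 'user', 'content': [{'type': 'input_text', 'text': 'Hello'}]}

try:
    PromptMessage(content_type="function_call", name="lookup")
except ValidationError as exc:
    print(exc)  # arguments (and call_id) are required for FUNCTION_CALL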
literun/prompt_template.py
CHANGED

@@ -2,13 +2,13 @@
 
 from __future__ import annotations
 
-from typing import Iterable,
+from typing import Iterable, Any
+from pydantic import BaseModel, Field
 
 from .prompt_message import PromptMessage
-from .constants import Role, ContentType
 
 
-class PromptTemplate:
+class PromptTemplate(BaseModel):
     """Container for conversation state.
 
     This class stores the authoritative message history used by the Agent.
@@ -16,10 +16,7 @@ class PromptTemplate:
     OpenAI API boundary.
     """
 
-
-        """Initialize an empty PromptTemplate."""
-
-        self.messages: List[PromptMessage] = []
+    messages: list[PromptMessage] = Field(default_factory=list)
 
     def add_message(self, message: PromptMessage) -> PromptTemplate:
         """Add a custom prompt message.
@@ -62,8 +59,8 @@ class PromptTemplate:
         """
         return self.add_message(
             PromptMessage(
-                role=
-                content_type=
+                role="system",
+                content_type="input_text",
                 text=text,
             )
         )
@@ -79,8 +76,8 @@ class PromptTemplate:
         """
         return self.add_message(
             PromptMessage(
-                role=
-                content_type=
+                role="user",
+                content_type="input_text",
                 text=text,
             )
         )
@@ -96,8 +93,8 @@ class PromptTemplate:
         """
         return self.add_message(
             PromptMessage(
-                role=
-                content_type=
+                role="assistant",
+                content_type="output_text",
                 text=text,
             )
         )
@@ -121,7 +118,7 @@ class PromptTemplate:
         """
         return self.add_message(
             PromptMessage(
-                content_type=
+                content_type="function_call",
                 name=name,
                 arguments=arguments,
                 call_id=call_id,
@@ -145,7 +142,7 @@ class PromptTemplate:
         """
         return self.add_message(
             PromptMessage(
-                content_type=
+                content_type="function_call_output",
                 call_id=call_id,
                 output=output,
             )
@@ -164,11 +161,11 @@ class PromptTemplate:
         new.messages = list(self.messages)
         return new
 
-    def to_openai_input(self) ->
+    def to_openai_input(self) -> list[dict[str, Any]]:
         """Convert the template to OpenAI message dictionaries.
 
         Returns:
-
+            list[dict[str, Any]]: The formatted messages.
         """
         return [msg.to_openai_message() for msg in self.messages]
 
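The role-specific helpers now pin their role/content_type literals, and the template itself is a pydantic model whose message list defaults to empty. A short sketch; the helper method names are hidden by the hunk headers above, so this sticks to `add_message` and `to_openai_input`, which are visible:

from literun.prompt_message import PromptMessage
from literun.prompt_template import PromptTemplate

template = PromptTemplate()  # messages defaults to an empty list
template.add_message(
    PromptMessage(role="system", content_type="input_text", text="Be brief.")
).add_message(
    PromptMessage(role="user", content_type="input_text", text="Status?")
)

print(template.to_openai_input())
# [{'role': 'system', 'content': [{'type': 'input_text', 'text': 'Be brief.'}]},
#  {'role': 'user', 'content': [{'type': 'input_text', 'text': 'Status?'}]}]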
literun/results.py
CHANGED

@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from
+from pydantic import BaseModel
 from typing import Any
 
 from openai.types.responses import Response
@@ -10,8 +10,7 @@ from .items import RunItem
 from .events import StreamEvent
 
 
-
-class RunResult:
+class RunResult(BaseModel):
     """Final result returned by the OpenAI Agent.
 
     Used in the ``Agent.invoke()`` method. Contains the full execution
@@ -30,8 +29,7 @@ class RunResult:
     """The output produced by the final agent invocation."""
 
 
-
-class RunResultStreaming:
+class RunResultStreaming(BaseModel):
     """Streaming result returned by the OpenAI Agent.
 
     Used in the ``Agent.stream()`` method. Each instance represents a