literun 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of literun might be problematic. Click here for more details.

literun/items.py ADDED
@@ -0,0 +1,106 @@
1
+ """Response items used in agent execution."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Optional, Union, Literal, TypeAlias
6
+ from pydantic import BaseModel
7
+
8
+ from openai.types.responses import (
9
+ ResponseOutputMessage,
10
+ ResponseReasoningItem,
11
+ ResponseFunctionToolCall,
12
+ ResponseFunctionWebSearch,
13
+ ResponseInputText,
14
+ )
15
+
16
+
17
+ class ResponseFunctionToolCallOutput(BaseModel):
18
+ """The output of a function tool call."""
19
+
20
+ call_id: str
21
+ """The unique ID of the function tool call generated by the model."""
22
+
23
+ output: Union[str, ResponseInputText]
24
+ """The output from the function call generated by your code."""
25
+
26
+ name: str
27
+ """The name of the function tool call."""
28
+
29
+ type: Literal["function_call_output"]
30
+ """The type of the function tool call output. Always `function_call_output`."""
31
+
32
+ id: Optional[str] = None
33
+ """The unique ID of the function tool call output."""
34
+
35
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
36
+ """The status of the item."""
37
+
38
+
39
class MessageOutputItem(BaseModel):
    """Represents a text message from the LLM."""

    # Output messages always originate from the model, hence the fixed role.
    role: Literal["assistant"] = "assistant"

    content: str = ""
    """The text content of the message."""

    raw_item: Optional[ResponseOutputMessage] = None
    """The raw response output message."""

    type: Literal["message_output_item"] = "message_output_item"
    """The type of the message output item. Always `message_output_item`."""
52
+
53
+
54
class ToolCallItem(BaseModel):
    """Represents a tool call e.g. a function call or computer action call."""

    # Tool calls are emitted by the model, hence the assistant role.
    role: Literal["assistant"] = "assistant"

    content: str = ""
    """The content of the tool call. Usually empty."""

    raw_item: Optional[Union[ResponseFunctionToolCall, ResponseFunctionWebSearch]] = (
        None
    )
    """The raw tool call item (function call or web-search call)."""

    type: Literal["tool_call_item"] = "tool_call_item"
    """The type of the tool call item. Always `tool_call_item`."""
69
+
70
+
71
class ToolCallOutputItem(BaseModel):
    """Represents the output of a tool call."""

    # Outputs are attributed to the tool itself rather than the assistant.
    role: Literal["tool"] = "tool"

    content: str = ""
    """The output from the function call generated by your code."""

    raw_item: Optional[ResponseFunctionToolCallOutput] = None
    """The raw tool call output item."""

    type: Literal["tool_call_output_item"] = "tool_call_output_item"
    """The type of the tool call output item. Always `tool_call_output_item`."""
84
+
85
+
86
class ReasoningItem(BaseModel):
    """Represents a reasoning item."""

    role: Literal["assistant"] = "assistant"

    content: Optional[str] = None
    """The reasoning content. May be None when no reasoning text is available."""

    raw_item: Optional[ResponseReasoningItem] = None
    """The raw reasoning item."""

    type: Literal["reasoning_item"] = "reasoning_item"
    """The type of the reasoning item. Always `reasoning_item`."""
99
+
100
+
101
# Union of every item an agent run can produce; each member carries a distinct
# Literal `type` field, so the union is discriminated by `type`.
RunItem: TypeAlias = Union[
    MessageOutputItem,
    ToolCallItem,
    ToolCallOutputItem,
    ReasoningItem,
]
literun/llm.py ADDED
@@ -0,0 +1,156 @@
1
+ """LLM client wrapper and configuration."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any, Dict, List, Optional, Iterator
6
+ from openai import OpenAI
7
+
8
+ from .tool import Tool
9
+
10
+
11
class ChatOpenAI:
    """Stateless wrapper for a configured OpenAI model.

    Provides a unified interface to call the OpenAI Responses API, optionally
    binding tools and streaming outputs.
    """

    def __init__(
        self,
        *,
        model: str = "gpt-4.1-mini",
        temperature: Optional[float] = None,
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        max_output_tokens: Optional[int] = None,
        tools: Optional[List[Tool]] = None,
        tool_choice: Optional[str] = None,
        parallel_tool_calls: Optional[bool] = None,
        store: bool = False,
        **kwargs: Any,
    ) -> None:
        """Initialize the ChatOpenAI instance.

        Args:
            model: The model name to use.
            temperature: Sampling temperature.
            api_key: OpenAI API key.
            base_url: Custom base URL for the OpenAI API.
            max_output_tokens: Maximum number of tokens in the output.
            tools: Optional list of Tool instances to bind.
            tool_choice: Optional tool selection strategy.
            parallel_tool_calls: Whether to allow parallel tool calls.
            store: Whether to store model responses.
            **kwargs: Additional model parameters forwarded verbatim to the API.
        """
        self.model = model
        self.temperature = temperature
        self.max_output_tokens = max_output_tokens
        self.model_kwargs = kwargs
        # The OpenAI client falls back to its default endpoint when base_url
        # is None, so a single constructor call covers both cases.
        self.client = OpenAI(api_key=api_key, base_url=base_url)
        self.store = store
        self._tools = tools
        self._tool_choice = tool_choice
        self._parallel_tool_calls = parallel_tool_calls

    def bind_tools(
        self,
        *,
        tools: List[Tool],
        tool_choice: Optional[str] = None,
        parallel_tool_calls: Optional[bool] = None,
    ) -> ChatOpenAI:
        """Bind tools to the LLM instance.

        Args:
            tools: List of Tool instances to bind.
            tool_choice: Optional tool selection strategy.
            parallel_tool_calls: Whether to allow parallel tool calls.

        Returns:
            ``ChatOpenAI``: The updated instance with tools bound.
        """
        self._tools = tools
        self._tool_choice = tool_choice
        self._parallel_tool_calls = parallel_tool_calls
        return self

    def chat(
        self,
        *,
        messages: List[Dict[str, Any]],
        stream: bool = False,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[str] = None,
        parallel_tool_calls: Optional[bool] = None,
    ) -> Any:
        """Call the model with the given messages.

        Args:
            messages: List of messages in OpenAI format.
            stream: Whether to stream the output.
            tools: Optional list of OpenAI tool definitions; takes precedence
                over tools bound at construction or via ``bind_tools``.
            tool_choice: Optional tool selection strategy; overrides the
                bound value when given.
            parallel_tool_calls: Whether to allow parallel tool calls;
                overrides the bound value when given.

        Returns:
            Any: The OpenAI Responses API response object (or stream).
        """
        params: Dict[str, Any] = {
            "model": self.model,
            "input": messages,
            "stream": stream,
            "store": self.store,
            **self.model_kwargs,
        }
        if self.temperature is not None:
            params["temperature"] = self.temperature
        if self.max_output_tokens is not None:
            params["max_output_tokens"] = self.max_output_tokens

        # Tools resolution: per-call definitions win over bound Tool objects.
        current_tools = tools or (
            [tool.to_openai_tool() for tool in self._tools] if self._tools else None
        )
        if current_tools:
            params["tools"] = current_tools
            # BUG FIX: the previous version always sent tool_choice and
            # parallel_tool_calls, even when both resolved to None. The SDK
            # expects unset optional parameters to be omitted (NOT_GIVEN)
            # rather than passed as an explicit null, so only include them
            # when a value was actually provided.
            choice = tool_choice if tool_choice is not None else self._tool_choice
            if choice is not None:
                params["tool_choice"] = choice
            parallel = (
                parallel_tool_calls
                if parallel_tool_calls is not None
                else self._parallel_tool_calls
            )
            if parallel is not None:
                params["parallel_tool_calls"] = parallel

        return self.client.responses.create(**params)

    def invoke(self, messages: List[Dict[str, Any]]) -> Any:
        """Synchronously call the model.

        Args:
            messages: List of messages in OpenAI format.

        Returns:
            Any: The OpenAI Responses API response object.
        """
        return self.chat(messages=messages, stream=False)

    def stream(
        self,
        *,
        messages: List[Dict[str, Any]],
    ) -> Iterator[Any]:
        """Stream the model response.

        Args:
            messages: List of messages in OpenAI format.

        Yields:
            Any: Streamed response events from the OpenAI Responses API.
        """
        yield from self.chat(messages=messages, stream=True)
@@ -0,0 +1,136 @@
1
+ """Message structures for prompts."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any, Dict, Optional
6
+
7
+ from .constants import Role, ContentType
8
+
9
+
10
class PromptMessage:
    """Domain representation of a single semantic message in a conversation.

    This class is the only place that knows how to turn a semantic message
    into an OpenAI-compatible message dictionary, and it enforces per-type
    invariants at construction time.
    """

    def __init__(
        self,
        *,
        role: Optional[Role] = None,
        content_type: ContentType,
        text: Optional[str] = None,
        name: Optional[str] = None,
        arguments: Optional[str] = None,
        call_id: Optional[str] = None,
        output: Optional[str] = None,
    ) -> None:
        """Initialize a PromptMessage.

        Args:
            role: The role of the message sender (e.g., USER, ASSISTANT).
                Required for text messages.
            content_type: The type of content (e.g., INPUT_TEXT, FUNCTION_CALL).
            text: The text content of the message (required for text messages).
            name: The name of the tool (for function calls).
            arguments: The arguments for the tool as a JSON string (for function calls).
            call_id: The ID of the tool call.
            output: The output of the tool execution (for FUNCTION_CALL_OUTPUT messages).

        Raises:
            ValueError: If required fields for the given content_type are missing.
        """
        self.role = role
        self.content_type = content_type
        self.text = text
        self.name = name
        self.arguments = arguments
        self.call_id = call_id
        self.output = output
        self._validate()

    def _check_text_message(self) -> None:
        """Invariants for system / user / assistant text messages."""
        if self.role is None:
            raise ValueError("role is required for text messages")
        if not isinstance(self.text, str):
            raise ValueError("text is required for text messages")

    def _check_function_call(self) -> None:
        """Invariants for a tool call (model -> agent)."""
        if not self.name:
            raise ValueError("name is required for FUNCTION_CALL")
        if not isinstance(self.arguments, str):
            raise ValueError("arguments must be a JSON string")
        if not self.call_id:
            raise ValueError("call_id is required for FUNCTION_CALL")

    def _check_function_output(self) -> None:
        """Invariants for a tool output (agent -> model)."""
        if not self.call_id:
            raise ValueError("call_id is required for FUNCTION_CALL_OUTPUT")
        if not isinstance(self.output, str):
            raise ValueError("output must be a string")

    def _validate(self) -> None:
        """Enforce invariants so that invalid messages are never constructed.

        Raises:
            ValueError: If required fields are missing for the given content_type.
        """
        kind = self.content_type
        if kind in (ContentType.INPUT_TEXT, ContentType.OUTPUT_TEXT):
            self._check_text_message()
        elif kind == ContentType.FUNCTION_CALL:
            self._check_function_call()
        elif kind == ContentType.FUNCTION_CALL_OUTPUT:
            self._check_function_output()
        else:
            raise ValueError(f"Unsupported content_type: {self.content_type}")

    def to_openai_message(self) -> Dict[str, Any]:
        """Convert the PromptMessage to an OpenAI-compatible message dictionary.

        Returns:
            Dict[str, Any]: The formatted message dictionary.

        Raises:
            RuntimeError: If the message state is invalid (should not occur,
                since ``_validate`` runs at construction).
        """
        kind = self.content_type

        # System / User / Assistant messages carry a single text part.
        if kind in (ContentType.INPUT_TEXT, ContentType.OUTPUT_TEXT):
            part = {"type": kind.value, "text": self.text}
            return {"role": self.role.value, "content": [part]}

        # Tool call (model -> agent).
        if kind == ContentType.FUNCTION_CALL:
            return {
                "type": kind.value,
                "name": self.name,
                "arguments": self.arguments,
                "call_id": self.call_id,
            }

        # Tool output (agent -> model).
        if kind == ContentType.FUNCTION_CALL_OUTPUT:
            return {
                "type": kind.value,
                "call_id": self.call_id,
                "output": self.output,
            }

        # Unreachable for validated instances.
        raise RuntimeError("Invalid PromptMessage state")

    def __repr__(self) -> str:
        """Return a concise representation of the message for debugging."""
        return (
            f"PromptMessage(content_type={self.content_type}, "
            f"role={self.role}, name={self.name}, call_id={self.call_id})"
        )
@@ -0,0 +1,181 @@
1
+ """Template management for constructing prompts."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Iterable, List, Dict, Any
6
+
7
+ from .prompt_message import PromptMessage
8
+ from .constants import Role, ContentType
9
+
10
+
11
class PromptTemplate:
    """Container for conversation state.

    Stores the authoritative message history used by the Agent as
    ``PromptMessage`` objects and serializes them only at the OpenAI API
    boundary.
    """

    def __init__(self) -> None:
        """Initialize an empty PromptTemplate."""
        self.messages: List[PromptMessage] = []

    def add_message(self, message: PromptMessage) -> PromptTemplate:
        """Add a custom prompt message.

        Args:
            message: The message to add.

        Returns:
            ``PromptTemplate``: This template, allowing method chaining.

        Raises:
            TypeError: If `message` is not a ``PromptMessage`` instance.
        """
        if not isinstance(message, PromptMessage):
            raise TypeError("Expected PromptMessage")
        self.messages.append(message)
        return self

    def add_messages(self, messages: Iterable[PromptMessage]) -> PromptTemplate:
        """Add multiple prompt messages.

        Args:
            messages: An iterable of messages to add.

        Returns:
            ``PromptTemplate``: This template, allowing method chaining.
        """
        for item in messages:
            self.add_message(item)
        return self

    def _text_message(
        self, role: Role, content_type: ContentType, text: str
    ) -> PromptTemplate:
        # Shared builder behind the system/user/assistant convenience methods.
        message = PromptMessage(role=role, content_type=content_type, text=text)
        return self.add_message(message)

    def add_system(self, text: str) -> PromptTemplate:
        """Add a system message.

        Args:
            text: The system message text.

        Returns:
            ``PromptTemplate``: This template, allowing method chaining.
        """
        return self._text_message(Role.DEVELOPER, ContentType.INPUT_TEXT, text)

    def add_user(self, text: str) -> PromptTemplate:
        """Add a user message.

        Args:
            text: The user message text.

        Returns:
            ``PromptTemplate``: This template, allowing method chaining.
        """
        return self._text_message(Role.USER, ContentType.INPUT_TEXT, text)

    def add_assistant(self, text: str) -> PromptTemplate:
        """Add an assistant message.

        Args:
            text: The assistant message text.

        Returns:
            ``PromptTemplate``: This template, allowing method chaining.
        """
        return self._text_message(Role.ASSISTANT, ContentType.OUTPUT_TEXT, text)

    def add_tool_call(
        self,
        *,
        name: str,
        arguments: str,
        call_id: str,
    ) -> PromptTemplate:
        """Add a tool call message.

        Args:
            name: The name of the tool being called.
            arguments: The tool arguments, encoded as a JSON string.
            call_id: The unique identifier for this tool call.

        Returns:
            ``PromptTemplate``: This template, allowing method chaining.
        """
        call = PromptMessage(
            content_type=ContentType.FUNCTION_CALL,
            name=name,
            arguments=arguments,
            call_id=call_id,
        )
        return self.add_message(call)

    def add_tool_output(
        self,
        *,
        call_id: str,
        output: str,
    ) -> PromptTemplate:
        """Add a tool output message.

        Args:
            call_id: The ID of the tool call this output corresponds to.
            output: The output produced by the tool.

        Returns:
            ``PromptTemplate``: This template, allowing method chaining.
        """
        result = PromptMessage(
            content_type=ContentType.FUNCTION_CALL_OUTPUT,
            call_id=call_id,
            output=output,
        )
        return self.add_message(result)

    def copy(self) -> PromptTemplate:
        """Create a shallow copy of this template.

        Required to avoid mutating caller-owned templates inside the Agent.
        The message objects themselves are shared, not duplicated.

        Returns:
            ``PromptTemplate``: A new template containing the same messages.
        """
        duplicate = PromptTemplate()
        duplicate.messages = self.messages[:]
        return duplicate

    def to_openai_input(self) -> List[Dict[str, Any]]:
        """Convert the template to OpenAI message dictionaries.

        Returns:
            List[Dict[str, Any]]: The formatted messages.
        """
        return [message.to_openai_message() for message in self.messages]

    def __len__(self) -> int:
        """Return the number of messages in the template."""
        return len(self.messages)

    def __iter__(self):
        """Iterate over stored messages."""
        return iter(self.messages)
literun/results.py ADDED
@@ -0,0 +1,51 @@
1
+ """Return types for agent execution."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass
6
+ from typing import Any
7
+
8
+ from openai.types.responses import Response
9
+ from .items import RunItem
10
+ from .events import StreamEvent
11
+
12
+
13
@dataclass
class RunResult:
    """Final result returned by the OpenAI Agent.

    Used in the ``Agent.invoke()`` method. Contains the full execution
    trace accumulated during a single agent run.
    """

    input: str | list[Any]
    """The original input items before ``run()`` was called."""

    # NOTE(review): the annotation allows either a raw Response or parsed
    # RunItems — confirm which form the Agent emits on each code path.
    new_items: Response | list[RunItem]
    """Items generated during the agent run, such as messages, tool calls,
    and tool outputs.
    """

    final_output: Any
    """The output produced by the final agent invocation."""
31
+
32
+
33
@dataclass
class RunResultStreaming:
    """Streaming result returned by the OpenAI Agent.

    Used in the ``Agent.stream()`` method. Each instance represents a
    single stream event while accumulating completed items, so one
    ``RunResultStreaming`` is produced per event.
    """

    input: str | list[Any]
    """The original input items before ``run()`` was called."""

    event: StreamEvent
    """The stream event emitted for this iteration."""

    final_output: Any
    """The output produced by the final agent invocation.

    This value is `None` until the final message is complete.
    """