literun 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- literun-0.1.0/LICENSE +21 -0
- literun-0.1.0/PKG-INFO +242 -0
- literun-0.1.0/README.md +226 -0
- literun-0.1.0/pyproject.toml +34 -0
- literun-0.1.0/setup.cfg +4 -0
- literun-0.1.0/src/literun/__init__.py +33 -0
- literun-0.1.0/src/literun/agent.py +411 -0
- literun-0.1.0/src/literun/args_schema.py +87 -0
- literun-0.1.0/src/literun/constants.py +21 -0
- literun-0.1.0/src/literun/events.py +110 -0
- literun-0.1.0/src/literun/items.py +106 -0
- literun-0.1.0/src/literun/llm.py +156 -0
- literun-0.1.0/src/literun/prompt_message.py +136 -0
- literun-0.1.0/src/literun/prompt_template.py +181 -0
- literun-0.1.0/src/literun/results.py +51 -0
- literun-0.1.0/src/literun/tool.py +145 -0
- literun-0.1.0/src/literun/utils.py +73 -0
- literun-0.1.0/src/literun.egg-info/PKG-INFO +242 -0
- literun-0.1.0/src/literun.egg-info/SOURCES.txt +26 -0
- literun-0.1.0/src/literun.egg-info/dependency_links.txt +1 -0
- literun-0.1.0/src/literun.egg-info/requires.txt +6 -0
- literun-0.1.0/src/literun.egg-info/top_level.txt +1 -0
- literun-0.1.0/tests/test_agent.py +144 -0
- literun-0.1.0/tests/test_future_annotations.py +58 -0
- literun-0.1.0/tests/test_llm.py +78 -0
- literun-0.1.0/tests/test_prompt.py +59 -0
- literun-0.1.0/tests/test_runtime_context.py +73 -0
- literun-0.1.0/tests/test_tool.py +71 -0
literun-0.1.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Kaustubh Trivedi
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
literun-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,242 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: literun
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: A minimal agent runtime built on OpenAI Responses API
|
|
5
|
+
Author-email: Kaustubh <trivedikaustubh01@gmail.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Requires-Python: >=3.10
|
|
8
|
+
Description-Content-Type: text/markdown
|
|
9
|
+
License-File: LICENSE
|
|
10
|
+
Requires-Dist: openai>=2.11.0
|
|
11
|
+
Provides-Extra: dev
|
|
12
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
13
|
+
Requires-Dist: black; extra == "dev"
|
|
14
|
+
Requires-Dist: flake8; extra == "dev"
|
|
15
|
+
Dynamic: license-file
|
|
16
|
+
|
|
17
|
+
# LiteRun 🚀
|
|
18
|
+
|
|
19
|
+
A lightweight, flexible Python framework for building custom OpenAI agents (Responses API) with tool support and structured prompt management.
|
|
20
|
+
|
|
21
|
+
## Features
|
|
22
|
+
|
|
23
|
+
- **Custom Agent Execution**: Complete control over the agent execution loop, supporting both synchronous and streaming responses.
|
|
24
|
+
- **Tool Support**: Easy registration and execution of Python functions as tools.
|
|
25
|
+
- **Type Safety**: Strong typing for tool arguments with automatic coercion and validation.
|
|
26
|
+
- **Prompt Templates**: Structured way to build system, user, and assistant messages.
|
|
27
|
+
- **Constants**: Pre-defined constants for OpenAI roles and message types.
|
|
28
|
+
- **Streaming Support**: Built-in support for real-time streaming of agent thoughts, tool calls, and responses.
|
|
29
|
+
- **Tool Management**: Easy-to-define tools with automatic JSON schema generation (`ArgsSchema`).
|
|
30
|
+
- **Event-Driven**: Structured event system for granular control over the agent's execution lifecycle.
|
|
31
|
+
- **OpenAI Compatible**: Seamlessly integrates with `openai-python` client.
|
|
32
|
+
|
|
33
|
+
## Requirements
|
|
34
|
+
|
|
35
|
+
- Python 3.10+
|
|
36
|
+
- [OpenAI Python API library](https://pypi.org/project/openai/)
|
|
37
|
+
|
|
38
|
+
## Installation
|
|
39
|
+
|
|
40
|
+
### Production
|
|
41
|
+
|
|
42
|
+
```bash
|
|
43
|
+
pip install literun
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
### Development
|
|
47
|
+
|
|
48
|
+
```bash
|
|
49
|
+
git clone https://github.com/kaustubh-tr/literun.git
|
|
50
|
+
cd literun
|
|
51
|
+
pip install -e .[dev]
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
## Quick Start
|
|
55
|
+
|
|
56
|
+
### Basic Agent
|
|
57
|
+
|
|
58
|
+
Here is a simple example of how to create an agent with a custom tool:
|
|
59
|
+
|
|
60
|
+
```python
|
|
61
|
+
import os
|
|
62
|
+
from literun import Agent, ChatOpenAI, Tool, ArgsSchema
|
|
63
|
+
|
|
64
|
+
# 1. Define a tool function
|
|
65
|
+
def get_weather(location: str, unit: str = "celsius") -> str:
|
|
66
|
+
return f"The weather in {location} is 25 degrees {unit}."
|
|
67
|
+
|
|
68
|
+
# 2. Wrap it with Tool schema
|
|
69
|
+
weather_tool = Tool(
|
|
70
|
+
func=get_weather,
|
|
71
|
+
name="get_weather",
|
|
72
|
+
description="Get the weather for a location",
|
|
73
|
+
args_schema=[
|
|
74
|
+
ArgsSchema(
|
|
75
|
+
name="location",
|
|
76
|
+
type=str,
|
|
77
|
+
description="The city and state, e.g. San Francisco, CA",
|
|
78
|
+
),
|
|
79
|
+
ArgsSchema(
|
|
80
|
+
name="unit",
|
|
81
|
+
type=str,
|
|
82
|
+
description="The unit of temperature",
|
|
83
|
+
enum=["celsius", "fahrenheit"],
|
|
84
|
+
),
|
|
85
|
+
],
|
|
86
|
+
)
|
|
87
|
+
|
|
88
|
+
# 3. Initialize LLM and Agent
|
|
89
|
+
llm = ChatOpenAI(model="gpt-4o", temperature=0.7)
|
|
90
|
+
|
|
91
|
+
# 4. Initialize Agent
|
|
92
|
+
agent = Agent(
|
|
93
|
+
llm=llm,
|
|
94
|
+
system_prompt="You are a helpful assistant.",
|
|
95
|
+
tools=[weather_tool],
|
|
96
|
+
)
|
|
97
|
+
|
|
98
|
+
# 5. Run the Agent
|
|
99
|
+
result = agent.invoke(user_input="What is the weather in Tokyo?")
|
|
100
|
+
print(f"Final Answer: {result.final_output}")
|
|
101
|
+
```
|
|
102
|
+
|
|
103
|
+
### Streaming Agent
|
|
104
|
+
|
|
105
|
+
You can also stream the agent's execution to handle events in real-time:
|
|
106
|
+
|
|
107
|
+
```python
|
|
108
|
+
# ... (setup tool and agent as above)
|
|
109
|
+
|
|
110
|
+
print("Agent: ", end="", flush=True)
|
|
111
|
+
for result in agent.stream(user_input="What is the weather in Tokyo?"):
|
|
112
|
+
event = result.event
|
|
113
|
+
if event.type == "response.output_text.delta":
|
|
114
|
+
print(event.delta, end="", flush=True)
|
|
115
|
+
elif event.type == "response.function_call_arguments.done":
|
|
116
|
+
print(f"\n[Tool Call: {event.name}]")
|
|
117
|
+
|
|
118
|
+
print()
|
|
119
|
+
```
|
|
120
|
+
|
|
121
|
+
### Runtime Configuration (Context Injection)
|
|
122
|
+
|
|
123
|
+
The framework allows passing a runtime context to tools using explicit context injection.
|
|
124
|
+
|
|
125
|
+
Rules:
|
|
126
|
+
1. Define a tool function with a parameter annotated with `ToolRuntime`.
|
|
127
|
+
2. The framework will automatically inject the `runtime_context` (wrapped in `ToolRuntime`) into that parameter.
|
|
128
|
+
3. Access configuration values using `ctx.{parameter}`.
|
|
129
|
+
|
|
130
|
+
```python
|
|
131
|
+
from typing import Dict, Any
|
|
132
|
+
from literun import Tool, ArgsSchema, ToolRuntime
|
|
133
|
+
|
|
134
|
+
# 1. Define tool with context
|
|
135
|
+
def get_weather(location: str, ctx: ToolRuntime) -> str:
|
|
136
|
+
"""
|
|
137
|
+
Returns weather info for a location.
|
|
138
|
+
The runtime context can include sensitive info like user_id or API keys.
|
|
139
|
+
"""
|
|
140
|
+
user_id = getattr(ctx, "user_id", "unknown_user")
|
|
141
|
+
api_key = getattr(ctx, "weather_api_key", None)
|
|
142
|
+
|
|
143
|
+
# Simulate fetching weather
|
|
144
|
+
return f"Weather for {location} fetched using API key '{api_key}' for user '{user_id}'."
|
|
145
|
+
|
|
146
|
+
# 2. Register tool
|
|
147
|
+
tool = Tool(
|
|
148
|
+
name="get_weather",
|
|
149
|
+
description="Get the weather for a given location",
|
|
150
|
+
func=get_weather,
|
|
151
|
+
args_schema=[
|
|
152
|
+
ArgsSchema(
|
|
153
|
+
name="location",
|
|
154
|
+
type=str,
|
|
155
|
+
description="Location for which to get the weather",
|
|
156
|
+
)
|
|
157
|
+
]
|
|
158
|
+
)
|
|
159
|
+
|
|
160
|
+
# 3. Setup agent
|
|
161
|
+
agent = Agent(
|
|
162
|
+
llm=ChatOpenAI(api_key="fake"),
|
|
163
|
+
tools=[tool]
|
|
164
|
+
)
|
|
165
|
+
|
|
166
|
+
# 4. Pass config at runtime
|
|
167
|
+
# The whole dict is passed into the 'ctx' argument
|
|
168
|
+
agent.invoke(
|
|
169
|
+
user_input="What's the weather in London?",
|
|
170
|
+
runtime_context={
|
|
171
|
+
"user_id": "user_123",
|
|
172
|
+
"weather_api_key": "SECRET_API_KEY_456"
|
|
173
|
+
}
|
|
174
|
+
)
|
|
175
|
+
```
|
|
176
|
+
|
|
177
|
+
### Using ChatOpenAI Directly
|
|
178
|
+
|
|
179
|
+
You can also use the `ChatOpenAI` class directly if you don't need the agent loop (e.g., for simple, one-off LLM calls).
|
|
180
|
+
|
|
181
|
+
```python
|
|
182
|
+
from literun import ChatOpenAI
|
|
183
|
+
|
|
184
|
+
llm = ChatOpenAI(model="gpt-4o", temperature=0)
|
|
185
|
+
|
|
186
|
+
messages = [
|
|
187
|
+
{"role": "system", "content": "You are a helpful assistant."},
|
|
188
|
+
{"role": "user", "content": "Tell me a joke."}
|
|
189
|
+
]
|
|
190
|
+
|
|
191
|
+
# Synchronous call
|
|
192
|
+
# Returns the raw OpenAI Responses API response object
|
|
193
|
+
response = llm.invoke(messages=messages)
|
|
194
|
+
print(response.output_text)
|
|
195
|
+
|
|
196
|
+
# Or streaming call
|
|
197
|
+
# Returns a generator of raw OpenAI response stream events
|
|
198
|
+
stream = llm.stream(messages=messages)
|
|
199
|
+
for event in stream:
|
|
200
|
+
print(event)
|
|
201
|
+
```
|
|
202
|
+
|
|
203
|
+
See [examples](examples/) for complete runnable examples.
|
|
204
|
+
|
|
205
|
+
## Project Structure
|
|
206
|
+
|
|
207
|
+
The project is organized as follows:
|
|
208
|
+
|
|
209
|
+
```
|
|
210
|
+
literun/
|
|
211
|
+
├── src/
|
|
212
|
+
│ └── literun/ # Main package source
|
|
213
|
+
│ ├── agent.py # Agent runtime logic
|
|
214
|
+
│ ├── llm.py # LLM client wrapper
|
|
215
|
+
│ ├── tool.py # Tool definition and execution
|
|
216
|
+
│ ├── events.py # Stream event types
|
|
217
|
+
│ └── ...
|
|
218
|
+
├── tests/ # Unit tests
|
|
219
|
+
├── examples/ # Usage examples
|
|
220
|
+
└── pyproject.toml # Project configuration
|
|
221
|
+
```
|
|
222
|
+
|
|
223
|
+
## Testing
|
|
224
|
+
|
|
225
|
+
Run the test suite using `unittest`:
|
|
226
|
+
|
|
227
|
+
```bash
|
|
228
|
+
python -m unittest discover tests
|
|
229
|
+
```
|
|
230
|
+
|
|
231
|
+
## Contributing
|
|
232
|
+
|
|
233
|
+
1. Fork the repository
|
|
234
|
+
2. Create a feature branch
|
|
235
|
+
3. Make your changes
|
|
236
|
+
4. Run tests: `python -m unittest discover tests`
|
|
237
|
+
5. Update the example usage if needed
|
|
238
|
+
6. Submit a pull request
|
|
239
|
+
|
|
240
|
+
## License
|
|
241
|
+
|
|
242
|
+
MIT
|
literun-0.1.0/README.md
ADDED
|
@@ -0,0 +1,226 @@
|
|
|
1
|
+
# LiteRun 🚀
|
|
2
|
+
|
|
3
|
+
A lightweight, flexible Python framework for building custom OpenAI agents (Responses API) with tool support and structured prompt management.
|
|
4
|
+
|
|
5
|
+
## Features
|
|
6
|
+
|
|
7
|
+
- **Custom Agent Execution**: Complete control over the agent execution loop, supporting both synchronous and streaming responses.
|
|
8
|
+
- **Tool Support**: Easy registration and execution of Python functions as tools.
|
|
9
|
+
- **Type Safety**: Strong typing for tool arguments with automatic coercion and validation.
|
|
10
|
+
- **Prompt Templates**: Structured way to build system, user, and assistant messages.
|
|
11
|
+
- **Constants**: Pre-defined constants for OpenAI roles and message types.
|
|
12
|
+
- **Streaming Support**: Built-in support for real-time streaming of agent thoughts, tool calls, and responses.
|
|
13
|
+
- **Tool Management**: Easy-to-define tools with automatic JSON schema generation (`ArgsSchema`).
|
|
14
|
+
- **Event-Driven**: Structured event system for granular control over the agent's execution lifecycle.
|
|
15
|
+
- **OpenAI Compatible**: Seamlessly integrates with `openai-python` client.
|
|
16
|
+
|
|
17
|
+
## Requirements
|
|
18
|
+
|
|
19
|
+
- Python 3.10+
|
|
20
|
+
- [OpenAI Python API library](https://pypi.org/project/openai/)
|
|
21
|
+
|
|
22
|
+
## Installation
|
|
23
|
+
|
|
24
|
+
### Production
|
|
25
|
+
|
|
26
|
+
```bash
|
|
27
|
+
pip install literun
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
### Development
|
|
31
|
+
|
|
32
|
+
```bash
|
|
33
|
+
git clone https://github.com/kaustubh-tr/literun.git
|
|
34
|
+
cd literun
|
|
35
|
+
pip install -e .[dev]
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
## Quick Start
|
|
39
|
+
|
|
40
|
+
### Basic Agent
|
|
41
|
+
|
|
42
|
+
Here is a simple example of how to create an agent with a custom tool:
|
|
43
|
+
|
|
44
|
+
```python
|
|
45
|
+
import os
|
|
46
|
+
from literun import Agent, ChatOpenAI, Tool, ArgsSchema
|
|
47
|
+
|
|
48
|
+
# 1. Define a tool function
|
|
49
|
+
def get_weather(location: str, unit: str = "celsius") -> str:
|
|
50
|
+
return f"The weather in {location} is 25 degrees {unit}."
|
|
51
|
+
|
|
52
|
+
# 2. Wrap it with Tool schema
|
|
53
|
+
weather_tool = Tool(
|
|
54
|
+
func=get_weather,
|
|
55
|
+
name="get_weather",
|
|
56
|
+
description="Get the weather for a location",
|
|
57
|
+
args_schema=[
|
|
58
|
+
ArgsSchema(
|
|
59
|
+
name="location",
|
|
60
|
+
type=str,
|
|
61
|
+
description="The city and state, e.g. San Francisco, CA",
|
|
62
|
+
),
|
|
63
|
+
ArgsSchema(
|
|
64
|
+
name="unit",
|
|
65
|
+
type=str,
|
|
66
|
+
description="The unit of temperature",
|
|
67
|
+
enum=["celsius", "fahrenheit"],
|
|
68
|
+
),
|
|
69
|
+
],
|
|
70
|
+
)
|
|
71
|
+
|
|
72
|
+
# 3. Initialize LLM and Agent
|
|
73
|
+
llm = ChatOpenAI(model="gpt-4o", temperature=0.7)
|
|
74
|
+
|
|
75
|
+
# 4. Initialize Agent
|
|
76
|
+
agent = Agent(
|
|
77
|
+
llm=llm,
|
|
78
|
+
system_prompt="You are a helpful assistant.",
|
|
79
|
+
tools=[weather_tool],
|
|
80
|
+
)
|
|
81
|
+
|
|
82
|
+
# 5. Run the Agent
|
|
83
|
+
result = agent.invoke(user_input="What is the weather in Tokyo?")
|
|
84
|
+
print(f"Final Answer: {result.final_output}")
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
### Streaming Agent
|
|
88
|
+
|
|
89
|
+
You can also stream the agent's execution to handle events in real-time:
|
|
90
|
+
|
|
91
|
+
```python
|
|
92
|
+
# ... (setup tool and agent as above)
|
|
93
|
+
|
|
94
|
+
print("Agent: ", end="", flush=True)
|
|
95
|
+
for result in agent.stream(user_input="What is the weather in Tokyo?"):
|
|
96
|
+
event = result.event
|
|
97
|
+
if event.type == "response.output_text.delta":
|
|
98
|
+
print(event.delta, end="", flush=True)
|
|
99
|
+
elif event.type == "response.function_call_arguments.done":
|
|
100
|
+
print(f"\n[Tool Call: {event.name}]")
|
|
101
|
+
|
|
102
|
+
print()
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
### Runtime Configuration (Context Injection)
|
|
106
|
+
|
|
107
|
+
The framework allows passing a runtime context to tools using explicit context injection.
|
|
108
|
+
|
|
109
|
+
Rules:
|
|
110
|
+
1. Define a tool function with a parameter annotated with `ToolRuntime`.
|
|
111
|
+
2. The framework will automatically inject the `runtime_context` (wrapped in `ToolRuntime`) into that parameter.
|
|
112
|
+
3. Access configuration values using `ctx.{parameter}`.
|
|
113
|
+
|
|
114
|
+
```python
|
|
115
|
+
from typing import Dict, Any
|
|
116
|
+
from literun import Tool, ArgsSchema, ToolRuntime
|
|
117
|
+
|
|
118
|
+
# 1. Define tool with context
|
|
119
|
+
def get_weather(location: str, ctx: ToolRuntime) -> str:
|
|
120
|
+
"""
|
|
121
|
+
Returns weather info for a location.
|
|
122
|
+
The runtime context can include sensitive info like user_id or API keys.
|
|
123
|
+
"""
|
|
124
|
+
user_id = getattr(ctx, "user_id", "unknown_user")
|
|
125
|
+
api_key = getattr(ctx, "weather_api_key", None)
|
|
126
|
+
|
|
127
|
+
# Simulate fetching weather
|
|
128
|
+
return f"Weather for {location} fetched using API key '{api_key}' for user '{user_id}'."
|
|
129
|
+
|
|
130
|
+
# 2. Register tool
|
|
131
|
+
tool = Tool(
|
|
132
|
+
name="get_weather",
|
|
133
|
+
description="Get the weather for a given location",
|
|
134
|
+
func=get_weather,
|
|
135
|
+
args_schema=[
|
|
136
|
+
ArgsSchema(
|
|
137
|
+
name="location",
|
|
138
|
+
type=str,
|
|
139
|
+
description="Location for which to get the weather",
|
|
140
|
+
)
|
|
141
|
+
]
|
|
142
|
+
)
|
|
143
|
+
|
|
144
|
+
# 3. Setup agent
|
|
145
|
+
agent = Agent(
|
|
146
|
+
llm=ChatOpenAI(api_key="fake"),
|
|
147
|
+
tools=[tool]
|
|
148
|
+
)
|
|
149
|
+
|
|
150
|
+
# 4. Pass config at runtime
|
|
151
|
+
# The whole dict is passed into the 'ctx' argument
|
|
152
|
+
agent.invoke(
|
|
153
|
+
user_input="What's the weather in London?",
|
|
154
|
+
runtime_context={
|
|
155
|
+
"user_id": "user_123",
|
|
156
|
+
"weather_api_key": "SECRET_API_KEY_456"
|
|
157
|
+
}
|
|
158
|
+
)
|
|
159
|
+
```
|
|
160
|
+
|
|
161
|
+
### Using ChatOpenAI Directly
|
|
162
|
+
|
|
163
|
+
You can also use the `ChatOpenAI` class directly if you don't need the agent loop (e.g., for simple, one-off LLM calls).
|
|
164
|
+
|
|
165
|
+
```python
|
|
166
|
+
from literun import ChatOpenAI
|
|
167
|
+
|
|
168
|
+
llm = ChatOpenAI(model="gpt-4o", temperature=0)
|
|
169
|
+
|
|
170
|
+
messages = [
|
|
171
|
+
{"role": "system", "content": "You are a helpful assistant."},
|
|
172
|
+
{"role": "user", "content": "Tell me a joke."}
|
|
173
|
+
]
|
|
174
|
+
|
|
175
|
+
# Synchronous call
|
|
176
|
+
# Returns the raw OpenAI Responses API response object
|
|
177
|
+
response = llm.invoke(messages=messages)
|
|
178
|
+
print(response.output_text)
|
|
179
|
+
|
|
180
|
+
# Or streaming call
|
|
181
|
+
# Returns a generator of raw OpenAI response stream events
|
|
182
|
+
stream = llm.stream(messages=messages)
|
|
183
|
+
for event in stream:
|
|
184
|
+
print(event)
|
|
185
|
+
```
|
|
186
|
+
|
|
187
|
+
See [examples](examples/) for complete runnable examples.
|
|
188
|
+
|
|
189
|
+
## Project Structure
|
|
190
|
+
|
|
191
|
+
The project is organized as follows:
|
|
192
|
+
|
|
193
|
+
```
|
|
194
|
+
literun/
|
|
195
|
+
├── src/
|
|
196
|
+
│ └── literun/ # Main package source
|
|
197
|
+
│ ├── agent.py # Agent runtime logic
|
|
198
|
+
│ ├── llm.py # LLM client wrapper
|
|
199
|
+
│ ├── tool.py # Tool definition and execution
|
|
200
|
+
│ ├── events.py # Stream event types
|
|
201
|
+
│ └── ...
|
|
202
|
+
├── tests/ # Unit tests
|
|
203
|
+
├── examples/ # Usage examples
|
|
204
|
+
└── pyproject.toml # Project configuration
|
|
205
|
+
```
|
|
206
|
+
|
|
207
|
+
## Testing
|
|
208
|
+
|
|
209
|
+
Run the test suite using `unittest`:
|
|
210
|
+
|
|
211
|
+
```bash
|
|
212
|
+
python -m unittest discover tests
|
|
213
|
+
```
|
|
214
|
+
|
|
215
|
+
## Contributing
|
|
216
|
+
|
|
217
|
+
1. Fork the repository
|
|
218
|
+
2. Create a feature branch
|
|
219
|
+
3. Make your changes
|
|
220
|
+
4. Run tests: `python -m unittest discover tests`
|
|
221
|
+
5. Update the example usage if needed
|
|
222
|
+
6. Submit a pull request
|
|
223
|
+
|
|
224
|
+
## License
|
|
225
|
+
|
|
226
|
+
MIT
|
literun-0.1.0/pyproject.toml
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=80.0"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "literun"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "A minimal agent runtime built on OpenAI Responses API"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
requires-python = ">=3.10"
|
|
11
|
+
license = { text = "MIT" }
|
|
12
|
+
|
|
13
|
+
authors = [
|
|
14
|
+
{ name = "Kaustubh", email = "trivedikaustubh01@gmail.com" }
|
|
15
|
+
]
|
|
16
|
+
|
|
17
|
+
# runtime dependencies
|
|
18
|
+
dependencies = [
|
|
19
|
+
"openai>=2.11.0",
|
|
20
|
+
]
|
|
21
|
+
|
|
22
|
+
# optional dev dependencies
|
|
23
|
+
[project.optional-dependencies]
|
|
24
|
+
dev = [
|
|
25
|
+
"pytest>=7.0",
|
|
26
|
+
"black",
|
|
27
|
+
"flake8"
|
|
28
|
+
]
|
|
29
|
+
|
|
30
|
+
[tool.setuptools]
|
|
31
|
+
package-dir = {"" = "src"}
|
|
32
|
+
|
|
33
|
+
[tool.setuptools.packages.find]
|
|
34
|
+
where = ["src"]
|
literun-0.1.0/src/literun/__init__.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
"""Literun package initialization."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from .agent import Agent
|
|
6
|
+
from .llm import ChatOpenAI
|
|
7
|
+
from .tool import Tool, ToolRuntime
|
|
8
|
+
from .args_schema import ArgsSchema
|
|
9
|
+
from .prompt_template import PromptTemplate
|
|
10
|
+
from .prompt_message import PromptMessage
|
|
11
|
+
from .constants import Role, ContentType
|
|
12
|
+
from .items import RunItem
|
|
13
|
+
from .events import StreamEvent
|
|
14
|
+
from .results import RunResult, RunResultStreaming
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
__all__ = [
|
|
18
|
+
"Agent",
|
|
19
|
+
"ChatOpenAI",
|
|
20
|
+
"Tool",
|
|
21
|
+
"ToolRuntime",
|
|
22
|
+
"ArgsSchema",
|
|
23
|
+
"PromptTemplate",
|
|
24
|
+
"PromptMessage",
|
|
25
|
+
"Role",
|
|
26
|
+
"ContentType",
|
|
27
|
+
"RunItem",
|
|
28
|
+
"StreamEvent",
|
|
29
|
+
"RunResult",
|
|
30
|
+
"RunResultStreaming",
|
|
31
|
+
]
|
|
32
|
+
|
|
33
|
+
__version__ = "0.1.0"
|