literun 0.1.0__tar.gz → 0.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- literun-0.1.1/.github/workflows/release.yml +125 -0
- literun-0.1.1/.gitignore +61 -0
- literun-0.1.1/DOCS.md +217 -0
- literun-0.1.1/PKG-INFO +187 -0
- literun-0.1.1/README.md +148 -0
- literun-0.1.1/examples/run_agent.py +166 -0
- literun-0.1.1/examples/run_llm.py +57 -0
- literun-0.1.1/examples/run_llm_with_tools.py +88 -0
- literun-0.1.1/pyproject.toml +40 -0
- {literun-0.1.0 → literun-0.1.1}/src/literun/__init__.py +1 -1
- literun-0.1.1/src/literun/agent.py +158 -0
- {literun-0.1.0 → literun-0.1.1}/src/literun/args_schema.py +18 -24
- literun-0.1.1/src/literun/constants.py +26 -0
- {literun-0.1.0 → literun-0.1.1}/src/literun/events.py +35 -35
- {literun-0.1.0 → literun-0.1.1}/src/literun/items.py +12 -17
- literun-0.1.1/src/literun/llm.py +248 -0
- {literun-0.1.0 → literun-0.1.1}/src/literun/prompt_message.py +35 -48
- {literun-0.1.0 → literun-0.1.1}/src/literun/prompt_template.py +14 -17
- {literun-0.1.0 → literun-0.1.1}/src/literun/results.py +3 -5
- literun-0.1.0/src/literun/agent.py → literun-0.1.1/src/literun/runner.py +114 -183
- {literun-0.1.0 → literun-0.1.1}/src/literun/tool.py +29 -41
- literun-0.1.1/tests/test_agent.py +169 -0
- {literun-0.1.0 → literun-0.1.1}/tests/test_llm.py +32 -18
- literun-0.1.1/tests/test_prompt.py +82 -0
- literun-0.1.1/tests/test_tool.py +194 -0
- literun-0.1.0/PKG-INFO +0 -242
- literun-0.1.0/README.md +0 -226
- literun-0.1.0/pyproject.toml +0 -34
- literun-0.1.0/setup.cfg +0 -4
- literun-0.1.0/src/literun/constants.py +0 -21
- literun-0.1.0/src/literun/llm.py +0 -156
- literun-0.1.0/src/literun.egg-info/PKG-INFO +0 -242
- literun-0.1.0/src/literun.egg-info/SOURCES.txt +0 -26
- literun-0.1.0/src/literun.egg-info/dependency_links.txt +0 -1
- literun-0.1.0/src/literun.egg-info/requires.txt +0 -6
- literun-0.1.0/src/literun.egg-info/top_level.txt +0 -1
- literun-0.1.0/tests/test_agent.py +0 -144
- literun-0.1.0/tests/test_future_annotations.py +0 -58
- literun-0.1.0/tests/test_prompt.py +0 -59
- literun-0.1.0/tests/test_runtime_context.py +0 -73
- literun-0.1.0/tests/test_tool.py +0 -71
- {literun-0.1.0 → literun-0.1.1}/LICENSE +0 -0
- {literun-0.1.0 → literun-0.1.1}/src/literun/utils.py +0 -0
|
@@ -0,0 +1,125 @@
|
|
|
1
|
+
# Builds and publishes LiteRun packages to PyPI.
|
|
2
|
+
|
|
3
|
+
name: "🚀 LiteRun Release"
|
|
4
|
+
run-name: "Release ${{ inputs.version }}"
|
|
5
|
+
on:
|
|
6
|
+
workflow_dispatch:
|
|
7
|
+
inputs:
|
|
8
|
+
version:
|
|
9
|
+
description: "Version to release (must match pyproject.toml)"
|
|
10
|
+
required: true
|
|
11
|
+
type: string
|
|
12
|
+
|
|
13
|
+
permissions:
|
|
14
|
+
contents: write # Required for creating GitHub releases
|
|
15
|
+
|
|
16
|
+
jobs:
|
|
17
|
+
build:
|
|
18
|
+
name: Build Distribution
|
|
19
|
+
runs-on: ubuntu-latest
|
|
20
|
+
steps:
|
|
21
|
+
- name: Checkout code
|
|
22
|
+
uses: actions/checkout@v6
|
|
23
|
+
|
|
24
|
+
- name: Set up Python
|
|
25
|
+
uses: actions/setup-python@v6
|
|
26
|
+
with:
|
|
27
|
+
python-version: "3.10"
|
|
28
|
+
|
|
29
|
+
- name: Install build tools
|
|
30
|
+
run: |
|
|
31
|
+
python -m pip install --upgrade pip
|
|
32
|
+
pip install build twine
|
|
33
|
+
|
|
34
|
+
- name: Verify Version
|
|
35
|
+
# Mature Practice: The code in git is the source of truth.
|
|
36
|
+
# We verify inputs.version matches pyproject.toml instead of patching it.
|
|
37
|
+
run: |
|
|
38
|
+
# Extract version from pyproject.toml
|
|
39
|
+
PROJECT_VERSION=$(grep -m 1 '^version = ' pyproject.toml | cut -d '"' -f 2)
|
|
40
|
+
echo "Project Version: $PROJECT_VERSION"
|
|
41
|
+
echo "Input Version: ${{ inputs.version }}"
|
|
42
|
+
|
|
43
|
+
if [ "$PROJECT_VERSION" != "${{ inputs.version }}" ]; then
|
|
44
|
+
echo "::error::Version mismatch! pyproject.toml has $PROJECT_VERSION but workflow input is ${{ inputs.version }}."
|
|
45
|
+
echo "Please update pyproject.toml in your code, commit, and push before releasing."
|
|
46
|
+
exit 1
|
|
47
|
+
fi
|
|
48
|
+
|
|
49
|
+
- name: Build package
|
|
50
|
+
run: python -m build
|
|
51
|
+
|
|
52
|
+
- name: Check metadata
|
|
53
|
+
run: twine check dist/*
|
|
54
|
+
|
|
55
|
+
- name: Upload artifacts
|
|
56
|
+
uses: actions/upload-artifact@v6
|
|
57
|
+
with:
|
|
58
|
+
name: dist
|
|
59
|
+
path: dist/
|
|
60
|
+
|
|
61
|
+
publish-testpypi:
|
|
62
|
+
name: Publish to test PyPI
|
|
63
|
+
needs: build
|
|
64
|
+
runs-on: ubuntu-latest
|
|
65
|
+
environment: release
|
|
66
|
+
permissions:
|
|
67
|
+
id-token: write
|
|
68
|
+
steps:
|
|
69
|
+
- name: Download artifacts
|
|
70
|
+
uses: actions/download-artifact@v7
|
|
71
|
+
with:
|
|
72
|
+
name: dist
|
|
73
|
+
path: dist/
|
|
74
|
+
|
|
75
|
+
- name: Publish to test PyPI
|
|
76
|
+
uses: pypa/gh-action-pypi-publish@release/v1
|
|
77
|
+
with:
|
|
78
|
+
repository-url: https://test.pypi.org/legacy/
|
|
79
|
+
password: ${{ secrets.TEST_PYPI_API_TOKEN }}
|
|
80
|
+
skip-existing: true
|
|
81
|
+
verbose: true
|
|
82
|
+
|
|
83
|
+
verify-release:
|
|
84
|
+
name: Verify Release
|
|
85
|
+
needs: publish-testpypi
|
|
86
|
+
runs-on: ubuntu-latest
|
|
87
|
+
steps:
|
|
88
|
+
- name: Set up Python
|
|
89
|
+
uses: actions/setup-python@v6
|
|
90
|
+
with:
|
|
91
|
+
python-version: "3.10"
|
|
92
|
+
|
|
93
|
+
- name: Wait for propagation
|
|
94
|
+
run: sleep 30
|
|
95
|
+
|
|
96
|
+
- name: Verify Installation
|
|
97
|
+
# Detailed verification similar to professional workflows
|
|
98
|
+
run: |
|
|
99
|
+
# Install from test PyPI, fallback to PyPI for deps
|
|
100
|
+
pip install --index-url https://test.pypi.org/simple/ \
|
|
101
|
+
--extra-index-url https://pypi.org/simple/ \
|
|
102
|
+
literun==${{ inputs.version }}
|
|
103
|
+
|
|
104
|
+
# Verify import and version
|
|
105
|
+
python -c "import literun; print(f'Successfully installed literun {literun.__version__}')"
|
|
106
|
+
|
|
107
|
+
publish-pypi:
|
|
108
|
+
name: Publish to PyPI
|
|
109
|
+
needs: verify-release
|
|
110
|
+
runs-on: ubuntu-latest
|
|
111
|
+
environment: release
|
|
112
|
+
permissions:
|
|
113
|
+
id-token: write
|
|
114
|
+
steps:
|
|
115
|
+
- name: Download artifacts
|
|
116
|
+
uses: actions/download-artifact@v7
|
|
117
|
+
with:
|
|
118
|
+
name: dist
|
|
119
|
+
path: dist/
|
|
120
|
+
|
|
121
|
+
- name: Publish to PyPI
|
|
122
|
+
uses: pypa/gh-action-pypi-publish@release/v1
|
|
123
|
+
with:
|
|
124
|
+
password: ${{ secrets.PYPI_API_TOKEN }}
|
|
125
|
+
verbose: true
|
literun-0.1.1/.gitignore
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
# Byte-compiled / optimized / DLL files
|
|
2
|
+
__pycache__/
|
|
3
|
+
__pypackages__/
|
|
4
|
+
*.py[cod]
|
|
5
|
+
*$py.class
|
|
6
|
+
|
|
7
|
+
# C extensions
|
|
8
|
+
*.so
|
|
9
|
+
|
|
10
|
+
# Distribution / packaging
|
|
11
|
+
.Python
|
|
12
|
+
build/
|
|
13
|
+
develop-eggs/
|
|
14
|
+
dist/
|
|
15
|
+
downloads/
|
|
16
|
+
eggs/
|
|
17
|
+
.eggs/
|
|
18
|
+
lib/
|
|
19
|
+
lib64/
|
|
20
|
+
parts/
|
|
21
|
+
sdist/
|
|
22
|
+
var/
|
|
23
|
+
wheels/
|
|
24
|
+
*.egg-info/
|
|
25
|
+
.installed.cfg
|
|
26
|
+
*.egg
|
|
27
|
+
|
|
28
|
+
# Environments
|
|
29
|
+
.env
|
|
30
|
+
.venv
|
|
31
|
+
env/
|
|
32
|
+
venv/
|
|
33
|
+
ENV/
|
|
34
|
+
|
|
35
|
+
# IDE
|
|
36
|
+
.vscode/
|
|
37
|
+
.idea/
|
|
38
|
+
*.swp
|
|
39
|
+
*.swo
|
|
40
|
+
*~
|
|
41
|
+
|
|
42
|
+
# Celery stuff
|
|
43
|
+
celerybeat-schedule
|
|
44
|
+
celerybeat.pid
|
|
45
|
+
|
|
46
|
+
# Database
|
|
47
|
+
*.db
|
|
48
|
+
*.sqlite
|
|
49
|
+
|
|
50
|
+
# mkdocs documentation
|
|
51
|
+
/site
|
|
52
|
+
|
|
53
|
+
# Jupyter Notebook
|
|
54
|
+
.ipynb_checkpoints
|
|
55
|
+
*.ipynb
|
|
56
|
+
|
|
57
|
+
# Logs
|
|
58
|
+
*.log
|
|
59
|
+
|
|
60
|
+
# macOS
|
|
61
|
+
.DS_Store
|
literun-0.1.1/DOCS.md
ADDED
|
@@ -0,0 +1,217 @@
|
|
|
1
|
+
# LiteRun Documentation 📚
|
|
2
|
+
|
|
3
|
+
LiteRun is a lightweight, flexible Python framework for building custom OpenAI agents. It provides a robust abstraction over the OpenAI Responses API, adding tool management, structured prompt handling, and event-driven execution without the bloat of larger frameworks.
|
|
4
|
+
|
|
5
|
+
## Table of Contents
|
|
6
|
+
|
|
7
|
+
- [Core Architecture](#core-architecture)
|
|
8
|
+
- [Agent Execution](#agent-execution)
|
|
9
|
+
- [Tool Management](#tool-management)
|
|
10
|
+
- [Runtime Context Injection](#runtime-context-injection)
|
|
11
|
+
- [Prompt Templates](#prompt-templates)
|
|
12
|
+
- [Streaming](#streaming)
|
|
13
|
+
- [Direct LLM Usage](#direct-llm-usage)
|
|
14
|
+
|
|
15
|
+
---
|
|
16
|
+
|
|
17
|
+
## Core Architecture
|
|
18
|
+
|
|
19
|
+
LiteRun is built around three main components:
|
|
20
|
+
|
|
21
|
+
1. **Agent**: The orchestrator that manages the interaction loop between the user, the LLM, and the tools.
|
|
22
|
+
2. **Tool**: A wrapper around Python functions that handles argument validation (via Pydantic) and schema generation for OpenAI.
|
|
23
|
+
3. **ChatOpenAI**: A wrapper around the `openai` client that handles API communication, including `bind_tools`.
|
|
24
|
+
|
|
25
|
+
### Design Philosophy
|
|
26
|
+
|
|
27
|
+
- **Type Safety**: Heavily relies on Python type hints and Pydantic for validation.
|
|
28
|
+
- **Transparency**: Exposes raw OpenAI events and responses where possible.
|
|
29
|
+
- **Simplicity**: Minimal abstractions; "it's just Python functions".
|
|
30
|
+
|
|
31
|
+
---
|
|
32
|
+
|
|
33
|
+
## Agent Execution
|
|
34
|
+
|
|
35
|
+
The `Agent` runs a loop:
|
|
36
|
+
|
|
37
|
+
1. Appends user input to the history.
|
|
38
|
+
2. Calls the LLM.
|
|
39
|
+
3. If the LLM calls a tool:
|
|
40
|
+
- Executes the tool.
|
|
41
|
+
- Appends the tool result to the history.
|
|
42
|
+
- Repeats Step 2.
|
|
43
|
+
4. If the LLM responds with text, returns the final result.
|
|
44
|
+
|
|
45
|
+
The `invoke` method returns a `RunResult` object containing the final output and a list of all items (messages, tool calls) generated in this run.
|
|
46
|
+
|
|
47
|
+
```python
|
|
48
|
+
agent = Agent(llm=llm, tools=[...])
|
|
49
|
+
result = agent.invoke("Hello")
|
|
50
|
+
|
|
51
|
+
# result is a RunResult object
|
|
52
|
+
print(result.final_output) # The text string
|
|
53
|
+
print(result.new_items) # List of all items (msgs, tool calls) generated in this run
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
---
|
|
57
|
+
|
|
58
|
+
## Tool Management
|
|
59
|
+
|
|
60
|
+
Tools are defined using the `Tool` class. You must provide:
|
|
61
|
+
|
|
62
|
+
- `name`: Unique identifier.
|
|
63
|
+
- `description`: Used by the LLM to understand when to call it.
|
|
64
|
+
- `func`: The actual Python function.
|
|
65
|
+
- `args_schema`: A definition of arguments for the LLM.
|
|
66
|
+
|
|
67
|
+
### Using `ArgsSchema`
|
|
68
|
+
|
|
69
|
+
The `ArgsSchema` maps argument names to types and descriptions. This generates the JSON Schema sent to OpenAI.
|
|
70
|
+
|
|
71
|
+
```python
|
|
72
|
+
from literun import Tool, ArgsSchema
|
|
73
|
+
|
|
74
|
+
def my_func(x: int):
|
|
75
|
+
return x * 2
|
|
76
|
+
|
|
77
|
+
tool = Tool(
|
|
78
|
+
name="doubler",
|
|
79
|
+
description="Doubles a number",
|
|
80
|
+
func=my_func,
|
|
81
|
+
args_schema=[
|
|
82
|
+
ArgsSchema(name="x", type=int, description="Number to double")
|
|
83
|
+
]
|
|
84
|
+
)
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
---
|
|
88
|
+
|
|
89
|
+
## Runtime Context Injection
|
|
90
|
+
|
|
91
|
+
Sometimes tools need access to data that shouldn't be visible to the LLM (e.g., database connections, User IDs, API keys). LiteRun supports **runtime context injection**.
|
|
92
|
+
|
|
93
|
+
1. Annotate an argument in your tool function with `ToolRuntime`.
|
|
94
|
+
2. Pass a dictionary to `agent.invoke(..., runtime_context={...})`.
|
|
95
|
+
3. The agent will automatically strip this argument from the LLM schema and inject the context object at execution time.
|
|
96
|
+
|
|
97
|
+
```python
|
|
98
|
+
from literun import ToolRuntime
|
|
99
|
+
|
|
100
|
+
def sensitive_tool(data: str, ctx: ToolRuntime) -> str:
|
|
101
|
+
# 'data' comes from LLM
|
|
102
|
+
# 'ctx' comes from your application
|
|
103
|
+
api_key = getattr(ctx, "api_key", None)
|
|
104
|
+
return f"Processed {data} with {api_key}"
|
|
105
|
+
|
|
106
|
+
# ... Initialize tool & agent ...
|
|
107
|
+
|
|
108
|
+
agent.invoke(
|
|
109
|
+
"process data",
|
|
110
|
+
runtime_context={"api_key": "secret_123"}
|
|
111
|
+
)
|
|
112
|
+
```
|
|
113
|
+
|
|
114
|
+
---
|
|
115
|
+
|
|
116
|
+
## Prompt Templates
|
|
117
|
+
|
|
118
|
+
The `PromptTemplate` class helps structure conversation history. It replaces simple list-of-dict management with a type-safe builder.
|
|
119
|
+
|
|
120
|
+
```python
|
|
121
|
+
from literun import PromptTemplate
|
|
122
|
+
|
|
123
|
+
template = PromptTemplate()
|
|
124
|
+
template.add_system("You are a helpful assistant.")
|
|
125
|
+
template.add_user("Hello")
|
|
126
|
+
template.add_assistant("Hi there")
|
|
127
|
+
|
|
128
|
+
agent.invoke(user_input="How are you?", prompt_template=template)
|
|
129
|
+
```
|
|
130
|
+
|
|
131
|
+
You can also simulate tool interactions for testing or history restoration:
|
|
132
|
+
|
|
133
|
+
```python
|
|
134
|
+
# Add a tool call and its output
|
|
135
|
+
template.add_tool_call(
|
|
136
|
+
name="get_weather",
|
|
137
|
+
arguments='{"location": "Tokyo"}',
|
|
138
|
+
call_id="call_123"
|
|
139
|
+
)
|
|
140
|
+
template.add_tool_output(
|
|
141
|
+
call_id="call_123",
|
|
142
|
+
output="Sunny, 25C"
|
|
143
|
+
)
|
|
144
|
+
```
|
|
145
|
+
|
|
146
|
+
This is especially useful for managing long-term memory or restoring chat sessions.
|
|
147
|
+
|
|
148
|
+
---
|
|
149
|
+
|
|
150
|
+
## Streaming
|
|
151
|
+
|
|
152
|
+
LiteRun supports real-time streaming of both text generation and tool execution status using `agent.stream()`.
|
|
153
|
+
|
|
154
|
+
The stream yields `RunResultStreaming` objects containing an `event`.
|
|
155
|
+
|
|
156
|
+
Key Events:
|
|
157
|
+
|
|
158
|
+
- `response.output_text.delta`: A chunk of text content.
|
|
159
|
+
- `response.output_text.done`: Sent when text generation is complete.
|
|
160
|
+
- `response.function_call_arguments.delta`: A chunk of tool arguments (JSON).
|
|
161
|
+
- `response.function_call_arguments.done`: Sent when the LLM finishes generating arguments for a tool call.
|
|
162
|
+
|
|
163
|
+
```python
|
|
164
|
+
for result in agent.stream("Hello"):
|
|
165
|
+
event = result.event
|
|
166
|
+
|
|
167
|
+
# Text Streaming
|
|
168
|
+
if event.type == "response.output_text.delta":
|
|
169
|
+
print(event.delta, end="")
|
|
170
|
+
|
|
171
|
+
# Tool Argument Streaming
|
|
172
|
+
elif event.type == "response.function_call_arguments.delta":
|
|
173
|
+
print(event.delta, end="")
|
|
174
|
+
|
|
175
|
+
# Completion Events
|
|
176
|
+
elif event.type == "response.output_text.done":
|
|
177
|
+
print("\nText generation complete")
|
|
178
|
+
elif event.type == "response.function_call_arguments.done":
|
|
179
|
+
print(f"\nTool call complete: {event.name}({event.arguments})")
|
|
180
|
+
```
|
|
181
|
+
|
|
182
|
+
---
|
|
183
|
+
|
|
184
|
+
## Direct LLM Usage
|
|
185
|
+
|
|
186
|
+
If you don't need the agent loop (e.g. for a simple chat or classification task without tools), you can use `ChatOpenAI` directly.
|
|
187
|
+
|
|
188
|
+
```python
|
|
189
|
+
from literun import ChatOpenAI
|
|
190
|
+
|
|
191
|
+
llm = ChatOpenAI(model="gpt-4o")
|
|
192
|
+
response = llm.invoke([{"role": "user", "content": "Hi"}])
|
|
193
|
+
print(response.output_text)
|
|
194
|
+
```
|
|
195
|
+
|
|
196
|
+
You can also bind tools manually if you want to handle execution yourself:
|
|
197
|
+
|
|
198
|
+
```python
|
|
199
|
+
llm.bind_tools([my_tool])
|
|
200
|
+
response = llm.invoke(...)
|
|
201
|
+
# Check response.output for tool calls
|
|
202
|
+
```
|
|
203
|
+
|
|
204
|
+
### Streaming with ChatOpenAI
|
|
205
|
+
|
|
206
|
+
```python
|
|
207
|
+
stream = llm.stream([{"role": "user", "content": "Tell me a joke."}])
|
|
208
|
+
for event in stream:
|
|
209
|
+
if event.type == "response.output_text.delta":
|
|
210
|
+
print(event.delta, end="")
|
|
211
|
+
```
|
|
212
|
+
|
|
213
|
+
---
|
|
214
|
+
|
|
215
|
+
## Examples
|
|
216
|
+
|
|
217
|
+
For complete, runnable code examples covering these concepts, please visit the [**examples**](https://github.com/kaustubh-tr/literun/blob/main/examples/) directory in the repository.
|
literun-0.1.1/PKG-INFO
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: literun
|
|
3
|
+
Version: 0.1.1
|
|
4
|
+
Summary: A minimal agent runtime built on OpenAI Responses API
|
|
5
|
+
Project-URL: Homepage, https://github.com/kaustubh-tr/literun
|
|
6
|
+
Project-URL: Source, https://github.com/kaustubh-tr/literun
|
|
7
|
+
Project-URL: Issues, https://github.com/kaustubh-tr/literun/issues
|
|
8
|
+
Project-URL: Readme, https://github.com/kaustubh-tr/literun#readme
|
|
9
|
+
Project-URL: Documentation, https://github.com/kaustubh-tr/literun/blob/main/DOCS.md
|
|
10
|
+
Author-email: Kaustubh Trivedi <trivedikaustubh01@gmail.com>
|
|
11
|
+
License: MIT License
|
|
12
|
+
|
|
13
|
+
Copyright (c) 2026 Kaustubh Trivedi
|
|
14
|
+
|
|
15
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
16
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
17
|
+
in the Software without restriction, including without limitation the rights
|
|
18
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
19
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
20
|
+
furnished to do so, subject to the following conditions:
|
|
21
|
+
|
|
22
|
+
The above copyright notice and this permission notice shall be included in all
|
|
23
|
+
copies or substantial portions of the Software.
|
|
24
|
+
|
|
25
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
26
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
27
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
28
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
29
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
30
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
31
|
+
SOFTWARE.
|
|
32
|
+
License-File: LICENSE
|
|
33
|
+
Requires-Python: <4.0.0,>=3.10.0
|
|
34
|
+
Requires-Dist: openai<3.0.0,>=2.11.0
|
|
35
|
+
Requires-Dist: pydantic<3.0.0,>=2.12.0
|
|
36
|
+
Provides-Extra: dev
|
|
37
|
+
Requires-Dist: pytest<10.0.0,>=9.0.0; extra == 'dev'
|
|
38
|
+
Description-Content-Type: text/markdown
|
|
39
|
+
|
|
40
|
+
# LiteRun 🚀
|
|
41
|
+
|
|
42
|
+
[](https://pypi.org/project/literun/)
|
|
43
|
+
[](https://pypi.org/project/literun/)
|
|
44
|
+
[](https://opensource.org/licenses/MIT)
|
|
45
|
+
[](https://github.com/kaustubh-tr/literun/blob/main/DOCS.md)
|
|
46
|
+
|
|
47
|
+
A lightweight, flexible Python framework for building custom OpenAI agents (Responses API) with tool support and structured prompt management.
|
|
48
|
+
|
|
49
|
+
## Features
|
|
50
|
+
|
|
51
|
+
- **Custom Agent Execution**: Control the loop with synchronous and streaming support.
|
|
52
|
+
- **Tool Support**: Easy registration with Pydantic-powered validation.
|
|
53
|
+
- **Type Safety**: Built for modern Python 3.10+ environments.
|
|
54
|
+
- **Prompt Templates**: Structured message management.
|
|
55
|
+
- **Event-Driven**: Granular control via a rich event system.
|
|
56
|
+
|
|
57
|
+
For detailed documentation on Architecture, Streaming, and Advanced Configuration, see [DOCS.md](https://github.com/kaustubh-tr/literun/blob/main/DOCS.md).
|
|
58
|
+
|
|
59
|
+
## Requirements
|
|
60
|
+
|
|
61
|
+
- Python 3.10+
|
|
62
|
+
|
|
63
|
+
> **Note**: Core dependencies like `openai` and `pydantic` are automatically installed when you install `literun`.
|
|
64
|
+
|
|
65
|
+
## Installation
|
|
66
|
+
|
|
67
|
+
You can install `literun` directly from PyPI:
|
|
68
|
+
|
|
69
|
+
```bash
|
|
70
|
+
pip install literun
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
## Quick Start
|
|
74
|
+
|
|
75
|
+
### Basic Agent
|
|
76
|
+
|
|
77
|
+
Here is a simple example of how to create an agent with a custom tool.
|
|
78
|
+
|
|
79
|
+
```python
|
|
80
|
+
import os
|
|
81
|
+
from literun import Agent, ChatOpenAI, Tool, ArgsSchema
|
|
82
|
+
|
|
83
|
+
# 1. Define a tool function
|
|
84
|
+
def get_weather(location: str, unit: str = "celsius") -> str:
|
|
85
|
+
return f"The weather in {location} is 25 degrees {unit}."
|
|
86
|
+
|
|
87
|
+
# 2. Wrap it with Tool schema
|
|
88
|
+
weather_tool = Tool(
|
|
89
|
+
func=get_weather,
|
|
90
|
+
name="get_weather",
|
|
91
|
+
description="Get the weather for a location",
|
|
92
|
+
args_schema=[
|
|
93
|
+
ArgsSchema(
|
|
94
|
+
name="location",
|
|
95
|
+
type=str,
|
|
96
|
+
description="The city and state, e.g. San Francisco, CA",
|
|
97
|
+
),
|
|
98
|
+
ArgsSchema(
|
|
99
|
+
name="unit",
|
|
100
|
+
type=str,
|
|
101
|
+
description="The unit of temperature",
|
|
102
|
+
enum=["celsius", "fahrenheit"],
|
|
103
|
+
),
|
|
104
|
+
],
|
|
105
|
+
)
|
|
106
|
+
|
|
107
|
+
# 3. Initialize Agent
|
|
108
|
+
agent = Agent(
|
|
109
|
+
llm=ChatOpenAI(model="gpt-4.1-mini", temperature=0.7),
|
|
110
|
+
system_prompt="You are a helpful assistant.",
|
|
111
|
+
tools=[weather_tool],
|
|
112
|
+
)
|
|
113
|
+
|
|
114
|
+
# 4. Run the Agent
|
|
115
|
+
result = agent.invoke(user_input="What is the weather in Tokyo?")
|
|
116
|
+
print(f"Final Answer: {result.final_output}")
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
### Advanced Usage
|
|
120
|
+
|
|
121
|
+
LiteRun supports **Streaming**, **Runtime Context Injection** (for secrets), and **Direct LLM Usage**.
|
|
122
|
+
|
|
123
|
+
👉 Check out the [Documentation](https://github.com/kaustubh-tr/literun/blob/main/DOCS.md) and [Examples](https://github.com/kaustubh-tr/literun/blob/main/examples/) for more details.
|
|
124
|
+
|
|
125
|
+
## Project Structure
|
|
126
|
+
|
|
127
|
+
```text
|
|
128
|
+
literun/
|
|
129
|
+
├── src/
|
|
130
|
+
│ └── literun/ # Main package source
|
|
131
|
+
│ ├── agent.py # Agent orchestrator
|
|
132
|
+
│ ├── llm.py # ChatOpenAI wrapper
|
|
133
|
+
│ ├── tool.py # Tool & Schema definitions
|
|
134
|
+
│ └── ...
|
|
135
|
+
├── tests/ # Unit tests (agent, llm, tools, prompts)
|
|
136
|
+
├── examples/ # Runnable examples
|
|
137
|
+
├── DOCS.md # Detailed documentation
|
|
138
|
+
├── LICENSE # MIT License
|
|
139
|
+
├── README.md # This file
|
|
140
|
+
└── pyproject.toml # Project configuration & dependencies
|
|
141
|
+
```
|
|
142
|
+
|
|
143
|
+
## Contributing
|
|
144
|
+
|
|
145
|
+
We welcome contributions! Please follow these steps to set up your development environment:
|
|
146
|
+
|
|
147
|
+
1. **Fork** the repository and clone it locally:
|
|
148
|
+
|
|
149
|
+
```bash
|
|
150
|
+
git clone https://github.com/kaustubh-tr/literun.git
|
|
151
|
+
cd literun
|
|
152
|
+
```
|
|
153
|
+
|
|
154
|
+
2. **Install** in editable mode with development dependencies:
|
|
155
|
+
|
|
156
|
+
```bash
|
|
157
|
+
pip install -e ".[dev]"
|
|
158
|
+
```
|
|
159
|
+
|
|
160
|
+
3. **Create** a feature branch and make your changes.
|
|
161
|
+
|
|
162
|
+
4. **Test** your changes (see below).
|
|
163
|
+
|
|
164
|
+
5. **Submit** a pull request.
|
|
165
|
+
|
|
166
|
+
## Testing
|
|
167
|
+
|
|
168
|
+
This project uses `pytest` as the primary test runner, but supports `unittest` as well.
|
|
169
|
+
|
|
170
|
+
```bash
|
|
171
|
+
# Run all tests
|
|
172
|
+
pytest
|
|
173
|
+
```
|
|
174
|
+
|
|
175
|
+
or using unittest:
|
|
176
|
+
|
|
177
|
+
```bash
|
|
178
|
+
python -m unittest discover tests
|
|
179
|
+
```
|
|
180
|
+
|
|
181
|
+
> **Note**: Some integration tests require the `OPENAI_API_KEY` environment variable. They are automatically skipped if it is missing.
|
|
182
|
+
|
|
183
|
+
## License
|
|
184
|
+
|
|
185
|
+
Copyright (c) 2026 Kaustubh Trivedi.
|
|
186
|
+
|
|
187
|
+
Distributed under the terms of the [MIT](https://github.com/kaustubh-tr/literun/blob/main/LICENSE) license, LiteRun is free and open source software.
|