literun 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of literun might be problematic. Click here for more details.

literun/tool.py ADDED
@@ -0,0 +1,145 @@
1
+ """Tool definition and runtime context."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import inspect
6
+ from typing import Any, Dict, List, Optional, Callable, get_type_hints
7
+
8
+ from .args_schema import ArgsSchema
9
+
10
+
11
class ToolRuntime:
    """Runtime context container for tools.

    Stores arbitrary runtime values as attributes, taken from the keyword
    arguments supplied at construction time. An instance is typically
    injected into tool functions that declare a parameter annotated with
    ``ToolRuntime``.

    Args:
        **kwargs: Arbitrary keyword arguments that will be set as
            attributes on the instance.
    """

    def __init__(self, **kwargs):
        # Bulk-assign every keyword argument as an instance attribute.
        self.__dict__.update(kwargs)

    def __repr__(self):
        return "ToolRuntime({})".format(self.__dict__)
29
+
30
+
31
class Tool:
    """Represents a callable tool that can be invoked by an agent or LLM.

    A ``Tool`` wraps a Python callable along with metadata and an argument
    schema, and provides utilities for argument validation, execution,
    and conversion to the OpenAI tool definition format.
    """

    def __init__(
        self,
        *,
        func: Callable,
        name: str,
        description: str,
        args_schema: List[ArgsSchema],
        strict: Optional[bool] = None,
    ):
        """Initialize a Tool.

        Args:
            func: The function to execute when the tool is called.
            name: The name of the tool.
            description: A description of what the tool does.
            args_schema: A list of arguments the tool accepts.
            strict: If True, model output is guaranteed to exactly match the JSON Schema
                provided in the function definition. If None, `strict` argument will not
                be included in tool definition.
        """
        self.func = func
        self.name = name
        self.description = description
        self.args_schema = args_schema
        self.strict = strict

    # OpenAI schema
    def to_openai_tool(self) -> Dict[str, Any]:
        """Convert the tool to the OpenAI tool schema format.

        Every schema argument is listed as required and additional
        properties are disallowed.

        Returns:
            Dict[str, Any]: The OpenAI-compatible tool definition.
        """
        properties = {}
        required = []

        for arg in self.args_schema:
            properties[arg.name] = arg.to_json_schema()
            required.append(arg.name)

        return {
            "type": "function",
            "name": self.name,
            "description": self.description,
            "parameters": {
                "type": "object",
                "properties": properties,
                "required": required,
                "additionalProperties": False,
            },
            # Only emit "strict" when explicitly set (True or False); None
            # means "leave it out of the definition entirely".
            **({"strict": self.strict} if self.strict is not None else {}),
        }

    # LLM Runtime argument handling
    def resolve_arguments(self, raw_args: Dict[str, Any]) -> Dict[str, Any]:
        """Validate and cast raw arguments provided by the model.

        Args:
            raw_args: The raw argument dictionary produced by the model.

        Returns:
            Dict[str, Any]: A dictionary of validated and type-cast arguments.
        """
        parsed = {}
        for arg in self.args_schema:
            # Missing keys are looked up as None; validation and coercion
            # are delegated to each ArgsSchema entry.
            parsed[arg.name] = arg.validate_and_cast(raw_args.get(arg.name))
        return parsed

    def execute(
        self, args: Dict[str, Any], runtime_context: Optional[Dict[str, Any]] = None
    ) -> Any:
        """Execute the tool with validated arguments and runtime context.

        This method resolves and validates model-provided arguments, injects
        a ``ToolRuntime`` instance into the function call if requested by the
        function's type annotations, and then invokes the underlying function.

        Args:
            args: Raw arguments provided by the model.
            runtime_context: Optional runtime context data used to construct
                a ``ToolRuntime`` instance when required.

        Returns:
            Any: The return value of the underlying tool function.
        """
        # 1. Resolve LLM arguments using the tool's schema logic
        final_args = self.resolve_arguments(args)

        # 2. Inject ToolRuntime if requested by the function signature.
        # Use get_type_hints to properly resolve annotations, including
        # forward references (e.g. under `from __future__ import annotations`).
        try:
            type_hints = get_type_hints(self.func)
        except (NameError, AttributeError, TypeError):
            # Fallback to inspect.signature if get_type_hints fails.
            # This handles cases where annotations can't be resolved.
            sig = inspect.signature(self.func)
            type_hints = {
                name: param.annotation
                for name, param in sig.parameters.items()
                if param.annotation is not inspect.Parameter.empty
            }

        for param_name, param_type in type_hints.items():
            # get_type_hints() also reports the function's return annotation
            # under the reserved key "return"; it is not a parameter, so skip
            # it — otherwise a function annotated `-> ToolRuntime` would be
            # called with a bogus `return=` keyword argument.
            if param_name == "return":
                continue
            if param_type is ToolRuntime:
                final_args[param_name] = ToolRuntime(**(runtime_context or {}))

        return self.func(**final_args)
literun/utils.py ADDED
@@ -0,0 +1,73 @@
1
+ """Utilities for extracting structured data from OpenAI response objects."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import List, Dict, Any
6
+
7
+
8
def extract_output_text(response: Any) -> str:
    """Extracts and concatenates output text from the response object.

    Args:
        response: The response object containing output messages.

    Returns:
        str: The concatenated output text.
    """
    # Walk every "message" output item and join all of its "output_text"
    # content parts in order.
    return "".join(
        part.text
        for item in response.output
        if item.type == "message"
        for part in item.content
        if part.type == "output_text"
    )
25
+
26
+
27
def extract_tool_calls(response: Any) -> List[Dict[str, Any]]:
    """Extracts tool call information from the response object into a list of dictionaries.

    Args:
        response: The response object containing tool call information.

    Returns:
        List[Dict[str, Any]]: A list of dictionaries with tool call details.
    """
    import json

    calls: List[Dict[str, Any]] = []
    for item in response.output:
        # Only "function_call" output items carry tool-call data.
        if item.type != "function_call":
            continue
        calls.append(
            {
                "arguments": json.loads(item.arguments),
                "call_id": item.call_id,
                "name": item.name,
                "type": item.type,
                "id": item.id,
                "status": item.status,
            }
        )
    return calls
52
+
53
+
54
def extract_usage_dict(response: Any) -> Dict[str, Any]:
    """Extracts usage statistics from the response object into a dictionary.

    Args:
        response: The response object containing usage statistics.

    Returns:
        Dict[str, Any]: A dictionary with usage statistics.
    """
    usage = response.usage
    cached = usage.input_tokens_details.cached_tokens
    reasoning = usage.output_tokens_details.reasoning_tokens
    return {
        "input_tokens": usage.input_tokens,
        "input_tokens_details": {"cached_tokens": cached},
        "output_tokens": usage.output_tokens,
        "output_tokens_details": {"reasoning_tokens": reasoning},
        "total_tokens": usage.total_tokens,
    }
@@ -0,0 +1,242 @@
1
+ Metadata-Version: 2.4
2
+ Name: literun
3
+ Version: 0.1.0
4
+ Summary: A Minimal agent runtime built on OpenAI Responses API
5
+ Author-email: Kaustubh <trivedikaustubh01@gmail.com>
6
+ License: MIT
7
+ Requires-Python: >=3.10
8
+ Description-Content-Type: text/markdown
9
+ License-File: LICENSE
10
+ Requires-Dist: openai>=2.11.0
11
+ Provides-Extra: dev
12
+ Requires-Dist: pytest>=7.0; extra == "dev"
13
+ Requires-Dist: black; extra == "dev"
14
+ Requires-Dist: flake8; extra == "dev"
15
+ Dynamic: license-file
16
+
17
+ # LiteRun 🚀
18
+
19
+ A lightweight, flexible Python framework for building custom OpenAI agents (Responses API) with tool support and structured prompt management.
20
+
21
+ ## Features
22
+
23
+ - **Custom Agent Execution**: Complete control over the agent execution loop, supporting both synchronous and streaming responses.
24
+ - **Tool Support**: Easy registration and execution of Python functions as tools.
25
+ - **Type Safety**: Strong typing for tool arguments with automatic coercion and validation.
26
+ - **Prompt Templates**: Structured way to build system, user, and assistant messages.
27
+ - **Constants**: Pre-defined constants for OpenAI roles and message types.
28
+ - **Streaming Support**: Built-in support for real-time streaming of agent thoughts, tool calls, and responses.
29
+ - **Tool Management**: Easy-to-define tools with automatic JSON schema generation (`ArgsSchema`).
30
+ - **Event-Driven**: Structured event system for granular control over the agent's execution lifecycle.
31
+ - **OpenAI Compatible**: Seamlessly integrates with `openai-python` client.
32
+
33
+ ## Requirements
34
+
35
+ - Python 3.10+
36
+ - [OpenAI Python API library](https://pypi.org/project/openai/)
37
+
38
+ ## Installation
39
+
40
+ ### Production
41
+
42
+ ```bash
43
+ pip install literun
44
+ ```
45
+
46
+ ### Development
47
+
48
+ ```bash
49
+ git clone https://github.com/kaustubh-tr/literun.git
50
+ cd literun
51
+ pip install -e .[dev]
52
+ ```
53
+
54
+ ## Quick Start
55
+
56
+ ### Basic Agent
57
+
58
+ Here is a simple example of how to create an agent with a custom tool:
59
+
60
+ ```python
61
+ import os
62
+ from literun import Agent, ChatOpenAI, Tool, ArgsSchema
63
+
64
+ # 1. Define a tool function
65
+ def get_weather(location: str, unit: str = "celsius") -> str:
66
+ return f"The weather in {location} is 25 degrees {unit}."
67
+
68
+ # 2. Wrap it with Tool schema
69
+ weather_tool = Tool(
70
+ func=get_weather,
71
+ name="get_weather",
72
+ description="Get the weather for a location",
73
+ args_schema=[
74
+ ArgsSchema(
75
+ name="location",
76
+ type=str,
77
+ description="The city and state, e.g. San Francisco, CA",
78
+ ),
79
+ ArgsSchema(
80
+ name="unit",
81
+ type=str,
82
+ description="The unit of temperature",
83
+ enum=["celsius", "fahrenheit"],
84
+ ),
85
+ ],
86
+ )
87
+
88
+ # 3. Initialize LLM and Agent
89
+ llm = ChatOpenAI(model="gpt-4o", temperature=0.7)
90
+
91
+ # 4. Initialize Agent
92
+ agent = Agent(
93
+ llm=llm,
94
+ system_prompt="You are a helpful assistant.",
95
+ tools=[weather_tool],
96
+ )
97
+
98
+ # 5. Run the Agent
99
+ result = agent.invoke(user_input="What is the weather in Tokyo?")
100
+ print(f"Final Answer: {result.final_output}")
101
+ ```
102
+
103
+ ### Streaming Agent
104
+
105
+ You can also stream the agent's execution to handle events in real-time:
106
+
107
+ ```python
108
+ # ... (setup tool and agent as above)
109
+
110
+ print("Agent: ", end="", flush=True)
111
+ for result in agent.stream(user_input="What is the weather in Tokyo?"):
112
+ event = result.event
113
+ if event.type == "response.output_text.delta":
114
+ print(event.delta, end="", flush=True)
115
+ elif event.type == "response.function_call_arguments.done":
116
+ print(f"\n[Tool Call: {event.name}]")
117
+
118
+ print()
119
+ ```
120
+
121
+ ### Runtime Configuration (Context Injection)
122
+
123
+ The framework allows passing a runtime context to tools using explicit context injection.
124
+
125
+ Rules:
126
+ 1. Define a tool function with a parameter annotated with `ToolRuntime`.
127
+ 2. The framework will automatically inject the `runtime_context` (wrapped in `ToolRuntime`) into that parameter.
128
+ 3. Access configuration values using `ctx.{parameter}`.
129
+
130
+ ```python
131
+ from typing import Dict, Any
132
+ from literun import Tool, ArgsSchema, ToolRuntime
133
+
134
+ # 1. Define tool with context
135
+ def get_weather(location: str, ctx: ToolRuntime) -> str:
136
+ """
137
+ Returns weather info for a location.
138
+ The runtime context can include sensitive info like user_id or API keys.
139
+ """
140
+ user_id = getattr(ctx, "user_id", "unknown_user")
141
+ api_key = getattr(ctx, "weather_api_key", None)
142
+
143
+ # Simulate fetching weather
144
+ return f"Weather for {location} fetched using API key '{api_key}' for user '{user_id}'."
145
+
146
+ # 2. Register tool
147
+ tool = Tool(
148
+ name="get_weather",
149
+ description="Get the weather for a given location",
150
+ func=get_weather,
151
+ args_schema=[
152
+ ArgsSchema(
153
+ name="location",
154
+ type=str,
155
+ description="Location for which to get the weather",
156
+ )
157
+ ]
158
+ )
159
+
160
+ # 3. Setup agent
161
+ agent = Agent(
162
+ llm=ChatOpenAI(api_key="fake"),
163
+ tools=[tool]
164
+ )
165
+
166
+ # 4. Pass config at runtime
167
+ # The whole dict is passed into the 'ctx' argument
168
+ agent.invoke(
169
+ user_input="What's the weather in London?",
170
+ runtime_context={
171
+ "user_id": "user_123",
172
+ "weather_api_key": "SECRET_API_KEY_456"
173
+ }
174
+ )
175
+ ```
176
+
177
+ ### Using ChatOpenAI Directly
178
+
179
+ You can also use the `ChatOpenAI` class directly if you don't need the agent loop (e.g., for simple, one-off LLM calls).
180
+
181
+ ```python
182
+ from literun import ChatOpenAI
183
+
184
+ llm = ChatOpenAI(model="gpt-4o", temperature=0)
185
+
186
+ messages = [
187
+ {"role": "system", "content": "You are a helpful assistant."},
188
+ {"role": "user", "content": "Tell me a joke."}
189
+ ]
190
+
191
+ # Synchronous call
192
+ # Returns the raw OpenAI Responses API response object
193
+ response = llm.invoke(messages=messages)
194
+ print(response.output_text)
195
+
196
+ # Or streaming call
197
+ # Returns a generator of raw OpenAI response stream events
198
+ stream = llm.stream(messages=messages)
199
+ for event in stream:
200
+ print(event)
201
+ ```
202
+
203
+ See [examples](examples/) for complete runnable examples.
204
+
205
+ ## Project Structure
206
+
207
+ The project is organized as follows:
208
+
209
+ ```
210
+ literun/
211
+ ├── src/
212
+ │ └── literun/ # Main package source
213
+ │ ├── agent.py # Agent runtime logic
214
+ │ ├── llm.py # LLM client wrapper
215
+ │ ├── tool.py # Tool definition and execution
216
+ │ ├── events.py # Stream event types
217
+ │ └── ...
218
+ ├── tests/ # Unit tests
219
+ ├── examples/ # Usage examples
220
+ └── pyproject.toml # Project configuration
221
+ ```
222
+
223
+ ## Testing
224
+
225
+ Run the test suite using `unittest`:
226
+
227
+ ```bash
228
+ python -m unittest discover tests
229
+ ```
230
+
231
+ ## Contributing
232
+
233
+ 1. Fork the repository
234
+ 2. Create a feature branch
235
+ 3. Make your changes
236
+ 4. Run tests: `python -m unittest discover tests`
237
+ 5. Update the example usage if needed
238
+ 6. Submit a pull request
239
+
240
+ ## License
241
+
242
+ MIT
@@ -0,0 +1,17 @@
1
+ literun/__init__.py,sha256=lnkWaQ_BYMVjyuPiRxmELOEMKzzqGw8n0DPEKIuNrgk,706
2
+ literun/agent.py,sha256=CrMrvKHDlwQ4shqR1GkJv0pKq64mdIAy5VnVZoa-1To,15105
3
+ literun/args_schema.py,sha256=bmLTybD4zPhjTwHkZrjQVEkiARHbje1SXTkH0hWyp0s,2618
4
+ literun/constants.py,sha256=gSyuHnUdPuQBH3GKjp7FwZhk_C-F7ecF1IF-36H23_Q,453
5
+ literun/events.py,sha256=SARUr4XWG9qpuy34hpZHHyJRX5btmJmBhBfvU8KbYyc,3615
6
+ literun/items.py,sha256=SPINMwy8vOuK6iIqAMC5Av-BIgsCiuVKpYK_4vk70fk,2975
7
+ literun/llm.py,sha256=7RU990Lw5XMY5RrlkepBaKFmP9cyZUu85SmUccrVIbY,5030
8
+ literun/prompt_message.py,sha256=wizT6T8A4rsswcCyQVQeKyOdMPPFmSKtQoVUliMgV0M,4982
9
+ literun/prompt_template.py,sha256=IFNZH80e2AO4u1p5L59Q49vvumU6nvToVHVXwkM_7mI,5128
10
+ literun/results.py,sha256=7uRPdfzRzdI0HXhWhFiya4yhH-NmjGfdGZAgXMpfkpQ,1324
11
+ literun/tool.py,sha256=iKQg1Xgh_Bzn1V0lVllmH1-G0XiHuDLkXOe0rCHRfc4,5049
12
+ literun/utils.py,sha256=4r9P7u46KzuG3eNZq8kfuEWJxpc4b8T26nrzjW7_Hec,2261
13
+ literun-0.1.0.dist-info/licenses/LICENSE,sha256=sJlY4ztFUqGGojhTNtL2UbhQeSZF3B4V1dAtzGfMHOE,1073
14
+ literun-0.1.0.dist-info/METADATA,sha256=jTSF_nuY1_6T3ImwNAgAQcxkZ3TdVzHx8Ofmy8CmK1U,6649
15
+ literun-0.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
16
+ literun-0.1.0.dist-info/top_level.txt,sha256=YFnS29wBQf5eX9UEtBYA1ZegxjIs_7n691L8qIR_QW0,8
17
+ literun-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (80.10.2)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Kaustubh Trivedi
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1 @@
1
+ literun