literun 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
literun/runner.py ADDED
@@ -0,0 +1,342 @@
1
+ """Agent execution runner."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from typing import Any, Iterator, TYPE_CHECKING
7
+
8
+ if TYPE_CHECKING:
9
+ from .agent import Agent
10
+
11
+ from .items import (
12
+ RunItem,
13
+ MessageOutputItem,
14
+ ToolCallItem,
15
+ ToolCallOutputItem,
16
+ ReasoningItem,
17
+ ResponseFunctionToolCallOutput,
18
+ )
19
+ from .results import RunResult, RunResultStreaming
20
+ from .events import (
21
+ ResponseFunctionCallOutputItemAddedEvent,
22
+ ResponseFunctionCallOutputItemDoneEvent,
23
+ )
24
+ from .prompt_template import PromptTemplate
25
+
26
+
27
class Runner:
    """Executes agent runs.

    Drives the agent loop for both synchronous (:meth:`run`) and streaming
    (:meth:`run_streamed`) execution: the LLM is queried, any tool calls it
    emits are executed via the agent's registered tools, their outputs are
    appended to the conversation, and the loop repeats until a final text
    output is produced or ``agent.max_iterations`` is exceeded.
    """

    @classmethod
    def run(
        cls,
        agent: Agent,
        user_input: str,
        prompt_template: PromptTemplate | None = None,
        runtime_context: dict[str, Any] | None = None,
    ) -> RunResult:
        """Run the agent synchronously.

        This method executes the agent loop, calling the language model and any
        registered tools until a final output is produced or the maximum number
        of iterations is reached.

        Args:
            agent: The agent instance to run.
            user_input: The input text from the user.
            prompt_template: Optional template to initialize conversation history.
            runtime_context: Optional runtime context dictionary to pass to tools.

        Returns:
            ``RunResult``: The result of the agent run, including output items and final text.

        Raises:
            ValueError: If `user_input` is empty.
            RuntimeError: If the agent exceeds `max_iterations`.
        """
        if not user_input:
            raise ValueError("user_input cannot be empty")

        prompt = cls._build_prompt(agent, user_input, prompt_template)
        all_items: list[RunItem] = []

        iteration = 0
        while iteration < agent.max_iterations:
            response = agent.llm.chat(
                messages=prompt,
                stream=False,
                tools=agent.tools,
                tool_choice=agent.tool_choice,
                parallel_tool_calls=agent.parallel_tool_calls,
            )

            # Tool calls requested in this turn, keyed by the output item id.
            tool_calls: dict[str, dict[str, Any]] = {}
            final_output_text: str = ""

            for item in response.output:
                if item.type == "reasoning":
                    all_items.append(
                        ReasoningItem(
                            role="assistant",
                            content=item.content,
                            raw_item=item,
                            type="reasoning_item",
                        )
                    )

                elif item.type == "function_call":
                    tool_calls[item.id] = {
                        "call_id": item.call_id,
                        "name": item.name,
                        "arguments": item.arguments,
                    }
                    all_items.append(
                        ToolCallItem(
                            role="assistant",
                            content="",
                            raw_item=item,
                            type="tool_call_item",
                        )
                    )

                elif item.type == "message":
                    # Concatenate only the textual parts of the message output.
                    text_parts = [
                        c.text for c in item.content if c.type == "output_text"
                    ]
                    final_output_text = "".join(text_parts)
                    all_items.append(
                        MessageOutputItem(
                            role="assistant",
                            content=final_output_text,
                            raw_item=item,
                            type="message_output_item",
                        )
                    )

            # No tool calls means the model is done: return the final result.
            if not tool_calls:
                return RunResult(
                    input=user_input,
                    new_items=all_items,
                    final_output=final_output_text,
                )

            # Preserve any assistant text produced alongside tool calls.
            if final_output_text:
                prompt.add_assistant(final_output_text)

            for tc in tool_calls.values():
                call_id = tc["call_id"]
                name = tc["name"]
                arguments_str = tc["arguments"]

                prompt.add_tool_call(
                    name=name,
                    arguments=arguments_str,
                    call_id=call_id,
                )

                tool_output = cls._execute_tool(
                    agent, name, arguments_str, runtime_context
                )

                prompt.add_tool_output(call_id=call_id, output=tool_output)

                all_items.append(
                    ToolCallOutputItem(
                        role="tool",
                        content=tool_output,
                        raw_item=ResponseFunctionToolCallOutput(
                            call_id=call_id,
                            output=tool_output,
                            name=name,
                            type="function_call_output",
                            status="completed",
                        ),
                        type="tool_call_output_item",
                    )
                )
            iteration += 1

        raise RuntimeError(f"Agent exceeded max iterations ({agent.max_iterations})")

    @classmethod
    def run_streamed(
        cls,
        agent: Agent,
        user_input: str,
        prompt_template: PromptTemplate | None = None,
        runtime_context: dict[str, Any] | None = None,
    ) -> Iterator[RunResultStreaming]:
        """Run the agent with streaming output.

        This method streams response events from the agent as they occur,
        including messages, tool calls, and tool outputs. It allows
        real-time processing of the agent's reasoning and tool execution.

        Args:
            agent: The agent instance to run.
            user_input: The input text from the user.
            prompt_template: Optional template to initialize conversation history.
            runtime_context: Optional runtime context dictionary to pass to tools.

        Yields:
            ``RunResultStreaming``: Streaming events containing the current input,
            the event from the LLM or tool, and the accumulated final output.

        Raises:
            ValueError: If `user_input` is empty.
            RuntimeError: If the agent exceeds `max_iterations`.
        """
        if not user_input:
            raise ValueError("user_input cannot be empty")

        prompt = cls._build_prompt(agent, user_input, prompt_template)

        iteration = 0
        while iteration < agent.max_iterations:
            response_stream = agent.llm.chat(
                messages=prompt,
                stream=True,
                tools=agent.tools,
                tool_choice=agent.tool_choice,
                parallel_tool_calls=agent.parallel_tool_calls,
            )

            # Tool calls requested in this turn, keyed by the output item id.
            tool_calls: dict[str, dict[str, Any]] = {}
            final_output_text: str = ""

            for event in response_stream:
                # Forward every raw LLM event to the caller as it arrives.
                yield RunResultStreaming(
                    input=user_input,
                    event=event,
                    final_output=final_output_text,
                )

                if event.type == "response.output_item.done":
                    if event.item.type == "message":
                        for content_part in event.item.content:
                            if content_part.type == "output_text":
                                final_output_text += content_part.text

                    elif event.item.type == "function_call":
                        tool_calls[event.item.id] = {
                            "call_id": event.item.call_id,
                            "name": event.item.name,
                            "arguments": event.item.arguments,
                        }

            # No tool calls means the model is done streaming this run.
            if not tool_calls:
                return

            # Preserve any assistant text produced alongside tool calls.
            if final_output_text:
                prompt.add_assistant(final_output_text)

            for tc in tool_calls.values():
                call_id = tc["call_id"]
                name = tc["name"]
                arguments_str = tc["arguments"]

                prompt.add_tool_call(
                    name=name, arguments=arguments_str, call_id=call_id
                )

                # Synthesize an "added" event so callers see tool execution start.
                yield RunResultStreaming(
                    input=user_input,
                    event=ResponseFunctionCallOutputItemAddedEvent(
                        type="response.function_call_output_item.added",
                        item=ResponseFunctionToolCallOutput(
                            call_id=call_id,
                            output="",
                            name=name,
                            type="function_call_output",
                            status="in_progress",
                        ),
                        output_index=None,
                        sequence_number=None,
                    ),
                    final_output=final_output_text,
                )

                tool_output = cls._execute_tool(
                    agent, name, arguments_str, runtime_context
                )

                prompt.add_tool_output(call_id=call_id, output=tool_output)

                # Synthesize a "done" event carrying the tool's output.
                yield RunResultStreaming(
                    input=user_input,
                    event=ResponseFunctionCallOutputItemDoneEvent(
                        type="response.function_call_output_item.done",
                        item=ResponseFunctionToolCallOutput(
                            call_id=call_id,
                            output=tool_output,
                            name=name,
                            type="function_call_output",
                            status="completed",
                        ),
                        output_index=None,
                        sequence_number=None,
                    ),
                    final_output=final_output_text,
                )
            iteration += 1

        raise RuntimeError(f"Agent exceeded max iterations ({agent.max_iterations})")

    @staticmethod
    def _build_prompt(
        agent: Agent,
        user_input: str,
        prompt_template: PromptTemplate | None = None,
    ) -> PromptTemplate:
        """Construct the conversation state for a new agent turn.

        Args:
            agent: The agent whose system prompt seeds a fresh template.
            user_input: The user's input text.
            prompt_template: Optional template to initialize conversation history.
                If None, a new ``PromptTemplate`` is created, and the system prompt is added if available.

        Returns:
            ``PromptTemplate``: The fully constructed prompt containing system, user, and previous messages.
        """
        if prompt_template is not None:
            # Copy so repeated runs never mutate the caller's template.
            prompt = prompt_template.copy()
        else:
            prompt = PromptTemplate()
            if agent.system_prompt:
                prompt.add_system(agent.system_prompt)

        prompt.add_user(user_input)
        return prompt

    @staticmethod
    def _execute_tool(
        agent: Agent,
        name: str,
        arguments: str | dict[str, Any],
        runtime_context: dict[str, Any] | None = None,
    ) -> str:
        """Execute a registered tool safely with provided arguments.

        Handles parsing of arguments (from JSON string or dict) and catches
        execution errors, returning them as text so the agent loop never crashes.

        Args:
            agent: The agent whose registered tools are looked up.
            name: The name of the tool to execute.
            arguments: Arguments to pass to the tool, either as a JSON string or dict.
            runtime_context: Optional runtime context to pass to tool arguments of type ``ToolRuntime``.

        Returns:
            str: The output of the tool execution, or an error message if
            argument parsing or execution fails.
        """
        tool = agent._tools.get(name)
        if not tool:
            return f"Error: Tool '{name}' not found"

        if isinstance(arguments, str):
            try:
                args = json.loads(arguments)
            except json.JSONDecodeError as e:
                # A parse failure is a problem with the model's call, not the
                # tool itself; report it distinctly so the model can retry.
                return f"Error parsing arguments for tool '{name}': {e}"
        else:
            args = arguments

        try:
            result = tool.execute(args, runtime_context)
            return str(result)
        except Exception as e:
            # Deliberate best-effort: tool failures are surfaced to the model
            # as text rather than aborting the agent run.
            return f"Error executing tool '{name}': {e}"
literun/tool.py CHANGED
@@ -3,71 +3,57 @@
3
3
  from __future__ import annotations
4
4
 
5
5
  import inspect
6
- from typing import Any, Dict, List, Optional, Callable, get_type_hints
6
+ from typing import Any, Callable, get_type_hints
7
+ from pydantic import BaseModel, ConfigDict
7
8
 
8
9
  from .args_schema import ArgsSchema
9
10
 
10
11
 
11
- class ToolRuntime:
12
+ class ToolRuntime(BaseModel):
12
13
  """Runtime context container for tools.
13
14
 
14
- This class stores arbitrary runtime values as attributes, using keyword
15
- arguments provided at initialization. It is typically injected into tool
16
- functions that declare a parameter annotated with ``ToolRuntime``.
15
+ This class corresponds to arbitrary runtime values passed via `runtime_context`
16
+ in `Agent.invoke()`. It allows extra arguments on initialization.
17
17
 
18
18
  Args:
19
19
  **kwargs: Arbitrary keyword arguments that will be set as attributes
20
20
  on the instance.
21
21
  """
22
22
 
23
- def __init__(self, **kwargs):
24
- for k, v in kwargs.items():
25
- setattr(self, k, v)
23
+ model_config = ConfigDict(extra="allow")
26
24
 
27
- def __repr__(self):
28
- return f"ToolRuntime({self.__dict__})"
29
25
 
30
-
31
- class Tool:
26
+ class Tool(BaseModel):
32
27
  """Represents a callable tool that can be invoked by an agent or LLM.
33
28
 
34
29
  A ``Tool`` wraps a Python callable along with metadata and an argument
35
30
  schema, and provides utilities for argument validation, execution,
36
31
  and conversion to the OpenAI tool definition format.
32
+
33
+ Args:
34
+ func: The function to execute when the tool is called.
35
+ name: The name of the tool.
36
+ description: A description of what the tool does.
37
+ args_schema: A list of arguments the tool accepts.
38
+ strict: If True, model output is guaranteed to exactly match the JSON Schema
39
+ provided in the function definition. If None, `strict` argument will not
40
+ be included in tool definition.
37
41
  """
38
42
 
39
- def __init__(
40
- self,
41
- *,
42
- func: Callable,
43
- name: str,
44
- description: str,
45
- args_schema: List[ArgsSchema],
46
- strict: Optional[bool] = None,
47
- ):
48
- """Initialize a Tool.
43
+ model_config = ConfigDict(arbitrary_types_allowed=True)
49
44
 
50
- Args:
51
- func: The function to execute when the tool is called.
52
- name: The name of the tool.
53
- description: A description of what the tool does.
54
- args_schema: A list of arguments the tool accepts.
55
- strict: If True, model output is guaranteed to exactly match the JSON Schema
56
- provided in the function definition. If None, `strict` argument will not
57
- be included in tool definition.
58
- """
59
- self.func = func
60
- self.name = name
61
- self.description = description
62
- self.args_schema = args_schema
63
- self.strict = strict
45
+ func: Callable
46
+ name: str
47
+ description: str
48
+ args_schema: list[ArgsSchema]
49
+ strict: bool | None = None
64
50
 
65
51
  # OpenAI schema
66
- def to_openai_tool(self) -> Dict[str, Any]:
52
+ def to_openai_tool(self) -> dict[str, Any]:
67
53
  """Convert the tool to the OpenAI tool schema format.
68
54
 
69
55
  Returns:
70
- Dict[str, Any]: The OpenAI-compatible tool definition.
56
+ dict[str, Any]: The OpenAI-compatible tool definition.
71
57
  """
72
58
  properties = {}
73
59
  required = []
@@ -90,14 +76,14 @@ class Tool:
90
76
  }
91
77
 
92
78
  # LLM Runtime argument handling
93
- def resolve_arguments(self, raw_args: Dict[str, Any]) -> Dict[str, Any]:
79
+ def resolve_arguments(self, raw_args: dict[str, Any]) -> dict[str, Any]:
94
80
  """Validate and cast raw arguments provided by the model.
95
81
 
96
82
  Args:
97
83
  raw_args: The raw argument dictionary produced by the model.
98
84
 
99
85
  Returns:
100
- Dict[str, Any]: A dictionary of validated and type-cast arguments.
86
+ dict[str, Any]: A dictionary of validated and type-cast arguments.
101
87
  """
102
88
  parsed = {}
103
89
  for arg in self.args_schema:
@@ -105,7 +91,9 @@ class Tool:
105
91
  return parsed
106
92
 
107
93
  def execute(
108
- self, args: Dict[str, Any], runtime_context: Optional[Dict[str, Any]] = None
94
+ self,
95
+ args: dict[str, Any],
96
+ runtime_context: dict[str, Any] | None = None,
109
97
  ) -> Any:
110
98
  """Execute the tool with validated arguments and runtime context.
111
99
 
@@ -0,0 +1,187 @@
1
+ Metadata-Version: 2.4
2
+ Name: literun
3
+ Version: 0.1.1
4
+ Summary: A minimal agent runtime built on OpenAI Responses API
5
+ Project-URL: Homepage, https://github.com/kaustubh-tr/literun
6
+ Project-URL: Source, https://github.com/kaustubh-tr/literun
7
+ Project-URL: Issues, https://github.com/kaustubh-tr/literun/issues
8
+ Project-URL: Readme, https://github.com/kaustubh-tr/literun#readme
9
+ Project-URL: Documentation, https://github.com/kaustubh-tr/literun/blob/main/DOCS.md
10
+ Author-email: Kaustubh Trivedi <trivedikaustubh01@gmail.com>
11
+ License: MIT License
12
+
13
+ Copyright (c) 2026 Kaustubh Trivedi
14
+
15
+ Permission is hereby granted, free of charge, to any person obtaining a copy
16
+ of this software and associated documentation files (the "Software"), to deal
17
+ in the Software without restriction, including without limitation the rights
18
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
19
+ copies of the Software, and to permit persons to whom the Software is
20
+ furnished to do so, subject to the following conditions:
21
+
22
+ The above copyright notice and this permission notice shall be included in all
23
+ copies or substantial portions of the Software.
24
+
25
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
30
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31
+ SOFTWARE.
32
+ License-File: LICENSE
33
+ Requires-Python: <4.0.0,>=3.10.0
34
+ Requires-Dist: openai<3.0.0,>=2.11.0
35
+ Requires-Dist: pydantic<3.0.0,>=2.12.0
36
+ Provides-Extra: dev
37
+ Requires-Dist: pytest<10.0.0,>=9.0.0; extra == 'dev'
38
+ Description-Content-Type: text/markdown
39
+
40
+ # LiteRun 🚀
41
+
42
+ [![PyPI - Version](https://img.shields.io/pypi/v/literun)](https://pypi.org/project/literun/)
43
+ [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/literun)](https://pypi.org/project/literun/)
44
+ [![PyPI - License](https://img.shields.io/pypi/l/literun)](https://opensource.org/licenses/MIT)
45
+ [![Documentation](https://img.shields.io/badge/docs-DOCS-blue)](https://github.com/kaustubh-tr/literun/blob/main/DOCS.md)
46
+
47
+ A lightweight, flexible Python framework for building custom OpenAI agents (Responses API) with tool support and structured prompt management.
48
+
49
+ ## Features
50
+
51
+ - **Custom Agent Execution**: Full control over the agent loop, with both synchronous and streaming modes.
52
+ - **Tool Support**: Easy registration with Pydantic-powered validation.
53
+ - **Type Safety**: Built for modern Python 3.10+ environments.
54
+ - **Prompt Templates**: Structured message management.
55
+ - **Event-Driven**: Granular control via a rich event system.
56
+
57
+ For detailed documentation on Architecture, Streaming, and Advanced Configuration, see [DOCS.md](https://github.com/kaustubh-tr/literun/blob/main/DOCS.md).
58
+
59
+ ## Requirements
60
+
61
+ - Python 3.10+
62
+
63
+ > **Note**: Core dependencies like `openai` and `pydantic` are automatically installed when you install `literun`.
64
+
65
+ ## Installation
66
+
67
+ You can install `literun` directly from PyPI:
68
+
69
+ ```bash
70
+ pip install literun
71
+ ```
72
+
73
+ ## Quick Start
74
+
75
+ ### Basic Agent
76
+
77
+ Here is a simple example of how to create an agent with a custom tool.
78
+
79
+ ```python
80
+ import os
81
+ from literun import Agent, ChatOpenAI, Tool, ArgsSchema
82
+
83
+ # 1. Define a tool function
84
+ def get_weather(location: str, unit: str = "celsius") -> str:
85
+ return f"The weather in {location} is 25 degrees {unit}."
86
+
87
+ # 2. Wrap it with Tool schema
88
+ weather_tool = Tool(
89
+ func=get_weather,
90
+ name="get_weather",
91
+ description="Get the weather for a location",
92
+ args_schema=[
93
+ ArgsSchema(
94
+ name="location",
95
+ type=str,
96
+ description="The city and state, e.g. San Francisco, CA",
97
+ ),
98
+ ArgsSchema(
99
+ name="unit",
100
+ type=str,
101
+ description="The unit of temperature",
102
+ enum=["celsius", "fahrenheit"],
103
+ ),
104
+ ],
105
+ )
106
+
107
+ # 3. Initialize Agent
108
+ agent = Agent(
109
+ llm=ChatOpenAI(model="gpt-4.1-mini", temperature=0.7),
110
+ system_prompt="You are a helpful assistant.",
111
+ tools=[weather_tool],
112
+ )
113
+
114
+ # 4. Run the Agent
115
+ result = agent.invoke(user_input="What is the weather in Tokyo?")
116
+ print(f"Final Answer: {result.final_output}")
117
+ ```
118
+
119
+ ### Advanced Usage
120
+
121
+ LiteRun supports **Streaming**, **Runtime Context Injection** (for secrets), and **Direct LLM Usage**.
122
+
123
+ 👉 Check out the [Documentation](https://github.com/kaustubh-tr/literun/blob/main/DOCS.md) and [Examples](https://github.com/kaustubh-tr/literun/blob/main/examples/) for more details.
124
+
125
+ ## Project Structure
126
+
127
+ ```text
128
+ literun/
129
+ ├── src/
130
+ │ └── literun/ # Main package source
131
+ │ ├── agent.py # Agent orchestrator
132
+ │ ├── llm.py # ChatOpenAI wrapper
133
+ │ ├── tool.py # Tool & Schema definitions
134
+ │ └── ...
135
+ ├── tests/ # Unit tests (agent, llm, tools, prompts)
136
+ ├── examples/ # Runnable examples
137
+ ├── DOCS.md # Detailed documentation
138
+ ├── LICENSE # MIT License
139
+ ├── README.md # This file
140
+ └── pyproject.toml # Project configuration & dependencies
141
+ ```
142
+
143
+ ## Contributing
144
+
145
+ We welcome contributions! Please follow these steps to set up your development environment:
146
+
147
+ 1. **Fork** the repository and clone it locally:
148
+
149
+ ```bash
150
+ git clone https://github.com/kaustubh-tr/literun.git
151
+ cd literun
152
+ ```
153
+
154
+ 2. **Install** in editable mode with development dependencies:
155
+
156
+ ```bash
157
+ pip install -e .[dev]
158
+ ```
159
+
160
+ 3. **Create** a feature branch and make your changes.
161
+
162
+ 4. **Test** your changes (see below).
163
+
164
+ 5. **Submit** a pull request.
165
+
166
+ ## Testing
167
+
168
+ This project uses `pytest` as the primary test runner, but supports `unittest` as well.
169
+
170
+ ```bash
171
+ # Run all tests
172
+ pytest
173
+ ```
174
+
175
+ or using unittest:
176
+
177
+ ```bash
178
+ python -m unittest discover tests
179
+ ```
180
+
181
+ > **Note**: Some integration tests require the `OPENAI_API_KEY` environment variable. They are automatically skipped if it is missing.
182
+
183
+ ## License
184
+
185
+ Copyright (c) 2026 Kaustubh Trivedi.
186
+
187
+ Distributed under the terms of the [MIT](https://github.com/kaustubh-tr/literun/blob/main/LICENSE) license, LiteRun is free and open source software.
@@ -0,0 +1,17 @@
1
+ literun/__init__.py,sha256=ojY1q7jAS0tXQ-qhkyKzVG3MTKSaaGZSuoo0liuuZzs,707
2
+ literun/agent.py,sha256=NjP8g3kNriPJ0vFF8jB5NuRjxgfqrErU1L7zIiRkmuY,5192
3
+ literun/args_schema.py,sha256=rkn_mvC1HnFUtV2r6-PigQdvo3SWIQDUB23uIrv_vsw,2486
4
+ literun/constants.py,sha256=K-5f6Rhbkqb89HV2paB56nOsWgb_YI95oZM0eU5Vmus,688
5
+ literun/events.py,sha256=F_p9Dp4FCY4HPdjkSfPk_C5fbGDGmwEf6NO4KRsfCww,3624
6
+ literun/items.py,sha256=PM8zFrEv-5sncwZOU7ruhN1dVX-GjcVx21ivsKk8V08,2894
7
+ literun/llm.py,sha256=WxDhVtSWxuhwQ2Icl2tfjkyMvnyXT9BjTpEGwwyGxlI,8639
8
+ literun/prompt_message.py,sha256=zV56UB7FvxGqECRwKkqY9fh4ihBoG9t86ClmfZpB6-w,4594
9
+ literun/prompt_template.py,sha256=1pmXbvOKDydQgOegLPwSa2WwPtbWthY3jsVXZPsRWDg,4995
10
+ literun/results.py,sha256=YHa5rXh5YkSB6mEVBG6aJOZjYOp6WtdDqh5ugpbK2nE,1321
11
+ literun/runner.py,sha256=rkLr1fYJY5SMdQR8uZ1NQ9jCGzawJzP-MJYxgBDrcZs,12124
12
+ literun/tool.py,sha256=dzNJ3JwVu7X9u34eb2GC047wMDetqcle_4thCUjZe6I,4659
13
+ literun/utils.py,sha256=4r9P7u46KzuG3eNZq8kfuEWJxpc4b8T26nrzjW7_Hec,2261
14
+ literun-0.1.1.dist-info/METADATA,sha256=7hFvFpyDWMW5nqaClX8FJ2MpANcLBRXNNfJUgfGa--o,6488
15
+ literun-0.1.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
16
+ literun-0.1.1.dist-info/licenses/LICENSE,sha256=sJlY4ztFUqGGojhTNtL2UbhQeSZF3B4V1dAtzGfMHOE,1073
17
+ literun-0.1.1.dist-info/RECORD,,
@@ -1,5 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (80.10.2)
2
+ Generator: hatchling 1.28.0
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
-