pydantic-ai-rlm 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pydantic_ai_rlm/__init__.py CHANGED
@@ -1,7 +1,9 @@
  from .agent import create_rlm_agent, run_rlm_analysis, run_rlm_analysis_sync
  from .dependencies import ContextType, RLMConfig, RLMDependencies
  from .logging import configure_logging
+ from .models import GroundedResponse
  from .prompts import (
+     GROUNDING_INSTRUCTIONS,
      LLM_QUERY_INSTRUCTIONS,
      RLM_INSTRUCTIONS,
      build_rlm_instructions,
@@ -13,9 +15,11 @@ from .toolset import (
  )

  __all__ = [
+     "GROUNDING_INSTRUCTIONS",
      "LLM_QUERY_INSTRUCTIONS",
      "RLM_INSTRUCTIONS",
      "ContextType",
+     "GroundedResponse",
      "REPLEnvironment",
      "REPLResult",
      "RLMConfig",
pydantic_ai_rlm/agent.py CHANGED
@@ -1,21 +1,45 @@
  from __future__ import annotations

- from typing import Any
+ from typing import Any, Literal, overload

  from pydantic_ai import Agent, UsageLimits

  from .dependencies import ContextType, RLMConfig, RLMDependencies
+ from .models import GroundedResponse
  from .prompts import build_rlm_instructions
  from .toolset import create_rlm_toolset


+ @overload
  def create_rlm_agent(
      model: str = "openai:gpt-5",
      sub_model: str | None = None,
      code_timeout: float = 60.0,
-     include_example_instructions: bool = True,
      custom_instructions: str | None = None,
- ) -> Agent[RLMDependencies, str]:
+     *,
+     grounded: Literal[False] = False,
+ ) -> Agent[RLMDependencies, str]: ...
+
+
+ @overload
+ def create_rlm_agent(
+     model: str = "openai:gpt-5",
+     sub_model: str | None = None,
+     code_timeout: float = 60.0,
+     custom_instructions: str | None = None,
+     *,
+     grounded: Literal[True],
+ ) -> Agent[RLMDependencies, GroundedResponse]: ...
+
+
+ def create_rlm_agent(
+     model: str = "openai:gpt-5",
+     sub_model: str | None = None,
+     code_timeout: float = 60.0,
+     custom_instructions: str | None = None,
+     *,
+     grounded: bool = False,
+ ) -> Agent[RLMDependencies, str] | Agent[RLMDependencies, GroundedResponse]:
      """
      Create a Pydantic AI agent with REPL code execution capabilities.

@@ -26,11 +50,12 @@ def create_rlm_agent(
              available in the REPL, allowing the agent to delegate sub-queries.
              Example: "openai:gpt-5-mini" or "anthropic:claude-3-haiku-20240307"
          code_timeout: Timeout for code execution in seconds
-         include_example_instructions: Include detailed examples in instructions
          custom_instructions: Additional instructions to append
+         grounded: If True, return a GroundedResponse with citation markers

      Returns:
-         Configured Agent instance
+         Configured Agent instance. Returns Agent[RLMDependencies, GroundedResponse]
+         when grounded=True, otherwise Agent[RLMDependencies, str].

      Example:
          ```python
@@ -48,19 +73,28 @@ def create_rlm_agent(
          )
          result = await agent.run("What are the main themes?", deps=deps)
          print(result.output)
+
+         # Create grounded agent
+         grounded_agent = create_rlm_agent(model="openai:gpt-5", grounded=True)
+         result = await grounded_agent.run("What happened?", deps=deps)
+         print(result.output.info)  # Response with [N] markers
+         print(result.output.grounding)  # {"1": "exact quote", ...}
          ```
      """
      toolset = create_rlm_toolset(code_timeout=code_timeout, sub_model=sub_model)

      instructions = build_rlm_instructions(
          include_llm_query=sub_model is not None,
+         include_grounding=grounded,
          custom_suffix=custom_instructions,
      )

-     agent: Agent[RLMDependencies, str] = Agent(
+     output_type: type[str] | type[GroundedResponse] = GroundedResponse if grounded else str
+
+     agent: Agent[RLMDependencies, Any] = Agent(
          model,
          deps_type=RLMDependencies,
-         output_type=str,
+         output_type=output_type,
          toolsets=[toolset],
          instructions=instructions,
      )
@@ -68,6 +102,34 @@ def create_rlm_agent(
      return agent


+ @overload
+ async def run_rlm_analysis(
+     context: ContextType,
+     query: str,
+     model: str = "openai:gpt-5",
+     sub_model: str | None = None,
+     config: RLMConfig | None = None,
+     max_tool_calls: int = 50,
+     *,
+     grounded: Literal[False] = False,
+     **agent_kwargs: Any,
+ ) -> str: ...
+
+
+ @overload
+ async def run_rlm_analysis(
+     context: ContextType,
+     query: str,
+     model: str = "openai:gpt-5",
+     sub_model: str | None = None,
+     config: RLMConfig | None = None,
+     max_tool_calls: int = 50,
+     *,
+     grounded: Literal[True],
+     **agent_kwargs: Any,
+ ) -> GroundedResponse: ...
+
+
  async def run_rlm_analysis(
      context: ContextType,
      query: str,
@@ -75,8 +137,10 @@ async def run_rlm_analysis(
      sub_model: str | None = None,
      config: RLMConfig | None = None,
      max_tool_calls: int = 50,
+     *,
+     grounded: bool = False,
      **agent_kwargs: Any,
- ) -> str:
+ ) -> str | GroundedResponse:
      """
      Convenience function to run RLM analysis on a context.

@@ -89,25 +153,36 @@ async def run_rlm_analysis(
              available in the REPL, allowing the agent to delegate sub-queries.
          config: Optional RLMConfig for customization
          max_tool_calls: Maximum tool calls allowed
+         grounded: If True, return a GroundedResponse with citation markers
          **agent_kwargs: Additional arguments passed to create_rlm_agent()

      Returns:
-         The agent's final answer as a string
+         The agent's final answer. Returns GroundedResponse when grounded=True,
+         otherwise returns str.

      Example:
          ```python
          from pydantic_ai_rlm import run_rlm_analysis

-         # With sub-model for llm_query
+         # Standard string response
          answer = await run_rlm_analysis(
              context=huge_document,
              query="Find the magic number hidden in the text",
              sub_model="openai:gpt-5-mini",
          )
          print(answer)
+
+         # Grounded response with citations
+         result = await run_rlm_analysis(
+             context=document,
+             query="What was the revenue change?",
+             grounded=True,
+         )
+         print(result.info)  # "Revenue grew [1]..."
+         print(result.grounding)  # {"1": "increased by 45%", ...}
          ```
      """
-     agent = create_rlm_agent(model=model, sub_model=sub_model, **agent_kwargs)
+     agent = create_rlm_agent(model=model, sub_model=sub_model, grounded=grounded, **agent_kwargs)

      effective_config = config or RLMConfig()
      if sub_model and not effective_config.sub_model:
@@ -127,6 +202,7 @@ async def run_rlm_analysis(
      return result.output


+ @overload
  def run_rlm_analysis_sync(
      context: ContextType,
      query: str,
@@ -134,14 +210,63 @@ def run_rlm_analysis_sync(
      sub_model: str | None = None,
      config: RLMConfig | None = None,
      max_tool_calls: int = 50,
+     *,
+     grounded: Literal[False] = False,
      **agent_kwargs: Any,
- ) -> str:
+ ) -> str: ...
+
+
+ @overload
+ def run_rlm_analysis_sync(
+     context: ContextType,
+     query: str,
+     model: str = "openai:gpt-5",
+     sub_model: str | None = None,
+     config: RLMConfig | None = None,
+     max_tool_calls: int = 50,
+     *,
+     grounded: Literal[True],
+     **agent_kwargs: Any,
+ ) -> GroundedResponse: ...
+
+
+ def run_rlm_analysis_sync(
+     context: ContextType,
+     query: str,
+     model: str = "openai:gpt-5",
+     sub_model: str | None = None,
+     config: RLMConfig | None = None,
+     max_tool_calls: int = 50,
+     *,
+     grounded: bool = False,
+     **agent_kwargs: Any,
+ ) -> str | GroundedResponse:
      """
      Synchronous version of run_rlm_analysis.

      See run_rlm_analysis() for full documentation.
+
+     Example:
+         ```python
+         from pydantic_ai_rlm import run_rlm_analysis_sync
+
+         # Standard string response
+         answer = run_rlm_analysis_sync(
+             context=document,
+             query="What happened?",
+         )
+
+         # Grounded response with citations
+         result = run_rlm_analysis_sync(
+             context=document,
+             query="What was the revenue change?",
+             grounded=True,
+         )
+         print(result.info)  # "Revenue grew [1]..."
+         print(result.grounding)  # {"1": "increased by 45%", ...}
+         ```
      """
-     agent = create_rlm_agent(model=model, sub_model=sub_model, **agent_kwargs)
+     agent = create_rlm_agent(model=model, sub_model=sub_model, grounded=grounded, **agent_kwargs)

      effective_config = config or RLMConfig()
      if sub_model and not effective_config.sub_model:
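The three `create_rlm_agent` definitions above (and the matching trios for `run_rlm_analysis` and `run_rlm_analysis_sync`) are the standard `typing.overload` pattern: the `Literal[False]`/`Literal[True]` overloads give type checkers a precise return type for each value of `grounded`, while the single runtime implementation selects `output_type = GroundedResponse if grounded else str`. A minimal standalone sketch of the same pattern (the `Plain`/`Grounded` classes are stand-ins, not part of the package):

```python
from typing import Literal, overload


class Plain: ...


class Grounded: ...


@overload
def make(*, grounded: Literal[False] = False) -> Plain: ...
@overload
def make(*, grounded: Literal[True]) -> Grounded: ...
def make(*, grounded: bool = False) -> Plain | Grounded:
    # Runtime selection mirrors the agent factory:
    # output_type = GroundedResponse if grounded else str
    return Grounded() if grounded else Plain()


plain = make()                   # type checkers infer Plain
cited = make(grounded=True)      # type checkers infer Grounded
print(type(plain).__name__, type(cited).__name__)
```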
pydantic_ai_rlm/logging.py CHANGED
@@ -47,7 +47,7 @@ class RLMLogger
              )
              self.console.print(panel)
          else:
-             print(f"\n{'='*50}")
+             print(f"\n{'=' * 50}")
              print("CODE EXECUTION")
              print("=" * 50)
              print(code)
@@ -141,7 +141,7 @@ class RLMLogger:
      def _log_result_plain(self, result: REPLResult) -> None:
          """Log result using plain text."""
          status = "SUCCESS" if result.success else "ERROR"
-         print(f"\n{'='*50}")
+         print(f"\n{'=' * 50}")
          print(f"RESULT: {status} (executed in {result.execution_time:.3f}s)")
          print("=" * 50)

@@ -159,11 +159,7 @@ class RLMLogger:
              stderr = stderr[:1000] + "\n... (truncated)"
          print(stderr)

-         user_vars = {
-             k: v
-             for k, v in result.locals.items()
-             if not k.startswith("_") and k not in ("context", "json", "re", "os")
-         }
+         user_vars = {k: v for k, v in result.locals.items() if not k.startswith("_") and k not in ("context", "json", "re", "os")}
          if user_vars:
              print("\nVariables:")
              for name, value in list(user_vars.items())[:10]:
@@ -198,7 +194,7 @@ class RLMLogger:
              )
              self.console.print(panel)
          else:
-             print(f"\n{'='*50}")
+             print(f"\n{'=' * 50}")
              print("LLM QUERY")
              print("=" * 50)
              display_prompt = prompt
@@ -226,7 +222,7 @@ class RLMLogger:
              )
              self.console.print(panel)
          else:
-             print(f"\n{'='*50}")
+             print(f"\n{'=' * 50}")
              print("LLM RESPONSE")
              print("=" * 50)
              display_response = response
@@ -0,0 +1,23 @@
+ """Pydantic models for structured RLM outputs."""
+
+ from __future__ import annotations
+
+ from pydantic import BaseModel, Field
+
+
+ class GroundedResponse(BaseModel):
+     """A response with citation markers mapping to exact quotes from source documents.
+
+     Example:
+         ```python
+         response = GroundedResponse(
+             info="Revenue grew [1] driven by expansion [2]", grounding={"1": "increased by 45%", "2": "new markets in Asia"}
+         )
+         ```
+     """
+
+     info: str = Field(description="Response text with citation markers like [1]")
+     grounding: dict[str, str] = Field(
+         default_factory=dict,
+         description="Mapping from citation markers to exact quotes from the source",
+     )
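Because the grounded output format described in `GROUNDING_INSTRUCTIONS` (next section) is plain JSON with `info` and `grounding` keys, the new `pydantic_ai_rlm/models.py` model above can be exercised directly with standard Pydantic v2 validation. A small sketch, assuming the package is installed; the payload string is illustrative rather than taken from the package:

```python
import re

from pydantic_ai_rlm import GroundedResponse

# A hand-written payload in the shape the grounding instructions require.
payload = (
    '{"info": "Revenue showed strong growth [1] driven by new markets [2].",'
    ' "grounding": {"1": "revenue increased by 45% in Q3 2024",'
    ' "2": "expansion into new markets in Asia"}}'
)

response = GroundedResponse.model_validate_json(payload)

# Verify that every [N] marker in the text has a matching grounding entry.
markers = set(re.findall(r"\[(\d+)\]", response.info))
missing = markers - response.grounding.keys()
print(missing or "all citation markers are grounded")
```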
pydantic_ai_rlm/prompts.py CHANGED
@@ -50,6 +50,61 @@ print(f"Final answer: {results}")
  4. **Be thorough** - For needle-in-haystack, search the entire context
  """

+ GROUNDING_INSTRUCTIONS = """
+
+ ## Grounding Requirements
+
+ Your response MUST include grounded citations. This means:
+
+ 1. **Citation Format**: Use markers like `[1]`, `[2]`, etc. in your response text
+ 2. **Exact Quotes**: Each marker must map to an EXACT quote from the source context (verbatim, no paraphrasing)
+ 3. **Quote Length**: Each quote should be 10-200 characters - enough to be meaningful but not too long
+ 4. **Consecutive Numbering**: Number citations consecutively starting from 1
+
+ ### Output Format
+
+ Your final answer must be valid JSON with this structure:
+ ```json
+ {
+   "info": "The document states that X Y Z [1]. Additionally, A B C [2]",
+   "grounding": {
+     "1": "exact quote from source",
+     "2": "another exact quote from source"
+   }
+ }
+ ```
+
+ ### Example
+
+ If the context contains: "The company's revenue increased by 45% in Q3 2024, driven by expansion into new markets in Asia."
+
+ Your response should look like:
+ ```json
+ {
+   "info": "Revenue showed strong growth [1] with geographic expansion being a key driver [2].",
+   "grounding": {
+     "1": "revenue increased by 45% in Q3 2024",
+     "2": "driven by expansion into new markets in Asia"
+   }
+ }
+ ```
+
+ ### Finding Quotes in Code
+
+ Use this approach to find and verify exact quotes:
+ ```python
+ # Find a specific phrase in context
+ search_term = "revenue"
+ idx = context.lower().find(search_term)
+ if idx != -1:
+     # Extract surrounding context for the quote
+     quote = context[max(0, idx):idx+100]
+     print(f"Found: {quote}")
+ ```
+
+ **Important**: Every citation marker in your `info` field MUST have a corresponding entry in `grounding`. Only output the JSON object, no additional text.
+ """
+
  LLM_QUERY_INSTRUCTIONS = """

  ## Sub-LLM Queries
@@ -93,14 +148,15 @@ print(result)

  def build_rlm_instructions(
      include_llm_query: bool = False,
+     include_grounding: bool = False,
      custom_suffix: str | None = None,
  ) -> str:
      """
      Build RLM instructions with optional customization.

      Args:
-         include_examples: Whether to include detailed examples
          include_llm_query: Whether to include llm_query() documentation
+         include_grounding: Whether to include grounding/citation instructions
          custom_suffix: Additional instructions to append

      Returns:
@@ -109,8 +165,10 @@ def build_rlm_instructions(
      base = RLM_INSTRUCTIONS

      if include_llm_query:
-         llm_docs = LLM_QUERY_INSTRUCTIONS
-         base = f"{base}{llm_docs}"
+         base = f"{base}{LLM_QUERY_INSTRUCTIONS}"
+
+     if include_grounding:
+         base = f"{base}{GROUNDING_INSTRUCTIONS}"

      if custom_suffix:
          base = f"{base}\n\n## Additional Instructions\n\n{custom_suffix}"
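Given the `build_rlm_instructions` signature above, the two flags simply concatenate optional blocks onto the base prompt. A quick sketch of how they compose (assumes the package is installed; the substring check relies on the "## Grounding Requirements" heading shown earlier in this diff):

```python
from pydantic_ai_rlm import build_rlm_instructions

base = build_rlm_instructions()
with_grounding = build_rlm_instructions(include_grounding=True)
full = build_rlm_instructions(include_llm_query=True, include_grounding=True)

# The grounding block is appended only when requested.
print("## Grounding Requirements" in base)            # False
print("## Grounding Requirements" in with_grounding)  # True
print(len(base) < len(with_grounding) < len(full))    # True
```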
pydantic_ai_rlm/repl.py CHANGED
@@ -7,6 +7,7 @@ import os
  import shutil
  import sys
  import tempfile
+ import textwrap
  import threading
  import time
  from contextlib import contextmanager
@@ -328,6 +329,9 @@ with open(r'{context_path}', 'r', encoding='utf-8') as f:
          Returns:
              REPLResult with stdout, stderr, locals, and timing
          """
+         # Normalize code: remove common leading whitespace and strip
+         code = textwrap.dedent(code).strip()
+
          start_time = time.time()
          success = True
          stdout_content = ""
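The new `textwrap.dedent(code).strip()` step means code that arrives uniformly indented, as model-generated snippets often do when embedded in a larger payload, still parses before execution. A stdlib-only sketch of the effect, independent of this package:

```python
import textwrap

# Indented the way a snippet might arrive embedded in a larger payload.
raw = """
    total = 0
    for n in range(5):
        total += n
    print(total)
"""

normalized = textwrap.dedent(raw).strip()
exec(compile(normalized, "<repl>", "exec"))  # prints 10

# Without normalization, the leading indentation is an IndentationError.
try:
    compile(raw, "<repl>", "exec")
except IndentationError as err:
    print(f"raw version fails: {err.msg}")
```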
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pydantic-ai-rlm
- Version: 0.1.0
+ Version: 0.1.2
  Summary: Recursive Language Model (RLM) toolset for Pydantic AI - handle extremely large contexts
  Author: Pydantic AI RLM Contributors
  License-Expression: MIT
@@ -16,7 +16,7 @@ Classifier: Programming Language :: Python :: 3.12
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Classifier: Typing :: Typed
  Requires-Python: >=3.10
- Requires-Dist: pydantic-ai>=0.1.0
+ Requires-Dist: pydantic-ai-slim[cli]>=0.1.0
  Provides-Extra: dev
  Requires-Dist: mypy>=1.0; extra == 'dev'
  Requires-Dist: pytest-asyncio>=0.21; extra == 'dev'
@@ -34,6 +34,7 @@ Description-Content-Type: text/markdown

  <p align="center">
    <a href="https://github.com/vstorm-co/pydantic-ai-rlm">GitHub</a> •
+   <a href="https://pypi.org/project/pydantic-ai-rlm/">PyPI</a> •
    <a href="https://github.com/vstorm-co/pydantic-ai-rlm#examples">Examples</a>
  </p>

@@ -41,6 +42,7 @@ Description-Content-Type: text/markdown
    <a href="https://www.python.org/downloads/"><img src="https://img.shields.io/badge/python-3.10+-blue.svg" alt="Python 3.10+"></a>
    <a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT"></a>
    <a href="https://github.com/pydantic/pydantic-ai"><img src="https://img.shields.io/badge/Powered%20by-Pydantic%20AI-E92063?logo=pydantic&logoColor=white" alt="Pydantic AI"></a>
+   <a href="https://pypi.org/project/pydantic-ai-rlm/"><img src="https://img.shields.io/pypi/v/pydantic-ai-rlm.svg" alt="PyPI version"></a>
  </p>

  <p align="center">
@@ -50,6 +52,8 @@ Description-Content-Type: text/markdown
    &nbsp;•&nbsp;
    <b>Sub-Model Delegation</b>
    &nbsp;•&nbsp;
+   <b>Grounded Citations</b>
+   &nbsp;•&nbsp;
    <b>Fully Type-Safe</b>
  </p>

@@ -201,6 +205,30 @@ result = await agent.run(
  )
  ```

+ ### Grounded Responses with Citations
+
+ Get answers with traceable citations back to the source:
+
+ ```python
+ from pydantic_ai_rlm import run_rlm_analysis
+
+ # Enable grounding for citation tracking
+ result = await run_rlm_analysis(
+     context=financial_report,
+     query="What were the key revenue changes?",
+     model="openai:gpt-5",
+     grounded=True,  # Returns GroundedResponse instead of str
+ )
+
+ # Response contains citation markers
+ print(result.info)
+ # "Revenue increased [1] primarily due to [2]"
+
+ # Grounding maps markers to exact quotes from the source
+ print(result.grounding)
+ # {"1": "by 45% year-over-year", "2": "expansion into Asian markets"}
+ ```
+
  ---

  ## API Reference
@@ -215,6 +243,7 @@ agent = create_rlm_agent(
      sub_model="openai:gpt-5-mini",  # Model for llm_query() (optional)
      code_timeout=60.0,  # Timeout for code execution
      custom_instructions="...",  # Additional instructions
+     grounded=True,  # Return GroundedResponse with citations
  )
  ```

@@ -239,6 +268,11 @@ answer = await run_rlm_analysis(context, query, model="openai:gpt-5")

  # Sync
  answer = run_rlm_analysis_sync(context, query, model="openai:gpt-5")
+
+ # With grounding (returns GroundedResponse)
+ result = await run_rlm_analysis(context, query, grounded=True)
+ print(result.info)  # Text with [N] markers
+ print(result.grounding)  # {"1": "exact quote", ...}
  ```

  ### `RLMDependencies`
@@ -278,16 +312,19 @@ configure_logging(enabled=False)
  ```

  Install with rich logging support for syntax highlighting and styled output:
+
  ```bash
  pip install pydantic-ai-rlm[logging]
  ```

  Or install rich separately:
+
  ```bash
  pip install rich
  ```

  When enabled, you'll see:
+
  - Syntax-highlighted code being executed (with rich)
  - Execution results with status indicators (SUCCESS/ERROR)
  - Execution time for each code block
@@ -0,0 +1,14 @@
+ pydantic_ai_rlm/__init__.py,sha256=iU_ToTPYqbgW3at4DI5fUVqcvsXvXtt2m61x-z3kVYM,913
+ pydantic_ai_rlm/agent.py,sha256=9wZdl1POW6B9LOf4IquFCrHt34gBPGEw2Z9631GMmXQ,8379
+ pydantic_ai_rlm/dependencies.py,sha256=bzPXhCRJ-0D5N0FS32LAETXxp6cbInU00qaMr0qd_iQ,1306
+ pydantic_ai_rlm/logging.py,sha256=6AzSXBve5pRCfL1pGT5KkWwOZLvZGBYxS-USwieE6lY,9270
+ pydantic_ai_rlm/models.py,sha256=9VMVybNBXU3kGZPzg6LPmxTN9qBhVFl7qY5RVd9Ch7o,716
+ pydantic_ai_rlm/prompts.py,sha256=vof8tYJWG4ChLR5cEJeB8x0oeODdqyW7yoHPgS0veIY,5415
+ pydantic_ai_rlm/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ pydantic_ai_rlm/repl.py,sha256=9Bja7NJg5qObKwxvBOVKeGEOwDWgTNS2X2259r5tHgA,14995
+ pydantic_ai_rlm/toolset.py,sha256=g1244qq2VzaQVhAQixLGPCWW7vJqQfjhUgf0kDxkkzo,5564
+ pydantic_ai_rlm/utils.py,sha256=_tqdMcD-_UbRbdK6ECD-XlXfRHgeAKj7ulLaXrYOJx4,1500
+ pydantic_ai_rlm-0.1.2.dist-info/METADATA,sha256=ntFi4yQHtRnc75ySjxg8jfpNw1CRNDw7dKsWiDyTirQ,12822
+ pydantic_ai_rlm-0.1.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ pydantic_ai_rlm-0.1.2.dist-info/licenses/LICENSE,sha256=ktSo7DxQBRIZIhD4P53iVxq88CG3cvXRTBRqPT8zO1c,1085
+ pydantic_ai_rlm-0.1.2.dist-info/RECORD,,
@@ -1,13 +0,0 @@
- pydantic_ai_rlm/__init__.py,sha256=94ev1VspkrVdB-PabCD6I791vZkBVASXv_qqneYQQ0I,794
- pydantic_ai_rlm/agent.py,sha256=_zHBLatfhquGw3vjHC06b0lBZRpLv2nrxi7w69f7lBY,4824
- pydantic_ai_rlm/dependencies.py,sha256=bzPXhCRJ-0D5N0FS32LAETXxp6cbInU00qaMr0qd_iQ,1306
- pydantic_ai_rlm/logging.py,sha256=DXJzfrfOnyR3VAbKF4_0Mc2MP7zqPmxyFDph4Q9kAb0,9308
- pydantic_ai_rlm/prompts.py,sha256=BpbugghZ_I_IjC_Il5pUClMG7bgWfzWgtsUsxwyjH10,3648
- pydantic_ai_rlm/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- pydantic_ai_rlm/repl.py,sha256=twn6ArhaONYg49Feoi-sD6z0HkYHyHHc-prdBCsj0jo,14864
- pydantic_ai_rlm/toolset.py,sha256=g1244qq2VzaQVhAQixLGPCWW7vJqQfjhUgf0kDxkkzo,5564
- pydantic_ai_rlm/utils.py,sha256=_tqdMcD-_UbRbdK6ECD-XlXfRHgeAKj7ulLaXrYOJx4,1500
- pydantic_ai_rlm-0.1.0.dist-info/METADATA,sha256=2w1yJoJGl5HHCWcl6JRz5pDys0o4iBTLcWSsXuGt8nk,11613
- pydantic_ai_rlm-0.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- pydantic_ai_rlm-0.1.0.dist-info/licenses/LICENSE,sha256=ktSo7DxQBRIZIhD4P53iVxq88CG3cvXRTBRqPT8zO1c,1085
- pydantic_ai_rlm-0.1.0.dist-info/RECORD,,