pydantic-ai-rlm 0.1.0__tar.gz → 0.1.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (20)
  1. pydantic_ai_rlm-0.1.1/.github/workflows/publish.yml +44 -0
  2. {pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/PKG-INFO +7 -2
  3. {pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/README.md +5 -0
  4. {pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/examples/needle_in_haystack.py +2 -4
  5. {pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/examples/semantic_search.py +3 -8
  6. {pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/pyproject.toml +2 -2
  7. {pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/src/pydantic_ai_rlm/logging.py +5 -9
  8. pydantic_ai_rlm-0.1.0/.logfire/.gitignore +0 -1
  9. pydantic_ai_rlm-0.1.0/.logfire/logfire_credentials.json +0 -6
  10. {pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/.gitignore +0 -0
  11. {pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/.pre-commit-config.yaml +0 -0
  12. {pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/LICENSE +0 -0
  13. {pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/src/pydantic_ai_rlm/__init__.py +0 -0
  14. {pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/src/pydantic_ai_rlm/agent.py +0 -0
  15. {pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/src/pydantic_ai_rlm/dependencies.py +0 -0
  16. {pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/src/pydantic_ai_rlm/prompts.py +0 -0
  17. {pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/src/pydantic_ai_rlm/py.typed +0 -0
  18. {pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/src/pydantic_ai_rlm/repl.py +0 -0
  19. {pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/src/pydantic_ai_rlm/toolset.py +0 -0
  20. {pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/src/pydantic_ai_rlm/utils.py +0 -0
pydantic_ai_rlm-0.1.1/.github/workflows/publish.yml
@@ -0,0 +1,44 @@
+ name: Publish to PyPI
+
+ on:
+   release:
+     types: [published]
+
+ jobs:
+   build:
+     name: Build distribution
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+
+       - name: Install uv
+         uses: astral-sh/setup-uv@v5
+
+       - name: Build package
+         run: uv build
+
+       - name: Upload distribution artifacts
+         uses: actions/upload-artifact@v4
+         with:
+           name: python-package-distributions
+           path: dist/
+
+   publish-pypi:
+     name: Publish to PyPI
+     needs: build
+     runs-on: ubuntu-latest
+     environment:
+       name: pypi
+       url: https://pypi.org/p/pydantic-ai-rlm
+     permissions:
+       id-token: write
+
+     steps:
+       - name: Download distribution artifacts
+         uses: actions/download-artifact@v4
+         with:
+           name: python-package-distributions
+           path: dist/
+
+       - name: Publish to PyPI
+         uses: pypa/gh-action-pypi-publish@release/v1
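Note: this new workflow builds the package with `uv build` on every published GitHub release and uploads it through PyPI trusted publishing (the `id-token: write` permission provides the OIDC token consumed by `pypa/gh-action-pypi-publish`). A hypothetical local pre-release check, not part of the package, that mirrors the build step and confirms the `dist/` artifacts carry the version being released (the `0.1.1` value and `dist/` path come from this diff; the script itself is illustrative):

```python
# Hypothetical pre-release sanity check: after running `uv build` locally,
# verify that every artifact in dist/ is named for the version about to ship.
from pathlib import Path

expected = "0.1.1"
artifacts = sorted(Path("dist").glob("*"))
assert artifacts, "dist/ is empty - run `uv build` first"
for artifact in artifacts:
    # Wheels and sdists embed the version in their file names.
    assert expected in artifact.name, f"{artifact.name} does not match {expected}"
    print(f"OK: {artifact.name}")
```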

{pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pydantic-ai-rlm
- Version: 0.1.0
+ Version: 0.1.1
  Summary: Recursive Language Model (RLM) toolset for Pydantic AI - handle extremely large contexts
  Author: Pydantic AI RLM Contributors
  License-Expression: MIT
@@ -16,7 +16,7 @@ Classifier: Programming Language :: Python :: 3.12
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Classifier: Typing :: Typed
  Requires-Python: >=3.10
- Requires-Dist: pydantic-ai>=0.1.0
+ Requires-Dist: pydantic-ai-slim[cli]>=0.1.0
  Provides-Extra: dev
  Requires-Dist: mypy>=1.0; extra == 'dev'
  Requires-Dist: pytest-asyncio>=0.21; extra == 'dev'
@@ -34,6 +34,7 @@ Description-Content-Type: text/markdown

  <p align="center">
    <a href="https://github.com/vstorm-co/pydantic-ai-rlm">GitHub</a> •
+   <a href="https://pypi.org/project/pydantic-ai-rlm/">PyPI</a> •
    <a href="https://github.com/vstorm-co/pydantic-ai-rlm#examples">Examples</a>
  </p>

@@ -41,6 +42,7 @@ Description-Content-Type: text/markdown
    <a href="https://www.python.org/downloads/"><img src="https://img.shields.io/badge/python-3.10+-blue.svg" alt="Python 3.10+"></a>
    <a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT"></a>
    <a href="https://github.com/pydantic/pydantic-ai"><img src="https://img.shields.io/badge/Powered%20by-Pydantic%20AI-E92063?logo=pydantic&logoColor=white" alt="Pydantic AI"></a>
+   <a href="https://pypi.org/project/pydantic-ai-rlm/"><img src="https://img.shields.io/pypi/v/pydantic-ai-rlm.svg" alt="PyPI version"></a>
  </p>

  <p align="center">
@@ -278,16 +280,19 @@ configure_logging(enabled=False)
  ```

  Install with rich logging support for syntax highlighting and styled output:
+
  ```bash
  pip install pydantic-ai-rlm[logging]
  ```

  Or install rich separately:
+
  ```bash
  pip install rich
  ```

  When enabled, you'll see:
+
  - Syntax-highlighted code being executed (with rich)
  - Execution results with status indicators (SUCCESS/ERROR)
  - Execution time for each code block

{pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/README.md
@@ -6,6 +6,7 @@

  <p align="center">
    <a href="https://github.com/vstorm-co/pydantic-ai-rlm">GitHub</a> •
+   <a href="https://pypi.org/project/pydantic-ai-rlm/">PyPI</a> •
    <a href="https://github.com/vstorm-co/pydantic-ai-rlm#examples">Examples</a>
  </p>

@@ -13,6 +14,7 @@
    <a href="https://www.python.org/downloads/"><img src="https://img.shields.io/badge/python-3.10+-blue.svg" alt="Python 3.10+"></a>
    <a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT"></a>
    <a href="https://github.com/pydantic/pydantic-ai"><img src="https://img.shields.io/badge/Powered%20by-Pydantic%20AI-E92063?logo=pydantic&logoColor=white" alt="Pydantic AI"></a>
+   <a href="https://pypi.org/project/pydantic-ai-rlm/"><img src="https://img.shields.io/pypi/v/pydantic-ai-rlm.svg" alt="PyPI version"></a>
  </p>

  <p align="center">
@@ -250,16 +252,19 @@ configure_logging(enabled=False)
  ```

  Install with rich logging support for syntax highlighting and styled output:
+
  ```bash
  pip install pydantic-ai-rlm[logging]
  ```

  Or install rich separately:
+
  ```bash
  pip install rich
  ```

  When enabled, you'll see:
+
  - Syntax-highlighted code being executed (with rich)
  - Execution results with status indicators (SUCCESS/ERROR)
  - Execution time for each code block
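The README and PKG-INFO hunks above only add PyPI links and spacing around the logging docs; the logging toggle itself is unchanged. For orientation, a minimal usage sketch: the `configure_logging(enabled=...)` call is taken from the hunk context above, and anything beyond that parameter is an assumption, not part of this diff.

```python
# Minimal sketch, assuming only the configure_logging(enabled=...) call
# visible in the README hunks above; any other parameters are not shown
# in this diff.
from pydantic_ai_rlm import configure_logging

configure_logging(enabled=True)   # styled output when `rich` is installed, plain text otherwise
# ... run an RLM analysis here ...
configure_logging(enabled=False)  # silence execution logging again
```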

{pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/examples/needle_in_haystack.py
@@ -10,12 +10,10 @@ import random

  from dotenv import load_dotenv

- from pydantic_ai_rlm import run_rlm_analysis_sync, configure_logging
+ from pydantic_ai_rlm import configure_logging, run_rlm_analysis_sync


- def generate_massive_context(
-     num_lines: int = 1_000_000, answer: str = "1298418"
- ) -> str:
+ def generate_massive_context(num_lines: int = 1_000_000, answer: str = "1298418") -> str:
      """Generate a massive text context with a hidden magic number."""
      print(f"Generating massive context with {num_lines:,} lines...")


{pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/examples/semantic_search.py
@@ -10,7 +10,7 @@ import random

  from dotenv import load_dotenv

- from pydantic_ai_rlm import run_rlm_analysis_sync, configure_logging
+ from pydantic_ai_rlm import configure_logging, run_rlm_analysis_sync


  def generate_semantic_context(num_documents: int = 500) -> tuple[str, str]:
@@ -67,10 +67,7 @@ def generate_semantic_context(num_documents: int = 500) -> tuple[str, str]:
          "Ware",
          "Hub",
      ]
-     companies = [
-         f"{random.choice(prefixes)}{random.choice(suffixes)}"
-         for _ in range(num_documents)
-     ]
+     companies = [f"{random.choice(prefixes)}{random.choice(suffixes)}" for _ in range(num_documents)]

      # Pick a random company to be the bankrupt one
      bankrupt_idx = random.randint(100, num_documents - 100)
@@ -112,9 +109,7 @@ def generate_semantic_context(num_documents: int = 500) -> tuple[str, str]:
      random.shuffle(documents)

      context = "\n\n".join(documents)
-     print(
-         f"Bankrupt company: {bankrupt_company} (was at original index {bankrupt_idx})"
-     )
+     print(f"Bankrupt company: {bankrupt_company} (was at original index {bankrupt_idx})")

      return context, bankrupt_company


{pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "pydantic-ai-rlm"
- version = "0.1.0"
+ version = "0.1.1"
  description = "Recursive Language Model (RLM) toolset for Pydantic AI - handle extremely large contexts"
  readme = "README.md"
  license = "MIT"
@@ -27,7 +27,7 @@ classifiers = [
      "Topic :: Scientific/Engineering :: Artificial Intelligence",
      "Typing :: Typed",
  ]
- dependencies = ["pydantic-ai>=0.1.0"]
+ dependencies = ["pydantic-ai-slim[cli]>=0.1.0"]

  [project.optional-dependencies]
  dev = ["pytest>=7.0", "pytest-asyncio>=0.21", "mypy>=1.0", "ruff>=0.1"]

{pydantic_ai_rlm-0.1.0 → pydantic_ai_rlm-0.1.1}/src/pydantic_ai_rlm/logging.py
@@ -47,7 +47,7 @@ class RLMLogger:
              )
              self.console.print(panel)
          else:
-             print(f"\n{'='*50}")
+             print(f"\n{'=' * 50}")
              print("CODE EXECUTION")
              print("=" * 50)
              print(code)
@@ -141,7 +141,7 @@ class RLMLogger:
      def _log_result_plain(self, result: REPLResult) -> None:
          """Log result using plain text."""
          status = "SUCCESS" if result.success else "ERROR"
-         print(f"\n{'='*50}")
+         print(f"\n{'=' * 50}")
          print(f"RESULT: {status} (executed in {result.execution_time:.3f}s)")
          print("=" * 50)

@@ -159,11 +159,7 @@ class RLMLogger:
              stderr = stderr[:1000] + "\n... (truncated)"
          print(stderr)

-         user_vars = {
-             k: v
-             for k, v in result.locals.items()
-             if not k.startswith("_") and k not in ("context", "json", "re", "os")
-         }
+         user_vars = {k: v for k, v in result.locals.items() if not k.startswith("_") and k not in ("context", "json", "re", "os")}
          if user_vars:
              print("\nVariables:")
              for name, value in list(user_vars.items())[:10]:
@@ -198,7 +194,7 @@ class RLMLogger:
              )
              self.console.print(panel)
          else:
-             print(f"\n{'='*50}")
+             print(f"\n{'=' * 50}")
              print("LLM QUERY")
              print("=" * 50)
              display_prompt = prompt
@@ -226,7 +222,7 @@ class RLMLogger:
              )
              self.console.print(panel)
          else:
-             print(f"\n{'='*50}")
+             print(f"\n{'=' * 50}")
              print("LLM RESPONSE")
              print("=" * 50)
              display_response = response

pydantic_ai_rlm-0.1.0/.logfire/.gitignore
@@ -1 +0,0 @@
- *

pydantic_ai_rlm-0.1.0/.logfire/logfire_credentials.json
@@ -1,6 +0,0 @@
- {
-   "token": "pylf_v1_us_bBcHq1CyJmt3JYVtRjpTpgTGrr3SW3yy7Cvn3nN6kh11",
-   "project_name": "internal-demos",
-   "project_url": "https://logfire-us.pydantic.dev/wojciech-achtelik/internal-demos",
-   "logfire_api_url": "https://logfire-api.pydantic.dev"
- }