slowburn 0.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,3 @@
1
+ # API keys for LLM providers (copy to .env and fill in values)
2
+ OPENROUTER_API_KEY=
3
+ OPENAI_API_KEY=
@@ -0,0 +1,19 @@
1
+ name: linting
2
+
3
+ on: [push, pull_request]
4
+
5
+ jobs:
6
+ ruff:
7
+ runs-on: ubuntu-latest
8
+ steps:
9
+ - uses: actions/checkout@v4
10
+
11
+ - uses: astral-sh/ruff-action@v3
12
+ with:
13
+ args: check --select E,F,I,W
14
+ src: "./src"
15
+
16
+ - uses: astral-sh/ruff-action@v3
17
+ with:
18
+ args: format --check
19
+ src: "./src"
@@ -0,0 +1,38 @@
1
+ name: release
2
+
3
+ on:
4
+ release:
5
+ types:
6
+ - published
7
+
8
+ jobs:
9
+ pypi:
10
+ runs-on: ubuntu-latest
11
+ steps:
12
+ - name: Check out repository
13
+ uses: actions/checkout@v4
14
+ with:
15
+ fetch-depth: 0 # Required for hatch-vcs git tag versioning
16
+
17
+ - name: Set up Python
18
+ uses: actions/setup-python@v5
19
+ with:
20
+ python-version: "3.12"
21
+
22
+ - name: Install build tools
23
+ run: |
24
+ python -m pip install --upgrade pip
25
+ python -m pip install uv
26
+ python -m uv pip install hatch twine
27
+
28
+ - name: Verify version from git tag
29
+ run: hatch version
30
+
31
+ - name: Build package
32
+ run: hatch build
33
+
34
+ - name: Publish to PyPI
35
+ env:
36
+ TWINE_USERNAME: "__token__"
37
+ TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
38
+ run: twine upload --repository pypi dist/*
@@ -0,0 +1,48 @@
1
+ name: tests
2
+
3
+ on:
4
+ push:
5
+ branches: ["main", "mainline"]
6
+ pull_request:
7
+ branches: ["main", "mainline"]
8
+
9
+ jobs:
10
+ pytest:
11
+ runs-on: ubuntu-latest
12
+ timeout-minutes: 30
13
+
14
+ strategy:
15
+ fail-fast: false
16
+ matrix:
17
+ python-version: ["3.10", "3.12", "3.13"]
18
+
19
+ name: "tests (py${{ matrix.python-version }})"
20
+
21
+ steps:
22
+ - name: Check out repository
23
+ uses: actions/checkout@v4
24
+ with:
25
+ fetch-depth: 0
26
+
27
+ - name: Set up Python ${{ matrix.python-version }}
28
+ uses: actions/setup-python@v5
29
+ with:
30
+ python-version: ${{ matrix.python-version }}
31
+
32
+ - name: Cache pip
33
+ uses: actions/cache@v4
34
+ with:
35
+ path: ~/.cache/pip
36
+ key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('pyproject.toml') }}
37
+ restore-keys: |
38
+ ${{ runner.os }}-pip-${{ matrix.python-version }}-
39
+
40
+ - name: Install dependencies
41
+ run: |
42
+ python -m pip install --upgrade pip
43
+ python -m pip install --upgrade uv
44
+ python -m uv pip install -e ".[dev]"
45
+
46
+ - name: Run tests
47
+ run: |
48
+ pytest tests --tb=short -rf --verbose --timeout=120
@@ -0,0 +1,208 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[codz]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py.cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # UV
98
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ #uv.lock
102
+
103
+ # poetry
104
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
106
+ # commonly ignored for libraries.
107
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108
+ #poetry.lock
109
+ #poetry.toml
110
+
111
+ # pdm
112
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
113
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
114
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
115
+ #pdm.lock
116
+ #pdm.toml
117
+ .pdm-python
118
+ .pdm-build/
119
+
120
+ # pixi
121
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
122
+ #pixi.lock
123
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
124
+ # in the .venv directory. It is recommended not to include this directory in version control.
125
+ .pixi
126
+
127
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
128
+ __pypackages__/
129
+
130
+ # Celery stuff
131
+ celerybeat-schedule
132
+ celerybeat.pid
133
+
134
+ # SageMath parsed files
135
+ *.sage.py
136
+
137
+ # Environments
138
+ .env
139
+ _env
140
+ .envrc
141
+ .venv
142
+ env/
143
+ venv/
144
+ ENV/
145
+ env.bak/
146
+ venv.bak/
147
+
148
+ # Spyder project settings
149
+ .spyderproject
150
+ .spyproject
151
+
152
+ # Rope project settings
153
+ .ropeproject
154
+
155
+ # mkdocs documentation
156
+ /site
157
+
158
+ # mypy
159
+ .mypy_cache/
160
+ .dmypy.json
161
+ dmypy.json
162
+
163
+ # Pyre type checker
164
+ .pyre/
165
+
166
+ # pytype static type analyzer
167
+ .pytype/
168
+
169
+ # Cython debug symbols
170
+ cython_debug/
171
+
172
+ # PyCharm
173
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
174
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
175
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
176
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
177
+ #.idea/
178
+
179
+ # Abstra
180
+ # Abstra is an AI-powered process automation framework.
181
+ # Ignore directories containing user credentials, local state, and settings.
182
+ # Learn more at https://abstra.io/docs
183
+ .abstra/
184
+
185
+ # Visual Studio Code
186
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
187
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
188
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
189
+ # you could uncomment the following to ignore the entire vscode folder
190
+ # .vscode/
191
+
192
+ # Ruff stuff:
193
+ .ruff_cache/
194
+
195
+ # PyPI configuration file
196
+ .pypirc
197
+
198
+ # Cursor
199
+ # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
200
+ # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
201
+ # refer to https://docs.cursor.com/context/ignore-files
202
+ .cursorignore
203
+ .cursorindexingignore
204
+
205
+ # Marimo
206
+ marimo/_static/
207
+ marimo/_lsp/
208
+ __marimo__/
slowburn-0.2.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Abhishek Divekar
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,81 @@
1
+ Metadata-Version: 2.4
2
+ Name: slowburn
3
+ Version: 0.2.0
4
+ Summary: Cost-Sustainable Concurrent Execution for Long-Horizon LLM Agents
5
+ Project-URL: Homepage, https://github.com/adivekar-utexas/slowburn
6
+ Project-URL: Repository, https://github.com/adivekar-utexas/slowburn
7
+ Project-URL: Issues, https://github.com/adivekar-utexas/slowburn/issues
8
+ Author-email: Abhishek Divekar <adivekar@utexas.edu>
9
+ License-File: LICENSE
10
+ Keywords: agents,concurrency,cost-optimization,llm,long-horizon
11
+ Classifier: Development Status :: 2 - Pre-Alpha
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: Intended Audience :: Science/Research
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Operating System :: OS Independent
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.10
18
+ Classifier: Programming Language :: Python :: 3.11
19
+ Classifier: Programming Language :: Python :: 3.12
20
+ Classifier: Programming Language :: Python :: 3.13
21
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
22
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
23
+ Classifier: Typing :: Typed
24
+ Requires-Python: >=3.10
25
+ Requires-Dist: concurry>=0.13.0
26
+ Requires-Dist: litellm>=1.0.0
27
+ Requires-Dist: morphic>=0.1.0
28
+ Requires-Dist: pydantic>=2.0.0
29
+ Provides-Extra: all
30
+ Requires-Dist: ag2>=0.8.0; extra == 'all'
31
+ Requires-Dist: crewai>=0.80.0; extra == 'all'
32
+ Requires-Dist: pytest-cov>=4.0; extra == 'all'
33
+ Requires-Dist: pytest-timeout>=2.0; extra == 'all'
34
+ Requires-Dist: pytest>=7.0; extra == 'all'
35
+ Requires-Dist: ray>=2.0.0; extra == 'all'
36
+ Requires-Dist: ruff>=0.4; extra == 'all'
37
+ Provides-Extra: autogen
38
+ Requires-Dist: ag2>=0.8.0; extra == 'autogen'
39
+ Provides-Extra: crewai
40
+ Requires-Dist: crewai>=0.80.0; extra == 'crewai'
41
+ Provides-Extra: dev
42
+ Requires-Dist: pytest-cov>=4.0; extra == 'dev'
43
+ Requires-Dist: pytest-timeout>=2.0; extra == 'dev'
44
+ Requires-Dist: pytest>=7.0; extra == 'dev'
45
+ Requires-Dist: ruff>=0.4; extra == 'dev'
46
+ Provides-Extra: ray
47
+ Requires-Dist: ray>=2.0.0; extra == 'ray'
48
+ Description-Content-Type: text/markdown
49
+
50
+ # SlowBurn
51
+
52
+ [![PyPI version](https://img.shields.io/pypi/v/slowburn.svg)](https://pypi.org/project/slowburn/)
53
+ [![Tests](https://github.com/adivekar-utexas/slowburn/actions/workflows/tests.yml/badge.svg)](https://github.com/adivekar-utexas/slowburn/actions/workflows/tests.yml)
54
+ [![Linting](https://github.com/adivekar-utexas/slowburn/actions/workflows/linting.yml/badge.svg)](https://github.com/adivekar-utexas/slowburn/actions/workflows/linting.yml)
55
+ [![Python 3.10+](https://img.shields.io/badge/python-3.10%2B-blue.svg)](https://www.python.org/downloads/)
56
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
57
+
58
+ **SlowBurn: Cost-Sustainable Concurrent Execution for Long-Horizon LLM Agents**
59
+
60
+ SlowBurn is a Python library that lets LLM agent workflows run within a dollar budget by automatically slowing down — not crashing — when the budget is tight. It combines concurrent execution with dollar-denominated rate limiting and per-call cost tracking, so researchers can launch overnight batch experiments on a $5/day budget and wake up to completed results instead of a crashed script.
61
+
62
+ SlowBurn provides a native asyncio LLM worker for direct use, and drop-in integrations for [AutoGen (AG2)](https://github.com/ag2ai/ag2) and [CrewAI](https://github.com/crewAIInc/crewAI) that add budget control to existing multi-agent workflows without code changes.
63
+
64
+ ## Installation
65
+
66
+ ```bash
67
+ pip install slowburn
68
+ ```
69
+
70
+ ## Development
71
+
72
+ ```bash
73
+ git clone https://github.com/adivekar-utexas/slowburn.git
74
+ cd slowburn
75
+ pip install -e ".[dev]"
76
+ pytest
77
+ ```
78
+
79
+ ## License
80
+
81
+ MIT
@@ -0,0 +1,32 @@
1
+ # SlowBurn
2
+
3
+ [![PyPI version](https://img.shields.io/pypi/v/slowburn.svg)](https://pypi.org/project/slowburn/)
4
+ [![Tests](https://github.com/adivekar-utexas/slowburn/actions/workflows/tests.yml/badge.svg)](https://github.com/adivekar-utexas/slowburn/actions/workflows/tests.yml)
5
+ [![Linting](https://github.com/adivekar-utexas/slowburn/actions/workflows/linting.yml/badge.svg)](https://github.com/adivekar-utexas/slowburn/actions/workflows/linting.yml)
6
+ [![Python 3.10+](https://img.shields.io/badge/python-3.10%2B-blue.svg)](https://www.python.org/downloads/)
7
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
8
+
9
+ **SlowBurn: Cost-Sustainable Concurrent Execution for Long-Horizon LLM Agents**
10
+
11
+ SlowBurn is a Python library that lets LLM agent workflows run within a dollar budget by automatically slowing down — not crashing — when the budget is tight. It combines concurrent execution with dollar-denominated rate limiting and per-call cost tracking, so researchers can launch overnight batch experiments on a $5/day budget and wake up to completed results instead of a crashed script.
12
+
13
+ SlowBurn provides a native asyncio LLM worker for direct use, and drop-in integrations for [AutoGen (AG2)](https://github.com/ag2ai/ag2) and [CrewAI](https://github.com/crewAIInc/crewAI) that add budget control to existing multi-agent workflows without code changes.
14
+
15
+ ## Installation
16
+
17
+ ```bash
18
+ pip install slowburn
19
+ ```
20
+
21
+ ## Development
22
+
23
+ ```bash
24
+ git clone https://github.com/adivekar-utexas/slowburn.git
25
+ cd slowburn
26
+ pip install -e ".[dev]"
27
+ pytest
28
+ ```
29
+
30
+ ## License
31
+
32
+ MIT
@@ -0,0 +1,92 @@
"""
Demo: AutoGen (AG2) multi-agent chat with SlowBurn cost control.

Shows how to use SlowBurnModelClient to add dollar-budget backpressure
to any AutoGen agent. The ModelClient protocol gives us full access to
the litellm response object, providing exact cost tracking.

Usage:
    pip install slowburn[autogen]
    export OPENAI_API_KEY="sk-..."
    python examples/demo_autogen_budget.py
"""

import os


def main():
    """Run a budget-controlled AG2 two-agent chat and print a cost report.

    Returns None. If AutoGen (AG2) is not installed, prints an install hint
    and returns early. If OPENAI_API_KEY is unset, performs a dry run that
    still builds the agents (showing the setup pattern) but skips the chat.
    """
    # Guarded import: AG2 is an optional extra, so degrade gracefully.
    try:
        from autogen import AssistantAgent, UserProxyAgent, gather_usage_summary
    except ImportError:
        print("AutoGen (AG2) not installed. Run: pip install slowburn[autogen]")
        return

    # Imported after the AG2 check so a missing optional extra fails first
    # with the actionable message above.
    from concurry import LimitSet

    from slowburn.integrations.autogen import SlowBurnModelClient
    from slowburn.limits import CostLimit
    from slowburn.reporter import CostReporter

    api_key = os.getenv("OPENAI_API_KEY", "")
    if not api_key:
        print("Set OPENAI_API_KEY to run this demo.")
        print("Showing the setup pattern instead.\n")

    # === 1. Create shared budget (all agents share this) ===
    limit_set = LimitSet(
        limits=[CostLimit(budget_usd=3.0, window_seconds=3600)],  # $3/hour
        mode="thread",
        shared=True,
    )
    reporter = CostReporter()

    # === 2. Create AG2 agents with SlowBurn model client ===
    # NOTE(review): the "slowburn/" model prefix presumably routes the call
    # through SlowBurnModelClient — confirm against the integration module.
    config_list = [{"model": "slowburn/gpt-4o-mini", "api_key": api_key}]

    assistant = AssistantAgent(
        "assistant",
        llm_config={"config_list": config_list},
        system_message="You are a helpful AI assistant. Be concise.",
    )
    # Attach the budget-aware client; extra kwargs are forwarded to the
    # model client constructor by AG2.
    assistant.register_model_client(
        model_client_cls=SlowBurnModelClient,
        limit_set=limit_set,
        reporter=reporter,
    )

    user_proxy = UserProxyAgent(
        "user",
        human_input_mode="NEVER",
        max_consecutive_auto_reply=3,
        code_execution_config=False,
    )

    # === 3. Run conversation ===
    if api_key:
        user_proxy.initiate_chat(
            assistant,
            message="Explain in 3 bullet points why cost control matters for LLM agents.",
        )
    else:
        print("[dry run] Would run multi-turn conversation with budget-controlled LLM calls")

    # === 4. Report costs ===
    print(f"\n{'=' * 60}")
    print("SLOWBURN COST REPORT")
    print(f"{'=' * 60}")
    print(reporter.to_markdown())
    print(f"\nTotal cost: ${reporter.total_cost():.6f}")

    # AG2's built-in tracking also works:
    if api_key:
        print(f"\n{'=' * 60}")
        print("AG2 USAGE SUMMARY")
        print(f"{'=' * 60}")
        usage = gather_usage_summary([assistant, user_proxy])
        print(usage)

    print("\nDone.")


if __name__ == "__main__":
    main()
@@ -0,0 +1,91 @@
"""
Demo: Batch prompt optimization under a daily dollar budget.

Shows how SlowBurn automatically slows down (backpressure) when
approaching the budget limit, allowing long-running experiments
to complete overnight without crashing.

Without SlowBurn:
    Runs at full speed, burns $15 in 20 minutes, crashes on rate limit.

With SlowBurn (budget_usd=5.0, window="daily"):
    Automatically paces API calls. All iterations complete within budget.
    Cost report shows per-step breakdown.

Usage:
    export OPENROUTER_API_KEY="sk-or-v1-..."
    python examples/demo_batch_optimization.py
"""

import os
import time


def main():
    """Run a small batch prompt-optimization loop under a $5/day budget.

    Returns None. If OPENROUTER_API_KEY is unset, the loop runs in dry-run
    mode: it builds the prompts and prints what it would send, without
    making any API calls.

    Raises:
        ImportError: if the ``slowburn`` package is not installed.
    """
    from slowburn import create_llm

    api_key = os.getenv("OPENROUTER_API_KEY", "")
    if not api_key:
        print("Set OPENROUTER_API_KEY to run this demo with real API calls.")
        print("Showing the setup pattern instead.\n")

    # === 1. Create a cost-controlled LLM worker ===
    llm = create_llm(
        model="gpt-4o-mini",
        budget_usd=5.0,
        window="daily",
        max_rpm=500,
        max_input_tpm=1_000_000,
        max_output_tpm=200_000,
        api_key=api_key,
        temperature=0.7,
        max_tokens=512,
    )

    # === 2. Simulate a prompt optimization loop ===
    base_prompt = "Write a concise summary of the following concept: {concept}"
    concepts = [
        "gradient descent",
        "attention mechanisms",
        "reinforcement learning",
        "diffusion models",
        "mixture of experts",
    ]

    num_iterations = 3
    print(f"Running {num_iterations} optimization iterations over {len(concepts)} concepts...\n")

    for iteration in range(num_iterations):
        print(f"--- Iteration {iteration + 1}/{num_iterations} ---")
        start = time.time()

        # Generate candidate prompts (batch LLM call)
        prompts = [base_prompt.format(concept=c) for c in concepts]

        if api_key:
            # .result() blocks until the whole batch completes; SlowBurn
            # paces the calls internally to stay inside the budget.
            results = llm.call_llm_batch(prompts=prompts).result()
            elapsed = time.time() - start
            print(f"  Completed {len(results)} calls in {elapsed:.1f}s")
        else:
            print(f"  [dry run] Would send {len(prompts)} prompts to gpt-4o-mini")

    # === 3. Report costs ===
    reporter = llm.get_reporter().result()
    print(f"\n{'=' * 60}")
    print("COST REPORT")
    print(f"{'=' * 60}")
    print(f"Total calls: {reporter.num_calls}")
    print(f"Total cost: ${reporter.total_cost():.6f}")
    print()
    print(reporter.to_markdown())
    print()

    # Optionally save to JSON
    # reporter.to_json(Path("cost_report.json"))

    llm.stop()
    print("\nDone.")


if __name__ == "__main__":
    main()