tetherai-python 0.1.0a0__tar.gz → 0.1.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27) hide show
  1. tetherai_python-0.1.6/PKG-INFO +166 -0
  2. tetherai_python-0.1.6/README.md +133 -0
  3. {tetherai_python-0.1.0a0 → tetherai_python-0.1.6}/pyproject.toml +1 -1
  4. {tetherai_python-0.1.0a0 → tetherai_python-0.1.6}/src/tetherai/budget.py +8 -8
  5. {tetherai_python-0.1.0a0 → tetherai_python-0.1.6}/src/tetherai/circuit_breaker.py +2 -0
  6. tetherai_python-0.1.6/src/tetherai/crewai/integration.py +55 -0
  7. tetherai_python-0.1.6/src/tetherai/interceptor.py +560 -0
  8. {tetherai_python-0.1.0a0 → tetherai_python-0.1.6}/src/tetherai/pricing.py +5 -2
  9. tetherai_python-0.1.6/src/tetherai_python.egg-info/PKG-INFO +166 -0
  10. tetherai_python-0.1.0a0/PKG-INFO +0 -35
  11. tetherai_python-0.1.0a0/README.md +0 -2
  12. tetherai_python-0.1.0a0/src/tetherai/crewai/integration.py +0 -68
  13. tetherai_python-0.1.0a0/src/tetherai/interceptor.py +0 -258
  14. tetherai_python-0.1.0a0/src/tetherai_python.egg-info/PKG-INFO +0 -35
  15. {tetherai_python-0.1.0a0 → tetherai_python-0.1.6}/setup.cfg +0 -0
  16. {tetherai_python-0.1.0a0 → tetherai_python-0.1.6}/src/tetherai/__init__.py +0 -0
  17. {tetherai_python-0.1.0a0 → tetherai_python-0.1.6}/src/tetherai/_version.py +0 -0
  18. {tetherai_python-0.1.0a0 → tetherai_python-0.1.6}/src/tetherai/config.py +0 -0
  19. {tetherai_python-0.1.0a0 → tetherai_python-0.1.6}/src/tetherai/crewai/__init__.py +0 -0
  20. {tetherai_python-0.1.0a0 → tetherai_python-0.1.6}/src/tetherai/exceptions.py +0 -0
  21. {tetherai_python-0.1.0a0 → tetherai_python-0.1.6}/src/tetherai/exporter.py +0 -0
  22. {tetherai_python-0.1.0a0 → tetherai_python-0.1.6}/src/tetherai/token_counter.py +0 -0
  23. {tetherai_python-0.1.0a0 → tetherai_python-0.1.6}/src/tetherai/trace.py +0 -0
  24. {tetherai_python-0.1.0a0 → tetherai_python-0.1.6}/src/tetherai_python.egg-info/SOURCES.txt +0 -0
  25. {tetherai_python-0.1.0a0 → tetherai_python-0.1.6}/src/tetherai_python.egg-info/dependency_links.txt +0 -0
  26. {tetherai_python-0.1.0a0 → tetherai_python-0.1.6}/src/tetherai_python.egg-info/requires.txt +0 -0
  27. {tetherai_python-0.1.0a0 → tetherai_python-0.1.6}/src/tetherai_python.egg-info/top_level.txt +0 -0
@@ -0,0 +1,166 @@
1
+ Metadata-Version: 2.4
2
+ Name: tetherai-python
3
+ Version: 0.1.6
4
+ Summary: AI budget guardrails for LLM applications
5
+ Author-email: TetherAI Engineering <engineering@tetherai.com>
6
+ License: Apache-2.0
7
+ Project-URL: Homepage, https://github.com/tetherai/tetherai-python
8
+ Project-URL: Repository, https://github.com/tetherai/tetherai-python
9
+ Classifier: Development Status :: 3 - Alpha
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: License :: OSI Approved :: Apache Software License
12
+ Classifier: Programming Language :: Python :: 3
13
+ Classifier: Programming Language :: Python :: 3.10
14
+ Classifier: Programming Language :: Python :: 3.11
15
+ Classifier: Programming Language :: Python :: 3.12
16
+ Classifier: Programming Language :: Python :: 3.13
17
+ Requires-Python: >=3.10
18
+ Description-Content-Type: text/markdown
19
+ Requires-Dist: tiktoken>=0.7.0
20
+ Provides-Extra: crewai
21
+ Requires-Dist: crewai>=1.0.0; extra == "crewai"
22
+ Provides-Extra: litellm
23
+ Requires-Dist: litellm>=1.40.0; extra == "litellm"
24
+ Provides-Extra: dev
25
+ Requires-Dist: pytest>=8.0; extra == "dev"
26
+ Requires-Dist: pytest-cov>=5.0; extra == "dev"
27
+ Requires-Dist: pytest-asyncio>=0.23; extra == "dev"
28
+ Requires-Dist: pytest-mock>=3.12; extra == "dev"
29
+ Requires-Dist: ruff>=0.4; extra == "dev"
30
+ Requires-Dist: mypy>=1.10; extra == "dev"
31
+ Requires-Dist: crewai>=1.0.0; extra == "dev"
32
+ Requires-Dist: litellm>=1.40.0; extra == "dev"
33
+
34
+ # TetherAI
35
+
36
+ [![CI](https://github.com/tetherai/tetherai-python/actions/workflows/ci.yml/badge.svg)](https://github.com/tetherai/tetherai-python/actions/workflows/ci.yml)
37
+ [![PyPI](https://img.shields.io/pypi/v/tetherai-python)](https://pypi.org/project/tetherai-python/)
38
+ [![Python](https://img.shields.io/pypi/pyversions/tetherai-python)](https://pypi.org/project/tetherai-python/)
39
+ [![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](LICENSE)
40
+
41
+ **Financial circuit breaker for AI agents. Stop runaway LLM costs before they happen.**
42
+
43
+ AI agents don't crash when they fail — they loop, hallucinate, and retry. A stuck CrewAI crew can silently burn hundreds of dollars in LLM API calls over a weekend. Traditional monitoring tools like Datadog will show you an API cost spike on Monday morning. TetherAI kills the agent before it spends your money.
44
+
45
+ <!-- TODO: Replace with asciinema embed or GIF -->
46
+ <!-- Record with: asciinema rec demo.cast then upload to asciinema.org -->
47
+
48
+ ## Install
49
+
50
+ ```bash
51
+ pip install tetherai-python
52
+ ```
53
+
54
+ With CrewAI support:
55
+
56
+ ```bash
57
+ pip install "tetherai-python[crewai]"
58
+ ```
59
+
60
+ ## Quick Start
61
+
62
+ The `@tether.enforce_budget` decorator wraps any function with budget guardrails:
63
+
64
+ ```python
65
+ from tetherai import tether, BudgetExceededError
66
+ import litellm
67
+
68
+
69
+ @tether.enforce_budget(max_usd=0.05)
70
+ def my_workflow():
71
+ for i in range(100):
72
+ response = litellm.completion(
73
+ model="gpt-4o-mini",
74
+ messages=[{"role": "user", "content": f"Count to {i}"}]
75
+ )
76
+ print(f"Call {i+1}: {response.choices[0].message.content[:50]}...")
77
+
78
+
79
+ try:
80
+ my_workflow()
81
+ except BudgetExceededError as e:
82
+ print(f"\n🛑 Stopped! Spent ${e.spent_usd:.4f} of ${e.budget_usd:.2f} budget")
83
+ ```
84
+
85
+ Expected output:
86
+ ```
87
+ Call 1: 0...
88
+ Call 2: 0, 1...
89
+ Call 3: 0, 1, 2...
90
+
91
+ 🛑 Stopped! Spent $0.0501 of $0.05 budget
92
+ ```
93
+
94
+ ## CrewAI Integration
95
+
96
+ For CrewAI crews, use `protect_crew()` to wrap budget enforcement around the entire crew:
97
+
98
+ ```python
99
+ from tetherai import protect_crew, BudgetExceededError
100
+ from crewai import Agent, Task, Crew, Process
101
+
102
+ researcher = Agent(
103
+ role="Research Analyst",
104
+ goal="Find comprehensive information on AI observability tools",
105
+ backstory="You are a thorough research analyst known for detailed analysis.",
106
+ verbose=True,
107
+ )
108
+
109
+ task = Task(
110
+ description="Research the competitive landscape of AI observability tools",
111
+ agent=researcher,
112
+ )
113
+
114
+ crew = Crew(agents=[researcher], tasks=[task], process=Process.sequential)
115
+
116
+ protected_crew = protect_crew(crew, max_usd=0.10)
117
+
118
+ try:
119
+ result = protected_crew.kickoff()
120
+ except BudgetExceededError as e:
121
+ print(f"Budget exceeded: ${e.spent_usd:.2f} of ${e.budget_usd:.2f}")
122
+ ```
123
+
124
+ ## What Happens When Budget Is Exceeded
125
+
126
+ When the budget is exceeded, TetherAI raises `BudgetExceededError` with details about the run:
127
+
128
+ ```json
129
+ {
130
+ "run_id": "run_abc123",
131
+ "budget_usd": 0.10,
132
+ "spent_usd": 0.13,
133
+ "turns": 7,
134
+ "spans": [
135
+ {
136
+ "span_type": "llm_call",
137
+ "model": "gpt-4o-mini",
138
+ "input_tokens": 1250,
139
+ "output_tokens": 340,
140
+ "cost_usd": 0.0019,
141
+ "status": "ok"
142
+ }
143
+ ]
144
+ }
145
+ ```
146
+
147
+ ## How It Works
148
+
149
+ TetherAI patches `litellm.completion` at runtime to intercept every LLM call your agent makes. Before each call, it counts input tokens locally using tiktoken and checks the projected cost against your budget. If the budget would be exceeded, the call is blocked and a `BudgetExceededError` is raised. After each successful call, actual token usage from the LLM response is recorded for accurate cost tracking.
150
+
151
+ ## Supported Frameworks
152
+
153
+ | Framework | Status | Integration |
154
+ |-----------|--------|-------------|
155
+ | CrewAI | ✅ Supported | `protect_crew()` |
156
+ | LiteLLM (direct) | ✅ Supported | `@enforce_budget` decorator |
157
+ | LangChain | 🔜 Coming soon | — |
158
+ | smolagents | 🔜 Coming soon | — |
159
+
160
+ ## Contributing
161
+
162
+ We welcome contributions! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
163
+
164
+ ## License
165
+
166
+ Apache 2.0
@@ -0,0 +1,133 @@
1
+ # TetherAI
2
+
3
+ [![CI](https://github.com/tetherai/tetherai-python/actions/workflows/ci.yml/badge.svg)](https://github.com/tetherai/tetherai-python/actions/workflows/ci.yml)
4
+ [![PyPI](https://img.shields.io/pypi/v/tetherai-python)](https://pypi.org/project/tetherai-python/)
5
+ [![Python](https://img.shields.io/pypi/pyversions/tetherai-python)](https://pypi.org/project/tetherai-python/)
6
+ [![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](LICENSE)
7
+
8
+ **Financial circuit breaker for AI agents. Stop runaway LLM costs before they happen.**
9
+
10
+ AI agents don't crash when they fail — they loop, hallucinate, and retry. A stuck CrewAI crew can silently burn hundreds of dollars in LLM API calls over a weekend. Traditional monitoring tools like Datadog will show you an API cost spike on Monday morning. TetherAI kills the agent before it spends your money.
11
+
12
+ <!-- TODO: Replace with asciinema embed or GIF -->
13
+ <!-- Record with: asciinema rec demo.cast then upload to asciinema.org -->
14
+
15
+ ## Install
16
+
17
+ ```bash
18
+ pip install tetherai-python
19
+ ```
20
+
21
+ With CrewAI support:
22
+
23
+ ```bash
24
+ pip install "tetherai-python[crewai]"
25
+ ```
26
+
27
+ ## Quick Start
28
+
29
+ The `@tether.enforce_budget` decorator wraps any function with budget guardrails:
30
+
31
+ ```python
32
+ from tetherai import tether, BudgetExceededError
33
+ import litellm
34
+
35
+
36
+ @tether.enforce_budget(max_usd=0.05)
37
+ def my_workflow():
38
+ for i in range(100):
39
+ response = litellm.completion(
40
+ model="gpt-4o-mini",
41
+ messages=[{"role": "user", "content": f"Count to {i}"}]
42
+ )
43
+ print(f"Call {i+1}: {response.choices[0].message.content[:50]}...")
44
+
45
+
46
+ try:
47
+ my_workflow()
48
+ except BudgetExceededError as e:
49
+ print(f"\n🛑 Stopped! Spent ${e.spent_usd:.4f} of ${e.budget_usd:.2f} budget")
50
+ ```
51
+
52
+ Expected output:
53
+ ```
54
+ Call 1: 0...
55
+ Call 2: 0, 1...
56
+ Call 3: 0, 1, 2...
57
+
58
+ 🛑 Stopped! Spent $0.0501 of $0.05 budget
59
+ ```
60
+
61
+ ## CrewAI Integration
62
+
63
+ For CrewAI crews, use `protect_crew()` to wrap budget enforcement around the entire crew:
64
+
65
+ ```python
66
+ from tetherai import protect_crew, BudgetExceededError
67
+ from crewai import Agent, Task, Crew, Process
68
+
69
+ researcher = Agent(
70
+ role="Research Analyst",
71
+ goal="Find comprehensive information on AI observability tools",
72
+ backstory="You are a thorough research analyst known for detailed analysis.",
73
+ verbose=True,
74
+ )
75
+
76
+ task = Task(
77
+ description="Research the competitive landscape of AI observability tools",
78
+ agent=researcher,
79
+ )
80
+
81
+ crew = Crew(agents=[researcher], tasks=[task], process=Process.sequential)
82
+
83
+ protected_crew = protect_crew(crew, max_usd=0.10)
84
+
85
+ try:
86
+ result = protected_crew.kickoff()
87
+ except BudgetExceededError as e:
88
+ print(f"Budget exceeded: ${e.spent_usd:.2f} of ${e.budget_usd:.2f}")
89
+ ```
90
+
91
+ ## What Happens When Budget Is Exceeded
92
+
93
+ When the budget is exceeded, TetherAI raises `BudgetExceededError` with details about the run:
94
+
95
+ ```json
96
+ {
97
+ "run_id": "run_abc123",
98
+ "budget_usd": 0.10,
99
+ "spent_usd": 0.13,
100
+ "turns": 7,
101
+ "spans": [
102
+ {
103
+ "span_type": "llm_call",
104
+ "model": "gpt-4o-mini",
105
+ "input_tokens": 1250,
106
+ "output_tokens": 340,
107
+ "cost_usd": 0.0019,
108
+ "status": "ok"
109
+ }
110
+ ]
111
+ }
112
+ ```
113
+
114
+ ## How It Works
115
+
116
+ TetherAI patches `litellm.completion` at runtime to intercept every LLM call your agent makes. Before each call, it counts input tokens locally using tiktoken and checks the projected cost against your budget. If the budget would be exceeded, the call is blocked and a `BudgetExceededError` is raised. After each successful call, actual token usage from the LLM response is recorded for accurate cost tracking.
117
+
118
+ ## Supported Frameworks
119
+
120
+ | Framework | Status | Integration |
121
+ |-----------|--------|-------------|
122
+ | CrewAI | ✅ Supported | `protect_crew()` |
123
+ | LiteLLM (direct) | ✅ Supported | `@enforce_budget` decorator |
124
+ | LangChain | 🔜 Coming soon | — |
125
+ | smolagents | 🔜 Coming soon | — |
126
+
127
+ ## Contributing
128
+
129
+ We welcome contributions! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
130
+
131
+ ## License
132
+
133
+ Apache 2.0
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "tetherai-python"
3
- version = "0.1.0-alpha"
3
+ version = "0.1.6"
4
4
  description = "AI budget guardrails for LLM applications"
5
5
  readme = "README.md"
6
6
  license = { text = "Apache-2.0" }
@@ -49,16 +49,16 @@ class BudgetTracker:
49
49
  with self._lock:
50
50
  return self._spent_usd >= self.max_usd
51
51
 
52
- def pre_check(self, estimated_input_cost: float) -> None:
52
+ def pre_check(self, estimated_cost: float, model: str = "unknown") -> None:
53
53
  with self._lock:
54
- projected = self._spent_usd + estimated_input_cost
55
- if projected > self.max_usd:
54
+ projected = self._spent_usd + estimated_cost
55
+ if projected >= self.max_usd:
56
56
  raise BudgetExceededError(
57
- message=f"Budget exceeded: ${projected:.2f} > ${self.max_usd:.2f}",
57
+ message=f"Budget exceeded: ${projected:.6f} >= ${self.max_usd:.6f}",
58
58
  run_id=self.run_id,
59
59
  budget_usd=self.max_usd,
60
60
  spent_usd=projected,
61
- last_model="unknown",
61
+ last_model=model,
62
62
  )
63
63
 
64
64
  def record_call(
@@ -84,6 +84,9 @@ class BudgetTracker:
84
84
  self._spent_usd += cost_usd
85
85
  self._turn_count += 1
86
86
 
87
+ if self._spent_usd > self.max_usd:
88
+ self._spent_usd = self.max_usd
89
+
87
90
  self._calls.append(
88
91
  CallRecord(
89
92
  input_tokens=input_tokens,
@@ -94,9 +97,6 @@ class BudgetTracker:
94
97
  )
95
98
  )
96
99
 
97
- if self._spent_usd > self.max_usd:
98
- self._spent_usd = self.max_usd
99
-
100
100
  def get_summary(self) -> dict[str, Any]:
101
101
  with self._lock:
102
102
  return {
@@ -83,6 +83,7 @@ def _run_with_budget(
83
83
  interceptor.deactivate()
84
84
  trace = trace_collector.end_trace()
85
85
  if trace and trace_export != "none":
86
+ trace.budget_summary = budget_tracker.get_summary()
86
87
  exporter = get_exporter(trace_export, config.trace_export_path)
87
88
  exporter.export(trace)
88
89
 
@@ -128,5 +129,6 @@ async def _run_with_budget_async(
128
129
  interceptor.deactivate()
129
130
  trace = trace_collector.end_trace()
130
131
  if trace and trace_export != "none":
132
+ trace.budget_summary = budget_tracker.get_summary()
131
133
  exporter = get_exporter(trace_export, config.trace_export_path)
132
134
  exporter.export(trace)
@@ -0,0 +1,55 @@
1
+ from typing import TYPE_CHECKING, Any
2
+
3
+ if TYPE_CHECKING:
4
+ from crewai import Crew
5
+
6
+
7
+ def _check_crewai_installed() -> None:
8
+ try:
9
+ import crewai # noqa: F401
10
+ except ImportError:
11
+ raise ImportError(
12
+ "crewai is not installed. Install it with: pip install tetherai[crewai]"
13
+ ) from None
14
+
15
+
16
+ class ProtectedCrew:
17
+ """Wrapper around CrewAI crew with budget enforcement."""
18
+
19
+ def __init__(
20
+ self,
21
+ crew: "Crew",
22
+ max_usd: float,
23
+ max_turns: int | None = None,
24
+ trace_export: str | None = None,
25
+ ):
26
+ from tetherai.circuit_breaker import enforce_budget
27
+
28
+ self._crew = crew
29
+ self._max_usd = max_usd
30
+ self._max_turns = max_turns
31
+
32
+ self.kickoff = enforce_budget(
33
+ max_usd=max_usd, max_turns=max_turns, trace_export=trace_export
34
+ )(crew.kickoff)
35
+
36
+ def __getattr__(self, name: str) -> Any:
37
+ return getattr(self._crew, name)
38
+
39
+
40
+ def protect_crew(
41
+ crew: "Crew",
42
+ max_usd: float,
43
+ max_turns: int | None = None,
44
+ trace_export: str | None = None,
45
+ ) -> ProtectedCrew:
46
+ _check_crewai_installed()
47
+ return ProtectedCrew(crew, max_usd, max_turns, trace_export)
48
+
49
+
50
+ def tether_step_callback(step_output: Any) -> None:
51
+ pass
52
+
53
+
54
+ def tether_task_callback(task_output: Any) -> None:
55
+ pass