tei-loop 0.1.0 (tar.gz)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. tei_loop-0.1.0/LICENSE +21 -0
  2. tei_loop-0.1.0/PKG-INFO +185 -0
  3. tei_loop-0.1.0/README.md +151 -0
  4. tei_loop-0.1.0/examples/example_crewai.py +35 -0
  5. tei_loop-0.1.0/examples/example_custom_python.py +33 -0
  6. tei_loop-0.1.0/examples/example_langgraph.py +40 -0
  7. tei_loop-0.1.0/pyproject.toml +65 -0
  8. tei_loop-0.1.0/setup.cfg +4 -0
  9. tei_loop-0.1.0/tei_loop/__init__.py +46 -0
  10. tei_loop-0.1.0/tei_loop/adapters/__init__.py +5 -0
  11. tei_loop-0.1.0/tei_loop/adapters/crewai.py +237 -0
  12. tei_loop-0.1.0/tei_loop/adapters/generic.py +41 -0
  13. tei_loop-0.1.0/tei_loop/adapters/langgraph.py +159 -0
  14. tei_loop-0.1.0/tei_loop/cli.py +225 -0
  15. tei_loop-0.1.0/tei_loop/dimensions/__init__.py +11 -0
  16. tei_loop-0.1.0/tei_loop/dimensions/base.py +92 -0
  17. tei_loop-0.1.0/tei_loop/dimensions/execution_accuracy.py +82 -0
  18. tei_loop-0.1.0/tei_loop/dimensions/output_integrity.py +67 -0
  19. tei_loop-0.1.0/tei_loop/dimensions/reasoning_soundness.py +75 -0
  20. tei_loop-0.1.0/tei_loop/dimensions/target_alignment.py +70 -0
  21. tei_loop-0.1.0/tei_loop/evaluator.py +122 -0
  22. tei_loop-0.1.0/tei_loop/improver.py +327 -0
  23. tei_loop-0.1.0/tei_loop/llm_provider.py +427 -0
  24. tei_loop-0.1.0/tei_loop/loop.py +409 -0
  25. tei_loop-0.1.0/tei_loop/models.py +252 -0
  26. tei_loop-0.1.0/tei_loop/tests/__init__.py +0 -0
  27. tei_loop-0.1.0/tei_loop/tests/test_evaluator.py +141 -0
  28. tei_loop-0.1.0/tei_loop/tests/test_improver.py +107 -0
  29. tei_loop-0.1.0/tei_loop/tests/test_llm_provider.py +91 -0
  30. tei_loop-0.1.0/tei_loop/tests/test_loop.py +126 -0
  31. tei_loop-0.1.0/tei_loop/tests/test_models.py +135 -0
  32. tei_loop-0.1.0/tei_loop/tests/test_tracer.py +60 -0
  33. tei_loop-0.1.0/tei_loop/tracer.py +218 -0
  34. tei_loop-0.1.0/tei_loop.egg-info/PKG-INFO +185 -0
  35. tei_loop-0.1.0/tei_loop.egg-info/SOURCES.txt +37 -0
  36. tei_loop-0.1.0/tei_loop.egg-info/dependency_links.txt +1 -0
  37. tei_loop-0.1.0/tei_loop.egg-info/entry_points.txt +2 -0
  38. tei_loop-0.1.0/tei_loop.egg-info/requires.txt +20 -0
  39. tei_loop-0.1.0/tei_loop.egg-info/top_level.txt +4 -0
tei_loop-0.1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 Orkhan Javadli
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
tei_loop-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,185 @@
+ Metadata-Version: 2.4
+ Name: tei-loop
+ Version: 0.1.0
+ Summary: Target, Evaluate, Improve: A self-improving loop for agentic systems
+ Author-email: Orkhan Javadli <ojavadli@gmail.com>
+ License: MIT
+ Project-URL: Homepage, https://github.com/ojavadli/tei-loop
+ Project-URL: Repository, https://github.com/ojavadli/tei-loop
+ Keywords: agents,evaluation,improvement,llm,agentic-systems,self-improving
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Requires-Python: >=3.9
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: pydantic>=2.0
+ Provides-Extra: openai
+ Requires-Dist: openai>=1.40; extra == "openai"
+ Provides-Extra: anthropic
+ Requires-Dist: anthropic>=0.34; extra == "anthropic"
+ Provides-Extra: google
+ Requires-Dist: google-generativeai>=0.8; extra == "google"
+ Provides-Extra: all
+ Requires-Dist: openai>=1.40; extra == "all"
+ Requires-Dist: anthropic>=0.34; extra == "all"
+ Requires-Dist: google-generativeai>=0.8; extra == "all"
+ Provides-Extra: dev
+ Requires-Dist: pytest>=8.0; extra == "dev"
+ Requires-Dist: pytest-asyncio>=0.23; extra == "dev"
+ Requires-Dist: ruff>=0.5; extra == "dev"
+ Dynamic: license-file
+
+ # TEI Loop
+
+ **Target, Evaluate, Improve** — a self-improving loop for agentic systems.
+
+ TEI wraps any Python agent as a black box, evaluates its output across 4 dimensions using assertion-based LLM judges, and automatically applies targeted fixes when failures are detected.
+
+ ## Install
+
+ ```bash
+ pip install tei-loop
+
+ # With your preferred LLM provider:
+ pip install 'tei-loop[openai]' # OpenAI
+ pip install 'tei-loop[anthropic]' # Anthropic
+ pip install 'tei-loop[google]' # Google Gemini
+ pip install 'tei-loop[all]' # All providers
+ ```
+
+ ## Quick Start
+
+ ```python
+ import asyncio
+ from tei_loop import TEILoop
+
+ # Your existing agent — any function that takes input and returns output
+ def my_agent(query: str) -> str:
+     # ... your agent logic ...
+     return result
+
+ async def main():
+     loop = TEILoop(agent=my_agent)
+
+     # Evaluate only (baseline measurement)
+     result = await loop.evaluate_only("your test query")
+     print(result.summary())
+
+     # Full TEI loop (evaluate + improve + retry)
+     result = await loop.run("your test query")
+     print(result.summary())
+
+     # Before/after comparison
+     comparison = await loop.compare("your test query")
+     print(f"Baseline: {comparison['baseline'].baseline_score:.2f}")
+     print(f"With TEI: {comparison['improved'].final_score:.2f}")
+
+ asyncio.run(main())
+ ```
+
+ ## CLI
+
+ ```bash
+ # Evaluate your agent (baseline)
+ tei evaluate my_agent.py --query "test input" --verbose
+
+ # Run full improvement loop
+ tei improve my_agent.py --query "test input" --max-retries 3
+
+ # Before/after comparison
+ tei compare my_agent.py --query "test input"
+
+ # Generate config file
+ tei init
+ ```
+
+ TEI CLI looks for a function named `agent`, `run`, or `main` in your Python file.
+
+ ## How It Works
+
+ ### 1. Target
+ Define what success looks like. TEI evaluates across 4 dimensions:
+
+ | Dimension | What it checks |
+ |---|---|
+ | **Target Alignment** | Did the agent pursue the correct objective? |
+ | **Reasoning Soundness** | Was the reasoning logical and non-contradictory? |
+ | **Execution Accuracy** | Were the right tools called with correct parameters? |
+ | **Output Integrity** | Is the output complete, accurate, and consistent? |
+
+ ### 2. Evaluate
+ TEI runs 4 LLM judges in parallel (~3-5 seconds). Each judge produces verifiable assertions, not subjective scores. Every claim is backed by evidence from the agent's output.
+
+ ### 3. Improve
+ When a dimension fails, TEI applies the targeted fix strategy:
+
+ | Failure | Fix Strategy |
+ |---|---|
+ | Target drift | Re-anchor to original objective |
+ | Flawed reasoning | Regenerate plan with failure context |
+ | Execution errors | Correct tool calls and parameters |
+ | Output issues | Repair factual errors and fill gaps |
+
+ The loop retries automatically. Each cycle is sharper because the last was diagnosed.
+
+ ## Two Modes
+
+ **Runtime mode** (default): Per-query, 1-3 retries, fixes individual failures in seconds.
+
+ **Development mode**: Across many queries, proposes permanent prompt improvements.
+
+ ```python
+ # Development mode
+ dev_results = await loop.develop(
+     queries=["query1", "query2", "query3", ...],
+     max_iterations=50,
+ )
+ print(f"Avg improvement: {dev_results['avg_improvement']:+.2f}")
+ ```
+
+ ## Configuration
+
+ TEI auto-detects your LLM provider from environment variables (`OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, `GOOGLE_API_KEY`). No new accounts or API keys needed.
+
+ ```python
+ loop = TEILoop(
+     agent=my_agent,
+     eval_llm="gpt-5.2", # Smartest for evaluation
+     improve_llm="gpt-5.2-mini", # Cost-effective for fixes
+     max_retries=3,
+     verbose=True,
+ )
+ ```
+
+ Or via `.tei.yaml`:
+
+ ```bash
+ tei init # generates config file
+ ```
+
+ ## Cost
+
+ | Scenario | Cost |
+ |---|---|
+ | Agent passes all dimensions | ~$0.005 |
+ | One improvement cycle | ~$0.025 |
+ | Full 3-retry loop | ~$0.07 |
+
+ TEI shows the cost estimate before running.
+
+ ## Works With
+
+ TEI wraps any Python callable. No framework lock-in:
+
+ - **LangGraph** agents
+ - **CrewAI** crews
+ - **Custom Python** functions
+ - **FastAPI** endpoints
+ - **Any callable** that takes input and returns output
+
+ ## License
+
+ MIT
tei_loop-0.1.0/README.md ADDED
@@ -0,0 +1,151 @@
+ # TEI Loop
+
+ **Target, Evaluate, Improve** — a self-improving loop for agentic systems.
+
+ TEI wraps any Python agent as a black box, evaluates its output across 4 dimensions using assertion-based LLM judges, and automatically applies targeted fixes when failures are detected.
+
+ ## Install
+
+ ```bash
+ pip install tei-loop
+
+ # With your preferred LLM provider:
+ pip install 'tei-loop[openai]' # OpenAI
+ pip install 'tei-loop[anthropic]' # Anthropic
+ pip install 'tei-loop[google]' # Google Gemini
+ pip install 'tei-loop[all]' # All providers
+ ```
+
+ ## Quick Start
+
+ ```python
+ import asyncio
+ from tei_loop import TEILoop
+
+ # Your existing agent — any function that takes input and returns output
+ def my_agent(query: str) -> str:
+     # ... your agent logic ...
+     return result
+
+ async def main():
+     loop = TEILoop(agent=my_agent)
+
+     # Evaluate only (baseline measurement)
+     result = await loop.evaluate_only("your test query")
+     print(result.summary())
+
+     # Full TEI loop (evaluate + improve + retry)
+     result = await loop.run("your test query")
+     print(result.summary())
+
+     # Before/after comparison
+     comparison = await loop.compare("your test query")
+     print(f"Baseline: {comparison['baseline'].baseline_score:.2f}")
+     print(f"With TEI: {comparison['improved'].final_score:.2f}")
+
+ asyncio.run(main())
+ ```
+
+ ## CLI
+
+ ```bash
+ # Evaluate your agent (baseline)
+ tei evaluate my_agent.py --query "test input" --verbose
+
+ # Run full improvement loop
+ tei improve my_agent.py --query "test input" --max-retries 3
+
+ # Before/after comparison
+ tei compare my_agent.py --query "test input"
+
+ # Generate config file
+ tei init
+ ```
+
+ TEI CLI looks for a function named `agent`, `run`, or `main` in your Python file.
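For illustration, a minimal `my_agent.py` that satisfies this lookup convention could be as small as the sketch below; the function body is a stand-in, not part of tei-loop.

```python
# my_agent.py: minimal stand-in agent for the `tei` CLI to discover.
# The function is named `agent`, one of the names the CLI looks for.
def agent(query: str) -> str:
    # Replace this with your real agent logic (LLM calls, tool use, etc.).
    return f"Answer to: {query}"
```

With such a file on disk, `tei evaluate my_agent.py --query "test input"` would load `agent` and run the baseline evaluation.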
+
+ ## How It Works
+
+ ### 1. Target
+ Define what success looks like. TEI evaluates across 4 dimensions:
+
+ | Dimension | What it checks |
+ |---|---|
+ | **Target Alignment** | Did the agent pursue the correct objective? |
+ | **Reasoning Soundness** | Was the reasoning logical and non-contradictory? |
+ | **Execution Accuracy** | Were the right tools called with correct parameters? |
+ | **Output Integrity** | Is the output complete, accurate, and consistent? |
+
+ ### 2. Evaluate
+ TEI runs 4 LLM judges in parallel (~3-5 seconds). Each judge produces verifiable assertions, not subjective scores. Every claim is backed by evidence from the agent's output.
+
+ ### 3. Improve
+ When a dimension fails, TEI applies the targeted fix strategy:
+
+ | Failure | Fix Strategy |
+ |---|---|
+ | Target drift | Re-anchor to original objective |
+ | Flawed reasoning | Regenerate plan with failure context |
+ | Execution errors | Correct tool calls and parameters |
+ | Output issues | Repair factual errors and fill gaps |
+
+ The loop retries automatically. Each cycle is sharper because the last was diagnosed.
+
+ ## Two Modes
+
+ **Runtime mode** (default): Per-query, 1-3 retries, fixes individual failures in seconds.
+
+ **Development mode**: Across many queries, proposes permanent prompt improvements.
+
+ ```python
+ # Development mode
+ dev_results = await loop.develop(
+     queries=["query1", "query2", "query3", ...],
+     max_iterations=50,
+ )
+ print(f"Avg improvement: {dev_results['avg_improvement']:+.2f}")
+ ```
+
+ ## Configuration
+
+ TEI auto-detects your LLM provider from environment variables (`OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, `GOOGLE_API_KEY`). No new accounts or API keys needed.
+
+ ```python
+ loop = TEILoop(
+     agent=my_agent,
+     eval_llm="gpt-5.2", # Smartest for evaluation
+     improve_llm="gpt-5.2-mini", # Cost-effective for fixes
+     max_retries=3,
+     verbose=True,
+ )
+ ```
+
+ Or via `.tei.yaml`:
+
+ ```bash
+ tei init # generates config file
+ ```
+
+ ## Cost
+
+ | Scenario | Cost |
+ |---|---|
+ | Agent passes all dimensions | ~$0.005 |
+ | One improvement cycle | ~$0.025 |
+ | Full 3-retry loop | ~$0.07 |
+
+ TEI shows the cost estimate before running.
+
+ ## Works With
+
+ TEI wraps any Python callable. No framework lock-in:
+
+ - **LangGraph** agents
+ - **CrewAI** crews
+ - **Custom Python** functions
+ - **FastAPI** endpoints
+ - **Any callable** that takes input and returns output
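To illustrate the FastAPI case in the list above: TEI needs only a callable, so one pattern is to keep the endpoint's logic in a plain function, expose it through a FastAPI route, and hand the same function to `TEILoop`. A minimal sketch; the route path and the `answer` helper are hypothetical, not part of tei-loop:

```python
import asyncio

from fastapi import FastAPI

from tei_loop import TEILoop

app = FastAPI()


def answer(query: str) -> str:
    # Hypothetical endpoint logic; replace it with your real handler code.
    return f"Answer to: {query}"


@app.get("/answer")
def answer_endpoint(query: str) -> dict:
    # The HTTP route is a thin wrapper around the plain callable above.
    return {"answer": answer(query)}


async def main():
    # TEI evaluates the same callable that backs the endpoint.
    loop = TEILoop(agent=answer)
    result = await loop.evaluate_only("your test query")
    print(result.summary())


if __name__ == "__main__":
    asyncio.run(main())
```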
+
+ ## License
+
+ MIT
tei_loop-0.1.0/examples/example_crewai.py ADDED
@@ -0,0 +1,35 @@
+ """
+ Example: Using TEI with a CrewAI crew.
+
+ TEI wraps the crew's kickoff as a callable.
+ """
+ import asyncio
+ from tei_loop import TEILoop
+
+
+ # Assuming you have a CrewAI crew:
+ # from my_crew import my_crew
+ #
+ # def run_crew(query: str) -> str:
+ #     result = my_crew.kickoff(inputs={"query": query})
+ #     return str(result)
+ #
+ # async def main():
+ #     loop = TEILoop(agent=run_crew, verbose=True)
+ #     result = await loop.run("Research the latest AI trends")
+ #     print(result.summary())
+
+
+ def mock_crewai_agent(query: str) -> str:
+     """Placeholder — replace with your actual CrewAI crew."""
+     return f"CrewAI would research: {query}"
+
+
+ async def main():
+     loop = TEILoop(agent=mock_crewai_agent, verbose=True)
+     result = await loop.evaluate_only("Research competitor pricing strategies")
+     print(result.summary())
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
tei_loop-0.1.0/examples/example_custom_python.py ADDED
@@ -0,0 +1,33 @@
+ """
+ Example: Using TEI with a custom Python agent.
+
+ This is the simplest case — your agent is a plain Python function.
+ """
+ import asyncio
+ from tei_loop import TEILoop
+
+
+ def my_summarizer(text: str) -> str:
+     """Your existing agent — any function that takes input and returns output."""
+     from openai import OpenAI
+     client = OpenAI()
+     response = client.chat.completions.create(
+         model="gpt-5.2-mini",
+         messages=[
+             {"role": "system", "content": "Summarize the following text concisely."},
+             {"role": "user", "content": text},
+         ],
+     )
+     return response.choices[0].message.content
+
+
+ async def main():
+     loop = TEILoop(agent=my_summarizer, verbose=True)
+
+     result = await loop.run("The quick brown fox jumped over the lazy dog. "
+                             "It was a sunny day in the forest.")
+     print(result.summary())
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
tei_loop-0.1.0/examples/example_langgraph.py ADDED
@@ -0,0 +1,40 @@
+ """
+ Example: Using TEI with a LangGraph agent.
+
+ TEI wraps the compiled graph as a callable — no changes to your graph needed.
+ """
+ import asyncio
+ from tei_loop import TEILoop
+
+
+ # Assuming you have a LangGraph compiled graph:
+ # from my_app import compiled_graph
+ #
+ # async def main():
+ #     loop = TEILoop(
+ #         agent=lambda q: compiled_graph.invoke({"messages": [("user", q)]}),
+ #         verbose=True,
+ #     )
+ #     result = await loop.run("What restaurants are near me?")
+ #     print(result.summary())
+
+ # Or using the LangGraph adapter for deeper tracing:
+ # from tei_loop.adapters.langgraph import LangGraphAdapter
+ #
+ # adapter = LangGraphAdapter(compiled_graph)
+ # loop = TEILoop(agent=adapter.run, verbose=True)
+
+
+ def mock_langgraph_agent(query: str) -> str:
+     """Placeholder — replace with your actual LangGraph agent."""
+     return f"LangGraph would process: {query}"
+
+
+ async def main():
+     loop = TEILoop(agent=mock_langgraph_agent, verbose=True)
+     result = await loop.evaluate_only("Find Italian restaurants in SF for 4 people tonight")
+     print(result.summary())
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
tei_loop-0.1.0/pyproject.toml ADDED
@@ -0,0 +1,65 @@
+ [build-system]
+ requires = ["setuptools>=68.0", "wheel"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "tei-loop"
+ version = "0.1.0"
+ description = "Target, Evaluate, Improve: A self-improving loop for agentic systems"
+ readme = "README.md"
+ license = {text = "MIT"}
+ requires-python = ">=3.9"
+ authors = [
+     {name = "Orkhan Javadli", email = "ojavadli@gmail.com"},
+ ]
+ keywords = [
+     "agents",
+     "evaluation",
+     "improvement",
+     "llm",
+     "agentic-systems",
+     "self-improving",
+ ]
+ classifiers = [
+     "Development Status :: 3 - Alpha",
+     "Intended Audience :: Developers",
+     "License :: OSI Approved :: MIT License",
+     "Programming Language :: Python :: 3",
+     "Topic :: Scientific/Engineering :: Artificial Intelligence",
+ ]
+ dependencies = [
+     "pydantic>=2.0",
+ ]
+
+ [project.optional-dependencies]
+ openai = ["openai>=1.40"]
+ anthropic = ["anthropic>=0.34"]
+ google = ["google-generativeai>=0.8"]
+ all = [
+     "openai>=1.40",
+     "anthropic>=0.34",
+     "google-generativeai>=0.8",
+ ]
+ dev = [
+     "pytest>=8.0",
+     "pytest-asyncio>=0.23",
+     "ruff>=0.5",
+ ]
+
+ [project.scripts]
+ tei = "tei_loop.cli:main"
+
+ [project.urls]
+ Homepage = "https://github.com/ojavadli/tei-loop"
+ Repository = "https://github.com/ojavadli/tei-loop"
+
+ [tool.setuptools.packages.find]
+ where = ["."]
+
+ [tool.ruff]
+ target-version = "py310"
+ line-length = 100
+
+ [tool.pytest.ini_options]
+ asyncio_mode = "auto"
+ testpaths = ["tei_loop/tests"]
tei_loop-0.1.0/setup.cfg ADDED
@@ -0,0 +1,4 @@
+ [egg_info]
+ tag_build =
+ tag_date = 0
+
tei_loop-0.1.0/tei_loop/__init__.py ADDED
@@ -0,0 +1,46 @@
+ """
+ TEI Loop: Target -> Evaluate -> Improve
+
+ A self-improving loop for agentic systems.
+
+ Usage:
+     from tei_loop import TEILoop
+
+     loop = TEILoop(agent=my_agent_function)
+     result = await loop.run("user query")
+     print(result.summary())
+ """
+
+ from .models import (
+     Dimension,
+     TEIConfig,
+     TEIResult,
+     EvalResult,
+     Trace,
+     TraceStep,
+     Failure,
+     Fix,
+     RunMode,
+ )
+ from .loop import TEILoop
+ from .tracer import tei_trace
+ from .evaluator import TEIEvaluator
+ from .improver import TEIImprover
+
+ __version__ = "0.1.0"
+
+ __all__ = [
+     "TEILoop",
+     "TEIConfig",
+     "TEIResult",
+     "EvalResult",
+     "Trace",
+     "TraceStep",
+     "Dimension",
+     "Failure",
+     "Fix",
+     "RunMode",
+     "TEIEvaluator",
+     "TEIImprover",
+     "tei_trace",
+ ]
tei_loop-0.1.0/tei_loop/adapters/__init__.py ADDED
@@ -0,0 +1,5 @@
+ from .generic import GenericAdapter
+ from .langgraph import LangGraphAdapter
+ from .crewai import CrewAIAdapter
+
+ __all__ = ["GenericAdapter", "LangGraphAdapter", "CrewAIAdapter"]