tei-loop 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,35 @@
1
+ """
2
+ Example: Using TEI with a CrewAI crew.
3
+
4
+ TEI wraps the crew's kickoff as a callable.
5
+ """
6
+ import asyncio
7
+ from tei_loop import TEILoop
8
+
9
+
10
+ # Assuming you have a CrewAI crew:
11
+ # from my_crew import my_crew
12
+ #
13
+ # def run_crew(query: str) -> str:
14
+ # result = my_crew.kickoff(inputs={"query": query})
15
+ # return str(result)
16
+ #
17
+ # async def main():
18
+ # loop = TEILoop(agent=run_crew, verbose=True)
19
+ # result = await loop.run("Research the latest AI trends")
20
+ # print(result.summary())
21
+
22
+
23
def mock_crewai_agent(query: str) -> str:
    """Stand-in agent; swap in a real CrewAI crew kickoff here."""
    return "CrewAI would research: {}".format(query)
26
+
27
+
28
async def main():
    """Evaluate the mock agent once through TEI and print its report."""
    tei = TEILoop(agent=mock_crewai_agent, verbose=True)
    evaluation = await tei.evaluate_only("Research competitor pricing strategies")
    print(evaluation.summary())
32
+
33
+
34
# Entry point: run the async example when executed as a script.
if __name__ == "__main__":
    asyncio.run(main())
@@ -0,0 +1,33 @@
1
+ """
2
+ Example: Using TEI with a custom Python agent.
3
+
4
+ This is the simplest case — your agent is a plain Python function.
5
+ """
6
+ import asyncio
7
+ from tei_loop import TEILoop
8
+
9
+
10
def my_summarizer(text: str) -> str:
    """Example agent: summarize *text* with the OpenAI chat API.

    Any plain function mapping input -> output can play this role.
    """
    # Imported lazily so the example only needs `openai` when actually run.
    from openai import OpenAI

    chat_messages = [
        {"role": "system", "content": "Summarize the following text concisely."},
        {"role": "user", "content": text},
    ]
    completion = OpenAI().chat.completions.create(
        model="gpt-5.2-mini",
        messages=chat_messages,
    )
    return completion.choices[0].message.content
22
+
23
+
24
async def main():
    """Run the summarizer once through the TEI loop and print the report."""
    sample_text = (
        "The quick brown fox jumped over the lazy dog. "
        "It was a sunny day in the forest."
    )
    tei = TEILoop(agent=my_summarizer, verbose=True)
    outcome = await tei.run(sample_text)
    print(outcome.summary())
30
+
31
+
32
# Entry point: run the async example when executed as a script.
if __name__ == "__main__":
    asyncio.run(main())
@@ -0,0 +1,40 @@
1
+ """
2
+ Example: Using TEI with a LangGraph agent.
3
+
4
+ TEI wraps the compiled graph as a callable — no changes to your graph needed.
5
+ """
6
+ import asyncio
7
+ from tei_loop import TEILoop
8
+
9
+
10
+ # Assuming you have a LangGraph compiled graph:
11
+ # from my_app import compiled_graph
12
+ #
13
+ # async def main():
14
+ # loop = TEILoop(
15
+ # agent=lambda q: compiled_graph.invoke({"messages": [("user", q)]}),
16
+ # verbose=True,
17
+ # )
18
+ # result = await loop.run("What restaurants are near me?")
19
+ # print(result.summary())
20
+
21
+ # Or using the LangGraph adapter for deeper tracing:
22
+ # from tei_loop.adapters.langgraph import LangGraphAdapter
23
+ #
24
+ # adapter = LangGraphAdapter(compiled_graph)
25
+ # loop = TEILoop(agent=adapter.run, verbose=True)
26
+
27
+
28
def mock_langgraph_agent(query: str) -> str:
    """Stand-in agent; swap in a real compiled LangGraph here."""
    return "LangGraph would process: {}".format(query)
31
+
32
+
33
async def main():
    """Evaluate the mock LangGraph agent once through TEI and print the report."""
    tei = TEILoop(agent=mock_langgraph_agent, verbose=True)
    evaluation = await tei.evaluate_only(
        "Find Italian restaurants in SF for 4 people tonight"
    )
    print(evaluation.summary())
37
+
38
+
39
# Entry point: run the async example when executed as a script.
if __name__ == "__main__":
    asyncio.run(main())
tei_loop/__init__.py ADDED
@@ -0,0 +1,46 @@
1
+ """
2
+ TEI Loop: Target -> Evaluate -> Improve
3
+
4
+ A self-improving loop for agentic systems.
5
+
6
+ Usage:
7
+ from tei_loop import TEILoop
8
+
9
+ loop = TEILoop(agent=my_agent_function)
10
+ result = await loop.run("user query")
11
+ print(result.summary())
12
+ """
13
+
14
+ from .models import (
15
+ Dimension,
16
+ TEIConfig,
17
+ TEIResult,
18
+ EvalResult,
19
+ Trace,
20
+ TraceStep,
21
+ Failure,
22
+ Fix,
23
+ RunMode,
24
+ )
25
+ from .loop import TEILoop
26
+ from .tracer import tei_trace
27
+ from .evaluator import TEIEvaluator
28
+ from .improver import TEIImprover
29
+
30
+ __version__ = "0.1.0"
31
+
32
+ __all__ = [
33
+ "TEILoop",
34
+ "TEIConfig",
35
+ "TEIResult",
36
+ "EvalResult",
37
+ "Trace",
38
+ "TraceStep",
39
+ "Dimension",
40
+ "Failure",
41
+ "Fix",
42
+ "RunMode",
43
+ "TEIEvaluator",
44
+ "TEIImprover",
45
+ "tei_trace",
46
+ ]
@@ -0,0 +1,5 @@
1
+ from .generic import GenericAdapter
2
+ from .langgraph import LangGraphAdapter
3
+ from .crewai import CrewAIAdapter
4
+
5
+ __all__ = ["GenericAdapter", "LangGraphAdapter", "CrewAIAdapter"]
@@ -0,0 +1,237 @@
1
+ """
2
+ CrewAI Adapter — deep task-level tracing.
3
+
4
+ Hooks into CrewAI's Crew execution to capture each task as a separate
5
+ TraceStep, giving TEI per-task visibility for diagnosis.
6
+
7
+ Works with CrewAI >=0.60. If CrewAI is not installed, the adapter
8
+ still works via generic kickoff wrapping.
9
+ """
10
+
11
+ from __future__ import annotations
12
+
13
+ import asyncio
14
+ import inspect
15
+ import time
16
+ from typing import Any, Callable, Optional
17
+
18
+ from ..tracer import TEITracer, _safe_serialize
19
+ from ..models import Trace, TraceStep
20
+
21
+
22
class CrewAIAdapter:
    """Wraps a CrewAI Crew for TEI with per-task tracing.

    All CrewAI access is duck-typed (``hasattr``/``getattr``), so the adapter
    degrades gracefully across CrewAI versions: per-task callback injection
    when available, otherwise async or thread-wrapped kickoff.
    """

    def __init__(self, crew: Any, name: str = "crewai_agent"):
        # `crew` is treated as an opaque object; only attributes probed below
        # (tasks, agents, kickoff, kickoff_async, process) are ever touched.
        self.crew = crew
        self.name = name
        self._tracer = TEITracer()

    async def run(self, query: Any, context: Optional[dict[str, Any]] = None) -> Trace:
        """Execute the crew for `query` and return the collected Trace.

        `context`, if given, is merged into the kickoff inputs alongside
        {"query": str(query)}. Exceptions from the crew are not re-raised;
        they are recorded on the final "crewai_total" step instead.
        """
        self._tracer.start()
        overall_start = time.time()

        inputs = {"query": str(query)}
        if context:
            # NOTE: a context key named "query" would override the query here.
            inputs.update(context)

        output = None
        error_msg = None

        try:
            # Record crew configuration first so diagnosis has static context
            # even if execution fails.
            self._trace_crew_config()

            # Strategy selection, most granular first:
            # 1) per-task callbacks, 2) native async kickoff, 3) sync in thread.
            if self._has_task_callbacks():
                output = await self._run_with_callbacks(inputs)
            elif hasattr(self.crew, "kickoff_async"):
                output = await self._run_async(inputs)
            else:
                output = await self._run_sync(inputs)
        except Exception as e:
            # Swallow and record: the trace must always be finishable.
            error_msg = f"{type(e).__name__}: {e}"

        overall_ms = (time.time() - overall_start) * 1000
        self._tracer.add_step(
            name="crewai_total",
            step_type="orchestrator",
            input_data=query,
            output_data=output,
            duration_ms=overall_ms,
            error=error_msg,
            metadata={
                "adapter": "crewai",
                "task_count": self._get_task_count(),
                "agent_count": self._get_agent_count(),
            },
        )

        return self._tracer.finish(agent_input=query, agent_output=output)

    def _trace_crew_config(self) -> None:
        """Capture crew configuration as a trace step for diagnostic context."""
        agents_info = []
        if hasattr(self.crew, "agents"):
            for ag in self.crew.agents:
                # Long free-text fields are truncated to 200 chars to keep
                # the trace step compact.
                agents_info.append({
                    "role": getattr(ag, "role", "unknown"),
                    "goal": str(getattr(ag, "goal", ""))[:200],
                    "backstory": str(getattr(ag, "backstory", ""))[:200],
                    "tools": [
                        getattr(t, "name", str(t))
                        for t in getattr(ag, "tools", [])
                    ],
                })

        tasks_info = []
        if hasattr(self.crew, "tasks"):
            for task in self.crew.tasks:
                tasks_info.append({
                    "description": str(getattr(task, "description", ""))[:200],
                    "expected_output": str(getattr(task, "expected_output", ""))[:200],
                    "agent_role": getattr(
                        getattr(task, "agent", None), "role", "unassigned"
                    ),
                })

        self._tracer.add_step(
            name="crew_config",
            step_type="crewai_config",
            metadata={
                "agents": agents_info,
                "tasks": tasks_info,
                "process": str(getattr(self.crew, "process", "sequential")),
            },
        )

    def _has_task_callbacks(self) -> bool:
        """Check if crew tasks support callback injection."""
        if not hasattr(self.crew, "tasks"):
            return False
        tasks = self.crew.tasks
        if not tasks:
            return False
        # Only the first task is probed; assumes tasks are homogeneous.
        return hasattr(tasks[0], "callback")

    async def _run_with_callbacks(self, inputs: dict[str, Any]) -> Any:
        """Inject per-task callbacks to capture task-level output.

        Original callbacks are saved and restored in a finally block, so the
        crew object is left unmodified even if kickoff raises.
        """
        # NOTE(review): collected by the callbacks below but never read
        # afterwards in this method — dead state or future-use; confirm.
        task_results: list[dict[str, Any]] = []

        original_callbacks = []
        for i, task in enumerate(self.crew.tasks):
            original_callbacks.append(getattr(task, "callback", None))

            task_name = getattr(
                getattr(task, "agent", None), "role", f"task_{i}"
            )
            task_desc = str(getattr(task, "description", ""))[:200]

            # Factory binds idx/name/desc eagerly, avoiding the classic
            # late-binding-closure-in-a-loop pitfall.
            def make_callback(idx: int, name: str, desc: str):
                # NOTE(review): task_start is captured when the callback is
                # *created* (before kickoff), so duration_ms measures elapsed
                # time since injection, not since the task began — later
                # tasks' durations include earlier tasks' runtime. Confirm
                # whether this is intended.
                task_start = time.time()

                def cb(output: Any) -> None:
                    task_ms = (time.time() - task_start) * 1000
                    # CrewAI TaskOutput exposes `.raw`; fall back to str().
                    raw_output = getattr(output, "raw", str(output))

                    self._tracer.add_step(
                        name=name,
                        step_type="crewai_task",
                        input_data=desc,
                        output_data=raw_output,
                        duration_ms=task_ms,
                        metadata={
                            "task_index": idx,
                            "agent_role": name,
                        },
                    )
                    task_results.append({
                        "task_index": idx,
                        "agent": name,
                        "output_preview": str(raw_output)[:300],
                    })

                return cb

            task.callback = make_callback(i, task_name, task_desc)

        try:
            if hasattr(self.crew, "kickoff_async"):
                result = await self.crew.kickoff_async(inputs=inputs)
            else:
                # Sync kickoff is offloaded so the event loop is not blocked.
                result = await asyncio.to_thread(
                    self.crew.kickoff, inputs=inputs
                )
        finally:
            # Always restore the crew's original callbacks.
            for i, task in enumerate(self.crew.tasks):
                task.callback = original_callbacks[i]

        return self._extract_output(result)

    async def _run_async(self, inputs: dict[str, Any]) -> Any:
        """Fallback: async kickoff without per-task callbacks."""
        task_start = time.time()
        result = await self.crew.kickoff_async(inputs=inputs)
        task_ms = (time.time() - task_start) * 1000

        self._tracer.add_step(
            name="kickoff_async",
            step_type="crewai_kickoff",
            input_data=inputs,
            output_data=self._extract_output(result),
            duration_ms=task_ms,
        )

        # Recover per-task detail post-hoc from the CrewOutput, if present.
        self._trace_task_outputs(result)
        return self._extract_output(result)

    async def _run_sync(self, inputs: dict[str, Any]) -> Any:
        """Fallback: sync kickoff wrapped in thread."""
        task_start = time.time()
        result = await asyncio.to_thread(self.crew.kickoff, inputs=inputs)
        task_ms = (time.time() - task_start) * 1000

        self._tracer.add_step(
            name="kickoff",
            step_type="crewai_kickoff",
            input_data=inputs,
            output_data=self._extract_output(result),
            duration_ms=task_ms,
        )

        # Recover per-task detail post-hoc from the CrewOutput, if present.
        self._trace_task_outputs(result)
        return self._extract_output(result)

    def _trace_task_outputs(self, crew_output: Any) -> None:
        """Extract per-task outputs from CrewOutput (post-hoc tracing).

        No per-task duration is available on this path; steps carry output
        and metadata only.
        """
        tasks_output = getattr(crew_output, "tasks_output", None)
        if not tasks_output:
            return

        for i, task_out in enumerate(tasks_output):
            agent_name = getattr(task_out, "agent", f"task_{i}")
            raw = getattr(task_out, "raw", str(task_out))

            self._tracer.add_step(
                name=str(agent_name),
                step_type="crewai_task_output",
                output_data=raw,
                metadata={
                    "task_index": i,
                    "description": str(
                        getattr(task_out, "description", "")
                    )[:200],
                },
            )

    def _extract_output(self, result: Any) -> Any:
        # Prefer CrewOutput.raw; anything else is stringified.
        if hasattr(result, "raw"):
            return result.raw
        return str(result)

    def _get_task_count(self) -> int:
        # 0 when the crew exposes no `tasks` attribute.
        return len(getattr(self.crew, "tasks", []))

    def _get_agent_count(self) -> int:
        # 0 when the crew exposes no `agents` attribute.
        return len(getattr(self.crew, "agents", []))

    def __repr__(self) -> str:
        return f"CrewAIAdapter(name={self.name!r})"
@@ -0,0 +1,41 @@
1
+ """
2
+ Generic Python Adapter.
3
+
4
+ Wraps any Python callable (sync or async) as a TEI-compatible agent.
5
+ This is the primary adapter and covers 100% of agents since every
6
+ framework (LangGraph, CrewAI, custom) produces a Python callable.
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ import inspect
12
+ from typing import Any, Callable, Optional
13
+
14
+ from ..tracer import TEITracer, run_and_trace
15
+ from ..models import Trace
16
+
17
+
18
class GenericAdapter:
    """Adapts an arbitrary Python callable (sync or async) to the TEI agent API."""

    def __init__(
        self,
        agent_fn: Callable,
        name: str = "agent",
        description: str = "",
    ):
        self.agent_fn = agent_fn
        # Fall back to the callable's own __name__ when no usable name is given.
        if name:
            self.name = name
        else:
            self.name = getattr(agent_fn, "__name__", "agent")
        self.description = description
        self._tracer = TEITracer()

    async def run(self, query: Any, context: Optional[dict[str, Any]] = None) -> Trace:
        """Invoke the wrapped callable under tracing and return the Trace."""
        trace = await run_and_trace(
            self.agent_fn, query, tracer=self._tracer, context=context
        )
        return trace

    def __repr__(self) -> str:
        return "GenericAdapter(name={!r})".format(self.name)
@@ -0,0 +1,159 @@
1
+ """
2
+ LangGraph Adapter — deep node-level tracing.
3
+
4
+ Hooks into LangGraph's compiled graph to capture each node execution
5
+ as a separate TraceStep, giving TEI per-node visibility for diagnosis.
6
+
7
+ Works with LangGraph >=0.2 compiled StateGraphs.
8
+ If LangGraph is not installed, falls back to generic black-box tracing.
9
+ """
10
+
11
+ from __future__ import annotations
12
+
13
+ import asyncio
14
+ import inspect
15
+ import time
16
+ from typing import Any, Callable, Optional
17
+
18
+ from ..tracer import TEITracer, _safe_serialize
19
+ from ..models import Trace, TraceStep
20
+
21
+
22
class LangGraphAdapter:
    """Wraps a LangGraph compiled graph for TEI with per-node tracing.

    Access to the graph is duck-typed: prefers `astream_events` for per-node
    granularity, then `ainvoke`, then a thread-wrapped sync `invoke`.
    """

    def __init__(self, graph: Any, name: str = "langgraph_agent"):
        self.graph = graph
        self.name = name
        self._tracer = TEITracer()

    async def run(self, query: Any, context: Optional[dict[str, Any]] = None) -> Trace:
        """Execute the graph for `query` and return the collected Trace.

        NOTE(review): `context` is accepted for interface parity but never
        used on this path — confirm whether it should be merged into the
        input payload.  Exceptions are not re-raised; they are recorded on
        the final "langgraph_total" step.
        """
        self._tracer.start()
        overall_start = time.time()

        input_payload = self._build_input(query)
        output = None
        error_msg = None

        try:
            # Most granular strategy first.
            if self._has_stream_events():
                output = await self._run_with_stream(input_payload)
            elif hasattr(self.graph, "ainvoke"):
                output = await self._run_async(input_payload)
            else:
                output = await self._run_sync(input_payload)
        except Exception as e:
            # Swallow and record so the trace can always be finished.
            error_msg = f"{type(e).__name__}: {e}"

        overall_ms = (time.time() - overall_start) * 1000
        self._tracer.add_step(
            name="langgraph_total",
            step_type="orchestrator",
            input_data=query,
            output_data=output,
            duration_ms=overall_ms,
            error=error_msg,
            # node_count is the number of steps recorded *before* this
            # summary step is added (includes tool steps, not only nodes).
            metadata={"adapter": "langgraph", "node_count": len(self._tracer._steps)},
        )

        return self._tracer.finish(agent_input=query, agent_output=output)

    def _build_input(self, query: Any) -> dict[str, Any]:
        # Dicts pass through untouched; anything else becomes a single
        # user message in LangGraph's conventional "messages" shape.
        if isinstance(query, dict):
            return query
        return {"messages": [("user", str(query))]}

    def _has_stream_events(self) -> bool:
        return hasattr(self.graph, "astream_events")

    async def _run_with_stream(self, input_payload: dict[str, Any]) -> Any:
        """Use LangGraph's astream_events to capture per-node execution.

        Tracks at most one pending node at a time via `current_node`.
        NOTE(review): a nested on_chain_start overwrites the pending node's
        bookkeeping — acceptable for flat graphs; confirm for nested ones.
        Returns the output of the last node that completed.
        """
        final_output = None
        current_node: Optional[str] = None
        node_start: float = 0.0
        node_input: Any = None

        async for event in self.graph.astream_events(input_payload, version="v2"):
            kind = event.get("event", "")
            name = event.get("name", "")
            data = event.get("data", {})

            # The top-level graph run also emits on_chain_start; skip it by
            # name so only inner nodes are tracked.
            if kind == "on_chain_start" and name != self.name:
                current_node = name
                node_start = time.time()
                node_input = data.get("input")

            elif kind == "on_chain_end" and name == current_node:
                node_output = data.get("output")
                node_ms = (time.time() - node_start) * 1000

                self._tracer.add_step(
                    name=current_node,
                    step_type="langgraph_node",
                    input_data=node_input,
                    output_data=node_output,
                    duration_ms=node_ms,
                    metadata={"event_kind": kind},
                )
                final_output = node_output
                current_node = None

            elif kind == "on_chain_error" and name == current_node:
                node_ms = (time.time() - node_start) * 1000
                self._tracer.add_step(
                    name=current_node,
                    step_type="langgraph_node",
                    input_data=node_input,
                    duration_ms=node_ms,
                    error=str(data.get("error", "unknown")),
                )
                current_node = None

            elif kind == "on_tool_start":
                self._tracer.add_step(
                    name=name,
                    step_type="tool_call",
                    input_data=data.get("input"),
                    metadata={"tool_name": name},
                )

            elif kind == "on_tool_end":
                # Patch the tool's output onto the step recorded at
                # on_tool_start, but only if it is still the latest step.
                if self._tracer._steps and self._tracer._steps[-1].name == name:
                    self._tracer._steps[-1].output_data = _safe_serialize(
                        data.get("output")
                    )

        return final_output

    async def _run_async(self, input_payload: dict[str, Any]) -> Any:
        """Fallback: ainvoke without streaming (less granular)."""
        node_start = time.time()
        result = await self.graph.ainvoke(input_payload)
        node_ms = (time.time() - node_start) * 1000

        self._tracer.add_step(
            name="ainvoke",
            step_type="langgraph_invoke",
            input_data=input_payload,
            output_data=result,
            duration_ms=node_ms,
        )
        return result

    async def _run_sync(self, input_payload: dict[str, Any]) -> Any:
        """Fallback: sync invoke wrapped in thread."""
        node_start = time.time()
        # Offload so the blocking invoke does not stall the event loop.
        result = await asyncio.to_thread(self.graph.invoke, input_payload)
        node_ms = (time.time() - node_start) * 1000

        self._tracer.add_step(
            name="invoke",
            step_type="langgraph_invoke",
            input_data=input_payload,
            output_data=result,
            duration_ms=node_ms,
        )
        return result

    def __repr__(self) -> str:
        return f"LangGraphAdapter(name={self.name!r})"