agent-lighthouse 0.3.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_lighthouse-0.3.0/PKG-INFO +150 -0
- agent_lighthouse-0.3.0/README.md +118 -0
- agent_lighthouse-0.3.0/agent_lighthouse/__init__.py +50 -0
- agent_lighthouse-0.3.0/agent_lighthouse/adapters/__init__.py +13 -0
- agent_lighthouse-0.3.0/agent_lighthouse/adapters/autogen.py +113 -0
- agent_lighthouse-0.3.0/agent_lighthouse/adapters/crewai.py +179 -0
- agent_lighthouse-0.3.0/agent_lighthouse/adapters/langchain.py +255 -0
- agent_lighthouse-0.3.0/agent_lighthouse/auto.py +543 -0
- agent_lighthouse-0.3.0/agent_lighthouse/client.py +459 -0
- agent_lighthouse-0.3.0/agent_lighthouse/pricing.py +94 -0
- agent_lighthouse-0.3.0/agent_lighthouse/serialization.py +61 -0
- agent_lighthouse-0.3.0/agent_lighthouse/tracer.py +686 -0
- agent_lighthouse-0.3.0/agent_lighthouse.egg-info/PKG-INFO +150 -0
- agent_lighthouse-0.3.0/agent_lighthouse.egg-info/SOURCES.txt +19 -0
- agent_lighthouse-0.3.0/agent_lighthouse.egg-info/dependency_links.txt +1 -0
- agent_lighthouse-0.3.0/agent_lighthouse.egg-info/requires.txt +14 -0
- agent_lighthouse-0.3.0/agent_lighthouse.egg-info/top_level.txt +1 -0
- agent_lighthouse-0.3.0/pyproject.toml +45 -0
- agent_lighthouse-0.3.0/setup.cfg +4 -0
- agent_lighthouse-0.3.0/tests/test_auto_instrumentation.py +227 -0
- agent_lighthouse-0.3.0/tests/test_tracer_context.py +108 -0
|
@@ -0,0 +1,150 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: agent-lighthouse
|
|
3
|
+
Version: 0.3.0
|
|
4
|
+
Summary: Multi-Agent Observability SDK for Agent Lighthouse
|
|
5
|
+
Author-email: Agent Lighthouse Contributors <maintainers@agent-lighthouse.dev>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/noogler-aditya/Agent-Lighthouse
|
|
8
|
+
Project-URL: Documentation, https://github.com/noogler-aditya/Agent-Lighthouse#readme
|
|
9
|
+
Project-URL: Repository, https://github.com/noogler-aditya/Agent-Lighthouse
|
|
10
|
+
Keywords: agents,multi-agent,observability,tracing,debugging
|
|
11
|
+
Classifier: Development Status :: 3 - Alpha
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
19
|
+
Requires-Python: >=3.9
|
|
20
|
+
Description-Content-Type: text/markdown
|
|
21
|
+
Requires-Dist: httpx>=0.25.0
|
|
22
|
+
Provides-Extra: crewai
|
|
23
|
+
Requires-Dist: crewai>=0.1.0; extra == "crewai"
|
|
24
|
+
Provides-Extra: langgraph
|
|
25
|
+
Requires-Dist: langgraph>=0.0.1; extra == "langgraph"
|
|
26
|
+
Provides-Extra: dev
|
|
27
|
+
Requires-Dist: pytest>=8.3.0; extra == "dev"
|
|
28
|
+
Requires-Dist: pytest-asyncio>=0.24.0; extra == "dev"
|
|
29
|
+
Requires-Dist: pytest-cov>=5.0.0; extra == "dev"
|
|
30
|
+
Requires-Dist: ruff>=0.8.0; extra == "dev"
|
|
31
|
+
Requires-Dist: bandit>=1.8.0; extra == "dev"
|
|
32
|
+
|
|
33
|
+
# Agent Lighthouse SDK (Python)
|
|
34
|
+
|
|
35
|
+
The official Python client for instrumenting AI agents with Agent Lighthouse.
|
|
36
|
+
|
|
37
|
+
## Features
|
|
38
|
+
|
|
39
|
+
- **Automatic Tracing**: Decorators for agents, tools, and LLM calls.
|
|
40
|
+
- **Async Support**: Fully compatible with async/await workflows.
|
|
41
|
+
- **State Management**: Expose internal agent state (memory, context) for real-time inspection.
|
|
42
|
+
- **Token Tracking**: Automatically capture token usage and costs from LLM responses.
|
|
43
|
+
|
|
44
|
+
## Installation
|
|
45
|
+
|
|
46
|
+
Install from PyPI:
|
|
47
|
+
|
|
48
|
+
```bash
|
|
49
|
+
pip install agent-lighthouse
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
Or install from source in development mode:
|
|
53
|
+
|
|
54
|
+
```bash
|
|
55
|
+
cd sdk
|
|
56
|
+
pip install -e .
|
|
57
|
+
```
|
|
58
|
+
|
|
59
|
+
## Quick Start
|
|
60
|
+
|
|
61
|
+
### 1. Initialize Tracer
|
|
62
|
+
|
|
63
|
+
```python
|
|
64
|
+
from agent_lighthouse import LighthouseTracer
|
|
65
|
+
|
|
66
|
+
# Use your API Key (starts with lh_)
|
|
67
|
+
tracer = LighthouseTracer(api_key="lh_...")
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
### 2. Add Decorators
|
|
71
|
+
|
|
72
|
+
Wrap your functions with `@trace_agent`, `@trace_tool`, or `@trace_llm`.
|
|
73
|
+
|
|
74
|
+
```python
|
|
75
|
+
from agent_lighthouse import trace_agent, trace_tool, trace_llm
|
|
76
|
+
|
|
77
|
+
@trace_tool("Web Search")
|
|
78
|
+
def search_web(query):
|
|
79
|
+
# ... logic ...
|
|
80
|
+
return results
|
|
81
|
+
|
|
82
|
+
@trace_llm("GPT-4", model="gpt-4-turbo", cost_per_1k_prompt=0.01)
|
|
83
|
+
def call_llm(prompt):
|
|
84
|
+
# ... call OpenAI ...
|
|
85
|
+
return response
|
|
86
|
+
|
|
87
|
+
@trace_agent("Researcher")
|
|
88
|
+
def run_research_agent(topic):
|
|
89
|
+
data = search_web(topic)
|
|
90
|
+
summary = call_llm(f"Summarize {data}")
|
|
91
|
+
return summary
|
|
92
|
+
```
|
|
93
|
+
|
|
94
|
+
### 3. Run It
|
|
95
|
+
|
|
96
|
+
Just run your script as normal. The SDK will automatically send traces to the backend.
|
|
97
|
+
|
|
98
|
+
## State Inspection
|
|
99
|
+
|
|
100
|
+
Allow humans to inspect and modify agent state during execution:
|
|
101
|
+
|
|
102
|
+
```python
|
|
103
|
+
from agent_lighthouse import get_tracer
|
|
104
|
+
|
|
105
|
+
@trace_agent("Writer")
|
|
106
|
+
def writer_agent():
|
|
107
|
+
tracer = get_tracer()
|
|
108
|
+
|
|
109
|
+
# Expose state
|
|
110
|
+
tracer.update_state(
|
|
111
|
+
memory={"draft": "Initial draft..."},
|
|
112
|
+
context={"tone": "Professional"}
|
|
113
|
+
)
|
|
114
|
+
|
|
115
|
+
# ... execution continues ...
|
|
116
|
+
```
|
|
117
|
+
|
|
118
|
+
## Zero-Touch Auto-Instrumentation (Magic Import)
|
|
119
|
+
|
|
120
|
+
No code changes to your LLM calls. Just import once at the top of your script:
|
|
121
|
+
|
|
122
|
+
```python
|
|
123
|
+
import agent_lighthouse.auto # auto-instruments OpenAI, Anthropic, requests, and frameworks
|
|
124
|
+
```
|
|
125
|
+
|
|
126
|
+
This automatically captures:
|
|
127
|
+
- LLM latency
|
|
128
|
+
- Token usage
|
|
129
|
+
- Cost (best-effort pricing)
|
|
130
|
+
|
|
131
|
+
Content capture is **off by default**. Enable if you explicitly want payloads:
|
|
132
|
+
|
|
133
|
+
```bash
|
|
134
|
+
export LIGHTHOUSE_CAPTURE_CONTENT=true
|
|
135
|
+
```
|
|
136
|
+
|
|
137
|
+
## Configuration
|
|
138
|
+
|
|
139
|
+
You can configure the SDK via environment variables:
|
|
140
|
+
|
|
141
|
+
| Variable | Description | Default |
|
|
142
|
+
|----------|-------------|---------|
|
|
143
|
+
| `LIGHTHOUSE_API_KEY` | Your machine API key | `None` |
|
|
144
|
+
| `LIGHTHOUSE_BASE_URL` | URL of the backend API | `http://localhost:8000` |
|
|
145
|
+
| `LIGHTHOUSE_AUTO_INSTRUMENT` | Enable auto-instrumentation | `1` |
|
|
146
|
+
| `LIGHTHOUSE_CAPTURE_CONTENT` | Capture request/response payloads | `false` |
|
|
147
|
+
| `LIGHTHOUSE_LLM_HOSTS` | Allowlist extra LLM hosts for requests instrumentation | `""` |
|
|
148
|
+
| `LIGHTHOUSE_PRICING_JSON` | Pricing override JSON string | `""` |
|
|
149
|
+
| `LIGHTHOUSE_PRICING_PATH` | Pricing override JSON file path | `""` |
|
|
150
|
+
| `LIGHTHOUSE_DISABLE_FRAMEWORKS` | Disable framework adapters (comma-separated list) | `""` |
|
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
# Agent Lighthouse SDK (Python)
|
|
2
|
+
|
|
3
|
+
The official Python client for instrumenting AI agents with Agent Lighthouse.
|
|
4
|
+
|
|
5
|
+
## Features
|
|
6
|
+
|
|
7
|
+
- **Automatic Tracing**: Decorators for agents, tools, and LLM calls.
|
|
8
|
+
- **Async Support**: Fully compatible with async/await workflows.
|
|
9
|
+
- **State Management**: Expose internal agent state (memory, context) for real-time inspection.
|
|
10
|
+
- **Token Tracking**: Automatically capture token usage and costs from LLM responses.
|
|
11
|
+
|
|
12
|
+
## Installation
|
|
13
|
+
|
|
14
|
+
Install from PyPI:
|
|
15
|
+
|
|
16
|
+
```bash
|
|
17
|
+
pip install agent-lighthouse
|
|
18
|
+
```
|
|
19
|
+
|
|
20
|
+
Or install from source in development mode:
|
|
21
|
+
|
|
22
|
+
```bash
|
|
23
|
+
cd sdk
|
|
24
|
+
pip install -e .
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
## Quick Start
|
|
28
|
+
|
|
29
|
+
### 1. Initialize Tracer
|
|
30
|
+
|
|
31
|
+
```python
|
|
32
|
+
from agent_lighthouse import LighthouseTracer
|
|
33
|
+
|
|
34
|
+
# Use your API Key (starts with lh_)
|
|
35
|
+
tracer = LighthouseTracer(api_key="lh_...")
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
### 2. Add Decorators
|
|
39
|
+
|
|
40
|
+
Wrap your functions with `@trace_agent`, `@trace_tool`, or `@trace_llm`.
|
|
41
|
+
|
|
42
|
+
```python
|
|
43
|
+
from agent_lighthouse import trace_agent, trace_tool, trace_llm
|
|
44
|
+
|
|
45
|
+
@trace_tool("Web Search")
|
|
46
|
+
def search_web(query):
|
|
47
|
+
# ... logic ...
|
|
48
|
+
return results
|
|
49
|
+
|
|
50
|
+
@trace_llm("GPT-4", model="gpt-4-turbo", cost_per_1k_prompt=0.01)
|
|
51
|
+
def call_llm(prompt):
|
|
52
|
+
# ... call OpenAI ...
|
|
53
|
+
return response
|
|
54
|
+
|
|
55
|
+
@trace_agent("Researcher")
|
|
56
|
+
def run_research_agent(topic):
|
|
57
|
+
data = search_web(topic)
|
|
58
|
+
summary = call_llm(f"Summarize {data}")
|
|
59
|
+
return summary
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
### 3. Run It
|
|
63
|
+
|
|
64
|
+
Just run your script as normal. The SDK will automatically send traces to the backend.
|
|
65
|
+
|
|
66
|
+
## State Inspection
|
|
67
|
+
|
|
68
|
+
Allow humans to inspect and modify agent state during execution:
|
|
69
|
+
|
|
70
|
+
```python
|
|
71
|
+
from agent_lighthouse import get_tracer
|
|
72
|
+
|
|
73
|
+
@trace_agent("Writer")
|
|
74
|
+
def writer_agent():
|
|
75
|
+
tracer = get_tracer()
|
|
76
|
+
|
|
77
|
+
# Expose state
|
|
78
|
+
tracer.update_state(
|
|
79
|
+
memory={"draft": "Initial draft..."},
|
|
80
|
+
context={"tone": "Professional"}
|
|
81
|
+
)
|
|
82
|
+
|
|
83
|
+
# ... execution continues ...
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
## Zero-Touch Auto-Instrumentation (Magic Import)
|
|
87
|
+
|
|
88
|
+
No code changes to your LLM calls. Just import once at the top of your script:
|
|
89
|
+
|
|
90
|
+
```python
|
|
91
|
+
import agent_lighthouse.auto # auto-instruments OpenAI, Anthropic, requests, and frameworks
|
|
92
|
+
```
|
|
93
|
+
|
|
94
|
+
This automatically captures:
|
|
95
|
+
- LLM latency
|
|
96
|
+
- Token usage
|
|
97
|
+
- Cost (best-effort pricing)
|
|
98
|
+
|
|
99
|
+
Content capture is **off by default**. Enable if you explicitly want payloads:
|
|
100
|
+
|
|
101
|
+
```bash
|
|
102
|
+
export LIGHTHOUSE_CAPTURE_CONTENT=true
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
## Configuration
|
|
106
|
+
|
|
107
|
+
You can configure the SDK via environment variables:
|
|
108
|
+
|
|
109
|
+
| Variable | Description | Default |
|
|
110
|
+
|----------|-------------|---------|
|
|
111
|
+
| `LIGHTHOUSE_API_KEY` | Your machine API key | `None` |
|
|
112
|
+
| `LIGHTHOUSE_BASE_URL` | URL of the backend API | `http://localhost:8000` |
|
|
113
|
+
| `LIGHTHOUSE_AUTO_INSTRUMENT` | Enable auto-instrumentation | `1` |
|
|
114
|
+
| `LIGHTHOUSE_CAPTURE_CONTENT` | Capture request/response payloads | `false` |
|
|
115
|
+
| `LIGHTHOUSE_LLM_HOSTS` | Allowlist extra LLM hosts for requests instrumentation | `""` |
|
|
116
|
+
| `LIGHTHOUSE_PRICING_JSON` | Pricing override JSON string | `""` |
|
|
117
|
+
| `LIGHTHOUSE_PRICING_PATH` | Pricing override JSON file path | `""` |
|
|
118
|
+
| `LIGHTHOUSE_DISABLE_FRAMEWORKS` | Disable framework adapters (comma-separated list) | `""` |
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Agent Lighthouse SDK
|
|
3
|
+
Multi-Agent Observability for AI Systems
|
|
4
|
+
|
|
5
|
+
Features:
|
|
6
|
+
- Framework-agnostic tracing for any multi-agent system
|
|
7
|
+
- Sync + async support
|
|
8
|
+
- Thread-safe span tracking
|
|
9
|
+
- Fail-silent mode (never crashes host application)
|
|
10
|
+
- Automatic output capture
|
|
11
|
+
- OpenAI-style token extraction
|
|
12
|
+
"""
|
|
13
|
+
from .tracer import (
|
|
14
|
+
LighthouseTracer,
|
|
15
|
+
get_tracer,
|
|
16
|
+
reset_global_tracer,
|
|
17
|
+
trace_agent,
|
|
18
|
+
trace_tool,
|
|
19
|
+
trace_llm,
|
|
20
|
+
)
|
|
21
|
+
from .client import LighthouseClient
|
|
22
|
+
|
|
23
|
+
__version__ = "0.3.0"
|
|
24
|
+
__all__ = [
|
|
25
|
+
"LighthouseTracer",
|
|
26
|
+
"LighthouseClient",
|
|
27
|
+
"get_tracer",
|
|
28
|
+
"reset_global_tracer",
|
|
29
|
+
"trace_agent",
|
|
30
|
+
"trace_tool",
|
|
31
|
+
"trace_llm",
|
|
32
|
+
"instrument",
|
|
33
|
+
"uninstrument",
|
|
34
|
+
"is_instrumented",
|
|
35
|
+
]
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def instrument() -> bool:
    """Enable zero-touch auto-instrumentation.

    The heavy :mod:`.auto` module is imported lazily so that merely importing
    the package never patches anything.

    Returns:
        True if instrumentation was applied (or already active).
    """
    from .auto import instrument as _auto_instrument

    return _auto_instrument()
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def uninstrument() -> None:
    """Undo auto-instrumentation previously enabled via :func:`instrument`.

    Lazily delegates to :mod:`.auto` to avoid import-time side effects.
    """
    from .auto import uninstrument as _auto_uninstrument

    return _auto_uninstrument()
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def is_instrumented() -> bool:
    """Report whether auto-instrumentation is currently active.

    Lazily delegates to :mod:`.auto` to avoid import-time side effects.
    """
    from .auto import is_instrumented as _auto_is_instrumented

    return _auto_is_instrumented()
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Framework adapters for zero-touch auto-instrumentation.
|
|
3
|
+
"""
|
|
4
|
+
from .langchain import LighthouseLangChainCallbackHandler, register_langchain_callbacks
|
|
5
|
+
from .crewai import register_crewai_hooks
|
|
6
|
+
from .autogen import register_autogen_logging
|
|
7
|
+
|
|
8
|
+
__all__ = [
|
|
9
|
+
"LighthouseLangChainCallbackHandler",
|
|
10
|
+
"register_langchain_callbacks",
|
|
11
|
+
"register_crewai_hooks",
|
|
12
|
+
"register_autogen_logging",
|
|
13
|
+
]
|
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
"""
|
|
2
|
+
AutoGen adapter via logging handler.
|
|
3
|
+
Best-effort parsing of structured log records.
|
|
4
|
+
"""
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
import time
|
|
9
|
+
from typing import Optional
|
|
10
|
+
|
|
11
|
+
from ..tracer import get_tracer
|
|
12
|
+
|
|
13
|
+
logger = logging.getLogger("agent_lighthouse.adapters.autogen")
|
|
14
|
+
|
|
15
|
+
_REGISTERED = False
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class _AutoGenLogHandler(logging.Handler):
    """Logging handler that turns AutoGen structured log records into spans.

    AutoGen emits lifecycle events through the ``autogen`` logger; this
    handler parses each record's ``__dict__`` best-effort, opening a span on
    ``*_start`` events and closing it on the matching ``*_end``/``*_error``
    event. All failures are swallowed and logged at DEBUG (fail-silent).
    """

    def __init__(self):
        super().__init__()
        # event_id -> (span_id, start time from perf_counter) for open spans.
        self._spans: dict[str, tuple[str, float]] = {}
        # event_id -> trace_id for traces this handler created itself
        # (i.e. when no ambient tracer trace was active).
        self._traces: dict[str, str] = {}

    def _ensure_trace(self, event_id: str, name: str) -> Optional[str]:
        """Return a trace id for *event_id*, creating a new trace if needed.

        Prefers the tracer's ambient trace, then a trace previously created
        for this event id, then creates a fresh "autogen" trace. May return
        None if trace creation did not yield an id.
        """
        tracer = get_tracer()
        trace_id = tracer.trace_id
        if trace_id:
            # An ambient trace is active; attach spans to it.
            return trace_id
        existing = self._traces.get(event_id)
        if existing:
            return existing
        trace = tracer.client.create_trace(
            name=name,
            framework="autogen",
            metadata={"event_id": event_id},
        )
        trace_id = trace.get("trace_id")
        if trace_id:
            # Remember it so the matching *_end event can complete it.
            self._traces[event_id] = trace_id
        return trace_id

    def emit(self, record: logging.LogRecord) -> None:
        """Translate one log record into span start/end calls (best-effort)."""
        try:
            # Structured fields arrive as extra attributes on the record.
            data = record.__dict__
            event = data.get("event") or data.get("event_name")
            if not event:
                # Not a structured AutoGen event; ignore.
                return

            # Correlation key between start and end records. NOTE(review):
            # the timestamp fallback cannot match across records, so events
            # without an explicit id will never pair up — presumed acceptable.
            event_id = (
                str(data.get("event_id") or data.get("run_id") or f"{record.created}-{event}")
            )
            tracer = get_tracer()
            kind = data.get("kind") or "internal"
            name = data.get("name") or data.get("agent_name") or event

            if event.endswith("_start"):
                trace_id = self._ensure_trace(event_id, f"{name} (autogen)")
                if not trace_id:
                    return
                span = tracer.client.create_span(
                    trace_id=trace_id,
                    name=name,
                    kind=kind,
                    parent_span_id=tracer.span_id,
                    input_data=data.get("input_data"),
                    attributes={"event": event},
                )
                span_id = span.get("span_id")
                if span_id:
                    # Record the span and its wall-clock start for duration.
                    self._spans[event_id] = (span_id, time.perf_counter())
                return

            if event.endswith("_end") or event.endswith("_error"):
                span_entry = self._spans.pop(event_id, None)
                trace_id = tracer.trace_id or self._traces.get(event_id)
                if not span_entry or not trace_id:
                    # No matching start (or no trace); nothing to close.
                    return
                span_id, start = span_entry
                duration_ms = (time.perf_counter() - start) * 1000
                status = "error" if event.endswith("_error") else "success"
                tracer.client.update_span(
                    trace_id=trace_id,
                    span_id=span_id,
                    status=status,
                    output_data=data.get("output_data"),
                    error_message=data.get("error_message"),
                    error_type=data.get("error_type"),
                    duration_ms=duration_ms,
                )
                # Only complete traces this handler created itself; ambient
                # traces are owned by the tracer.
                if event_id in self._traces and tracer.trace_id is None:
                    tracer.client.complete_trace(
                        trace_id, "success" if status == "success" else "error"
                    )
                    self._traces.pop(event_id, None)
        except Exception:  # noqa: BLE001
            # Never let observability crash the host application.
            logger.debug("AutoGen log handling failed", exc_info=True)
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def register_autogen_logging() -> bool:
    """Attach a Lighthouse log handler to the ``autogen`` logger.

    Idempotent: once registration succeeds, subsequent calls return True
    without re-installing the handler.

    Returns:
        True when the handler is (or already was) installed; False when
        AutoGen is not importable.
    """
    global _REGISTERED

    if _REGISTERED:
        return True

    try:
        import autogen  # type: ignore # noqa: F401
    except Exception:  # noqa: BLE001
        # AutoGen not installed — nothing to instrument, fail silently.
        return False

    autogen_logger = logging.getLogger("autogen")
    lighthouse_handler = _AutoGenLogHandler()
    if lighthouse_handler not in autogen_logger.handlers:
        autogen_logger.addHandler(lighthouse_handler)
    _REGISTERED = True
    return True
|
|
@@ -0,0 +1,179 @@
|
|
|
1
|
+
"""
|
|
2
|
+
CrewAI adapter with best-effort feature detection.
|
|
3
|
+
Fail-silent when CrewAI APIs are unavailable.
|
|
4
|
+
"""
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
import time
|
|
9
|
+
from typing import Any, Optional
|
|
10
|
+
|
|
11
|
+
from ..tracer import get_tracer
|
|
12
|
+
from ..serialization import _capture_args, _capture_output
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger("agent_lighthouse.adapters.crewai")
|
|
15
|
+
|
|
16
|
+
_REGISTERED = False
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class _CrewAIEventHandler:
    """Event handler that maps CrewAI lifecycle callbacks to Lighthouse spans.

    Each ``on_*_start`` opens a span keyed by *run_id*; the matching
    ``on_*_end``/``on_*_error`` closes it. If no ambient trace is active, a
    standalone "crewai" trace is created per run and completed when its span
    ends. Method names are best-effort — which ones CrewAI actually invokes
    depends on the installed CrewAI version.
    """

    def __init__(self, tracer=None):
        # Fall back to the global tracer when none is injected.
        self.tracer = tracer or get_tracer()
        # run_id -> open span id.
        self._run_spans: dict[str, str] = {}
        # run_id -> perf_counter() at span start (for duration_ms).
        self._run_start: dict[str, float] = {}
        # run_id -> trace id for traces this handler created itself.
        self._run_traces: dict[str, str] = {}

    def _ensure_trace(self, run_id: str, name: str) -> Optional[str]:
        """Return a trace id for *run_id*, creating a "crewai" trace if needed.

        Prefers the tracer's ambient trace, then a trace previously created
        for this run. May return None if trace creation yields no id.
        """
        trace_id = self.tracer.trace_id
        if trace_id:
            return trace_id
        existing = self._run_traces.get(run_id)
        if existing:
            return existing
        trace = self.tracer.client.create_trace(
            name=name,
            framework="crewai",
            metadata={"run_id": run_id},
        )
        trace_id = trace.get("trace_id")
        if trace_id:
            # Remember it so _end_span can complete the trace later.
            self._run_traces[run_id] = trace_id
        return trace_id

    def _start_span(
        self,
        run_id: str,
        name: str,
        kind: str,
        input_data: Optional[dict] = None,
        attributes: Optional[dict] = None,
    ) -> None:
        """Open a span for *run_id* and record its start time."""
        trace_id = self._ensure_trace(run_id, name)
        if not trace_id:
            return
        # Nest under the tracer's current span, if any.
        parent_span_id = self.tracer.span_id
        span = self.tracer.client.create_span(
            trace_id=trace_id,
            name=name,
            kind=kind,
            parent_span_id=parent_span_id,
            input_data=input_data,
            attributes=attributes or {},
        )
        span_id = span.get("span_id")
        if span_id:
            self._run_spans[run_id] = span_id
            self._run_start[run_id] = time.perf_counter()

    def _end_span(self, run_id: str, status: str, output_data: Optional[dict] = None) -> None:
        """Close the span for *run_id* with *status*; complete owned traces."""
        span_id = self._run_spans.pop(run_id, None)
        start = self._run_start.pop(run_id, None)
        trace_id = self.tracer.trace_id or self._run_traces.get(run_id)
        if not trace_id or not span_id:
            # No matching start — nothing to close.
            return
        duration_ms = None
        if start is not None:
            duration_ms = (time.perf_counter() - start) * 1000
        self.tracer.client.update_span(
            trace_id=trace_id,
            span_id=span_id,
            status=status,
            output_data=output_data,
            duration_ms=duration_ms,
        )
        # Only complete traces this handler created itself; an ambient trace
        # belongs to the tracer and is left open.
        if run_id in self._run_traces and self.tracer.trace_id is None:
            self.tracer.client.complete_trace(trace_id, "success" if status == "success" else "error")
            self._run_traces.pop(run_id, None)

    # Best-effort event handlers (method names depend on CrewAI version)
    def on_agent_start(self, agent_name: str, run_id: str, **kwargs: Any):
        self._start_span(
            run_id=run_id,
            name=agent_name or "CrewAI Agent",
            kind="agent",
            input_data=_capture_args((), kwargs),
        )

    def on_agent_end(self, output: Any, run_id: str, **kwargs: Any):
        self._end_span(run_id, "success", _capture_output(output))

    def on_agent_error(self, error: Exception, run_id: str, **kwargs: Any):
        self._end_span(run_id, "error", {"error": str(error)})

    def on_tool_start(self, tool_name: str, run_id: str, **kwargs: Any):
        self._start_span(
            run_id=run_id,
            name=tool_name or "CrewAI Tool",
            kind="tool",
            input_data=_capture_args((), kwargs),
        )

    def on_tool_end(self, output: Any, run_id: str, **kwargs: Any):
        self._end_span(run_id, "success", _capture_output(output))

    def on_tool_error(self, error: Exception, run_id: str, **kwargs: Any):
        self._end_span(run_id, "error", {"error": str(error)})

    def on_task_start(self, task_name: str, run_id: str, **kwargs: Any):
        self._start_span(
            run_id=run_id,
            name=task_name or "CrewAI Task",
            kind="chain",
            input_data=_capture_args((), kwargs),
        )

    def on_task_end(self, output: Any, run_id: str, **kwargs: Any):
        self._end_span(run_id, "success", _capture_output(output))

    def on_task_error(self, error: Exception, run_id: str, **kwargs: Any):
        self._end_span(run_id, "error", {"error": str(error)})
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
def register_crewai_hooks() -> bool:
    """Best-effort registration using available CrewAI telemetry/callback APIs.

    Tries, in order: (1) a ``crewai.telemetry`` object exposing any known
    registration method; (2) wrapping ``crewai.callbacks.CallbackManager``
    so every new manager picks up the Lighthouse handler. Idempotent and
    fail-silent.

    Returns:
        True on (or after a previous) successful registration, False when
        CrewAI is missing or exposes no supported API surface.
    """
    global _REGISTERED

    if _REGISTERED:
        return True

    try:
        import crewai  # type: ignore
    except Exception:  # noqa: BLE001
        # CrewAI not installed — nothing to hook into.
        return False

    event_handler = _CrewAIEventHandler()

    # Strategy 1: telemetry hooks, trying each known registration method name.
    telemetry_api = getattr(crewai, "telemetry", None)
    if telemetry_api is not None:
        for method_name in ("register_handler", "add_handler", "add_listener", "register"):
            if not hasattr(telemetry_api, method_name):
                continue
            try:
                getattr(telemetry_api, method_name)(event_handler)
                _REGISTERED = True
                return True
            except Exception:  # noqa: BLE001
                logger.debug("CrewAI telemetry hook registration failed", exc_info=True)

    # Strategy 2: monkeypatch CallbackManager.__init__ so every freshly
    # constructed manager registers our handler automatically.
    callbacks_mod = getattr(crewai, "callbacks", None)
    if callbacks_mod is not None:
        manager_cls = getattr(callbacks_mod, "CallbackManager", None)
        if manager_cls is not None:
            wrapped_init = manager_cls.__init__

            def patched_init(self, *args, **kwargs):  # type: ignore[no-redef]
                wrapped_init(self, *args, **kwargs)
                try:
                    if hasattr(self, "add_handler"):
                        self.add_handler(event_handler)
                except Exception:  # noqa: BLE001
                    logger.debug("CrewAI callback registration failed", exc_info=True)

            manager_cls.__init__ = patched_init  # type: ignore[assignment]
            _REGISTERED = True
            return True

    logger.debug("CrewAI hooks not registered: unsupported API surface")
    return False
|