agentwatchx 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 AgentWatchX
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,103 @@
1
+ Metadata-Version: 2.4
2
+ Name: agentwatchx
3
+ Version: 0.1.0
4
+ Summary: Observability SDK for AI Agent Execution — catches hallucinations, silent failures, and missing executions
5
+ License: MIT
6
+ Project-URL: Homepage, https://agentwatchx.com
7
+ Project-URL: Documentation, https://agentwatchx.com/docs
8
+ Keywords: ai,agents,observability,llm,tracing,openai,anthropic,langchain
9
+ Classifier: Development Status :: 4 - Beta
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: License :: OSI Approved :: MIT License
12
+ Classifier: Programming Language :: Python :: 3
13
+ Classifier: Topic :: Software Development :: Libraries
14
+ Requires-Python: >=3.9
15
+ Description-Content-Type: text/markdown
16
+ License-File: LICENSE
17
+ Requires-Dist: httpx>=0.25.0
18
+ Requires-Dist: pydantic>=2.0.0
19
+ Provides-Extra: openai
20
+ Requires-Dist: openai>=1.0.0; extra == "openai"
21
+ Provides-Extra: anthropic
22
+ Requires-Dist: anthropic>=0.18.0; extra == "anthropic"
23
+ Provides-Extra: langchain
24
+ Requires-Dist: langchain-core>=0.1.0; extra == "langchain"
25
+ Provides-Extra: llamaindex
26
+ Requires-Dist: llama-index-core>=0.10.0; extra == "llamaindex"
27
+ Provides-Extra: all
28
+ Requires-Dist: openai>=1.0.0; extra == "all"
29
+ Requires-Dist: anthropic>=0.18.0; extra == "all"
30
+ Requires-Dist: langchain-core>=0.1.0; extra == "all"
31
+ Requires-Dist: llama-index-core>=0.10.0; extra == "all"
32
+ Dynamic: license-file
33
+
34
+ # AgentWatchX Python SDK
35
+
36
+ Observability for AI agents. Catches hallucinations, silent failures, and missing executions automatically.
37
+
38
+ ## Install
39
+
40
+ ```bash
41
+ pip install agentwatchx
42
+ ```
43
+
44
+ ## 2-Line Integration
45
+
46
+ ```python
47
+ import agentwatchx
48
+ agentwatchx.init(api_key="your_api_key")
49
+
50
+ # That's it. Every OpenAI/Anthropic/LangChain/LlamaIndex call is now traced.
51
+ # No decorators, no wrappers, no code changes needed.
52
+ ```
53
+
54
+ ## What it captures
55
+
56
+ - Model, input, output, token usage, latency — automatically
57
+ - Tool calls made by the LLM (function calling)
58
+ - Hallucination detection — agent claims actions it never performed
59
+ - Failure masking — tool errors the agent hides from users
60
+ - Missing execution — user asks for action, agent just talks
61
+
62
+ ## Supported Libraries
63
+
64
+ | Library | Auto-instrumented |
65
+ |---|---|
66
+ | OpenAI | ✅ |
67
+ | Anthropic | ✅ |
68
+ | LangChain | ✅ |
69
+ | LlamaIndex | ✅ |
70
+
71
+ ## Configuration
72
+
73
+ ```python
74
+ agentwatchx.init(
75
+ api_key="your_api_key",
76
+ endpoint="https://api.agentwatchx.com", # or self-hosted
77
+ service="my-agent",
78
+ flush_interval=5.0,
79
+ batch_size=100,
80
+ auto_instrument=True, # set False to disable auto-patching
81
+ )
82
+ ```
83
+
84
+ ## Advanced Usage
85
+
86
+ ```python
87
+ # Manual trace decorator
88
+ @agentwatchx.trace
89
+ def my_agent(query: str):
90
+ return client.chat.completions.create(...)
91
+
92
+ # Manual logging
93
+ agentwatchx.log({"service": "my-agent", "input": "hello", "output": "world"})
94
+
95
+ # Check status
96
+ print(agentwatchx.status())
97
+ # → {"initialized": True, "instrumented": ["openai"], "buffered_traces": 0}
98
+ ```
99
+
100
+ ## Links
101
+
102
+ - [Documentation](https://agentwatchx.com/docs)
103
+ - [Dashboard](https://agentwatchx.com/dashboard)
@@ -0,0 +1,70 @@
1
+ # AgentWatchX Python SDK
2
+
3
+ Observability for AI agents. Catches hallucinations, silent failures, and missing executions automatically.
4
+
5
+ ## Install
6
+
7
+ ```bash
8
+ pip install agentwatchx
9
+ ```
10
+
11
+ ## 2-Line Integration
12
+
13
+ ```python
14
+ import agentwatchx
15
+ agentwatchx.init(api_key="your_api_key")
16
+
17
+ # That's it. Every OpenAI/Anthropic/LangChain/LlamaIndex call is now traced.
18
+ # No decorators, no wrappers, no code changes needed.
19
+ ```
20
+
21
+ ## What it captures
22
+
23
+ - Model, input, output, token usage, latency — automatically
24
+ - Tool calls made by the LLM (function calling)
25
+ - Hallucination detection — agent claims actions it never performed
26
+ - Failure masking — tool errors the agent hides from users
27
+ - Missing execution — user asks for action, agent just talks
28
+
29
+ ## Supported Libraries
30
+
31
+ | Library | Auto-instrumented |
32
+ |---|---|
33
+ | OpenAI | ✅ |
34
+ | Anthropic | ✅ |
35
+ | LangChain | ✅ |
36
+ | LlamaIndex | ✅ |
37
+
38
+ ## Configuration
39
+
40
+ ```python
41
+ agentwatchx.init(
42
+ api_key="your_api_key",
43
+ endpoint="https://api.agentwatchx.com", # or self-hosted
44
+ service="my-agent",
45
+ flush_interval=5.0,
46
+ batch_size=100,
47
+ auto_instrument=True, # set False to disable auto-patching
48
+ )
49
+ ```
50
+
51
+ ## Advanced Usage
52
+
53
+ ```python
54
+ # Manual trace decorator
55
+ @agentwatchx.trace
56
+ def my_agent(query: str):
57
+ return client.chat.completions.create(...)
58
+
59
+ # Manual logging
60
+ agentwatchx.log({"service": "my-agent", "input": "hello", "output": "world"})
61
+
62
+ # Check status
63
+ print(agentwatchx.status())
64
+ # → {"initialized": True, "instrumented": ["openai"], "buffered_traces": 0}
65
+ ```
66
+
67
+ ## Links
68
+
69
+ - [Documentation](https://agentwatchx.com/docs)
70
+ - [Dashboard](https://agentwatchx.com/dashboard)
@@ -0,0 +1,106 @@
1
+ """AgentWatchX — Observability SDK for AI Agent Execution."""
2
+
3
+ from agentwatchx.client import AgentWatchXClient
4
+ from agentwatchx.decorators import trace
5
+ from agentwatchx.schemas import TraceData, ToolCallData
6
+
7
# Module-level client instance, created by init() and reset by shutdown().
# The annotation is a string on purpose: evaluating `AgentWatchXClient | None`
# at module import time raises TypeError on Python 3.9, which this package
# declares support for (Requires-Python >=3.9); PEP 604 unions need 3.10+.
_client: "AgentWatchXClient | None" = None
9
+
10
+
11
def init(api_key: str, endpoint: str = "https://api.agentwatchx.com", auto_instrument: bool = True, **kwargs):
    """Initialize the AgentWatchX SDK.

    After calling init(), tracing begins automatically for supported libraries
    (OpenAI, Anthropic, LangChain, LlamaIndex) if they are installed.

    Set auto_instrument=False to disable auto-patching and use manual
    capture/wrap/trace only.

    Args:
        api_key: Backend API key used for authentication.
        endpoint: Base URL of the trace ingestion service.
        auto_instrument: Whether to globally patch supported libraries.
        **kwargs: Forwarded to AgentWatchXClient (flush_interval, batch_size, ...).

    Returns:
        The module-level AgentWatchXClient instance.
    """
    global _client

    _client = AgentWatchXClient(api_key=api_key, endpoint=endpoint, **kwargs)

    if not auto_instrument:
        return _client

    # Imported lazily so the integrations registry is only touched on demand.
    from agentwatchx.integrations import auto_instrument as _apply_patches
    _apply_patches(_client)
    return _client
28
+
29
+
30
def get_client() -> AgentWatchXClient:
    """Return the active SDK client, raising if init() was never called."""
    if _client is not None:
        return _client
    raise RuntimeError("AgentWatchX SDK not initialized. Call agentwatchx.init() first.")
34
+
35
+
36
def wrap(llm_client):
    """Wrap an LLM client instance for explicit trace capture.

    This is optional when auto-instrumentation is active. Use it when you
    want to trace a specific client instance without global patching.

    Supported: OpenAI, Anthropic client instances.

    Args:
        llm_client: An instantiated OpenAI or Anthropic client object.

    Returns:
        The same ``llm_client`` that was passed in, so the call can be
        used inline (e.g. ``client = agentwatchx.wrap(OpenAI())``).

    Raises:
        RuntimeError: If agentwatchx.init() has not been called.
    """
    if _client is None:
        raise RuntimeError("AgentWatchX SDK not initialized. Call agentwatchx.init() first.")

    # Detect client type from the class's module path (e.g. "openai._client")
    # and apply instance-level wrapping.
    client_type = type(llm_client).__module__

    # NOTE(review): despite the "instance-level" intent above, both branches
    # call patch(_client) — apparently the same module-level patch used by
    # auto-instrumentation, not a wrapper scoped to this one instance.
    # Confirm against the openai_patch/anthropic_patch implementations.
    # Also note substring matching: a class from e.g. "langchain_openai"
    # would match the "openai" branch — presumably acceptable; verify.
    if "openai" in client_type:
        from agentwatchx.integrations.openai_patch import patch
        patch(_client)
    elif "anthropic" in client_type:
        from agentwatchx.integrations.anthropic_patch import patch
        patch(_client)

    return llm_client
58
+
59
+
60
def log(data: dict):
    """Manually log a trace from a dict. Convenience for custom events.

    Recognized keys (first present, non-None value wins):
    service/type, input/input_text, output/output_text, llm_model/model,
    duration_ms, tool_calls. Every other key is stored under metadata.

    Raises:
        RuntimeError: If agentwatchx.init() has not been called.
    """
    if _client is None:
        raise RuntimeError("AgentWatchX SDK not initialized. Call agentwatchx.init() first.")

    def _pick(*keys):
        # First non-None value among the aliases, else None. Presence-based
        # rather than truthiness-based so falsy values (0, 0.0, "") survive.
        for key in keys:
            value = data.get(key)
            if value is not None:
                return value
        return None

    tool_calls = [
        ToolCallData(**tc) if isinstance(tc, dict) else tc
        for tc in data.get("tool_calls", [])
    ]

    raw_service = _pick("service", "type")
    raw_input = _pick("input", "input_text")
    # BUG FIX: the old `if data.get("output") or data.get("output_text")`
    # silently dropped falsy outputs such as 0 or "" to None; only a
    # missing/None output should map to None.
    raw_output = _pick("output", "output_text")

    trace_data = TraceData(
        service="custom" if raw_service is None else raw_service,
        input_text="" if raw_input is None else str(raw_input),
        output_text=None if raw_output is None else str(raw_output),
        llm_model=_pick("llm_model", "model"),
        duration_ms=data.get("duration_ms"),
        tool_calls=tool_calls,
        metadata={k: v for k, v in data.items() if k not in {
            "service", "type", "input", "input_text", "output", "output_text",
            "llm_model", "model", "duration_ms", "tool_calls",
        }},
    )
    _client.capture(trace_data)
82
+
83
+
84
def shutdown():
    """Flush pending traces, remove instrumentation, and close the client.

    Safe to call when the SDK was never initialized. After shutdown the SDK
    reports uninitialized again until init() is called.
    """
    global _client
    if _client is None:
        return
    from agentwatchx.integrations import unpatch_all
    unpatch_all()
    _client.flush()
    _client.close()
    # BUG FIX: previously the closed client was left in place, so status()
    # kept claiming "initialized": True against a closed HTTP client.
    _client = None
91
+
92
+
93
def status() -> dict:
    """Return SDK connection status."""
    if _client is None:
        return {"initialized": False}
    # Merge the client's own report with the instrumentation registry.
    report = _client.status()
    from agentwatchx.integrations import get_instrumented
    return {**report, "instrumented": get_instrumented()}
101
+
102
+
103
# Public API surface of the package; `trace`, `TraceData` and `ToolCallData`
# are re-exported from agentwatchx.decorators / agentwatchx.schemas above.
__all__ = [
    "init", "trace", "wrap", "log", "shutdown", "status",
    "get_client", "TraceData", "ToolCallData",
]
@@ -0,0 +1,90 @@
1
+ import threading
2
+ import time
3
+ import atexit
4
+
5
+ import httpx
6
+
7
+ from agentwatchx.schemas import TraceData
8
+
9
+
10
class AgentWatchXClient:
    """Core SDK client. Buffers traces and flushes in batches.

    Traces accumulate in an in-memory buffer guarded by a lock and are sent
    in chunks of ``batch_size`` — either when the buffer fills during
    capture(), or every ``flush_interval`` seconds from a daemon background
    thread. Sending is best-effort: a failed batch is re-queued at the front.
    """

    def __init__(
        self,
        api_key: str,
        endpoint: str = "https://api.agentwatchx.com",
        flush_interval: float = 5.0,
        batch_size: int = 100,
        service: str = "default",
    ):
        self.api_key = api_key
        self.endpoint = endpoint.rstrip("/")
        self.flush_interval = flush_interval
        self.batch_size = batch_size
        self.default_service = service

        self._buffer: list[dict] = []
        self._lock = threading.Lock()
        # Guards against double close: atexit runs shutdown() even after an
        # explicit close(), which previously flushed on a closed HTTP client.
        self._closed = False
        self._http = httpx.Client(
            base_url=self.endpoint,
            headers={"Authorization": f"Bearer {api_key}"},
            timeout=10.0,
        )

        # Background flush thread (daemon so it never blocks interpreter exit).
        self._running = True
        self._flush_thread = threading.Thread(target=self._flush_loop, daemon=True)
        self._flush_thread.start()
        atexit.register(self.shutdown)

    def capture(self, trace: TraceData):
        """Add a trace to the buffer, sending a batch once it is full."""
        data = trace.model_dump()
        if data["service"] == "default":
            data["service"] = self.default_service
        with self._lock:
            self._buffer.append(data)
            if len(self._buffer) >= self.batch_size:
                self._send_batch()

    def flush(self):
        """Force flush all buffered traces.

        Drains the whole buffer in batch_size chunks. BUG FIX: previously a
        single _send_batch() call sent at most one chunk, so "flush all" left
        traces behind whenever more than batch_size were buffered. Stops
        early when a send fails so an unreachable endpoint cannot loop
        forever re-queueing the same batch.
        """
        with self._lock:
            while self._buffer:
                if not self._send_batch():
                    break  # endpoint unreachable; keep the remainder buffered

    def close(self):
        """Stop the flush thread and close the HTTP client. Idempotent."""
        if self._closed:
            return
        self._closed = True
        self._running = False
        self.flush()
        self._http.close()

    def shutdown(self):
        """Alias for close — used by atexit."""
        self.close()

    def status(self) -> dict:
        """Return a snapshot of endpoint and buffer state."""
        with self._lock:
            buffered = len(self._buffer)
        return {
            "initialized": True,
            "endpoint": self.endpoint,
            "buffered_traces": buffered,
        }

    def _flush_loop(self):
        # Periodic background flush; exits once close() clears _running.
        while self._running:
            time.sleep(self.flush_interval)
            self.flush()

    def _send_batch(self) -> bool:
        """Send one batch of buffered traces. Caller must hold self._lock.

        Returns:
            True when a batch was sent (or the buffer was already empty);
            False when the POST failed and the batch was re-queued.
        """
        if not self._buffer:
            return True
        batch = self._buffer[:self.batch_size]
        self._buffer = self._buffer[self.batch_size:]
        try:
            self._http.post("/v1/traces/batch", json={"traces": batch})
            return True
        except Exception:
            # Re-add to buffer on failure (best effort); order is preserved.
            self._buffer = batch + self._buffer
            return False
@@ -0,0 +1,63 @@
1
+ import functools
2
+ import time
3
+ from typing import Callable
4
+
5
+ from agentwatchx.schemas import TraceData
6
+
7
+
8
def trace(
    func: "Callable | None" = None,
    *,
    service: str = "default",
    llm_model: "str | None" = None,
):
    """
    Decorator to trace a function execution.

    Usage:
        @agentwatchx.trace
        def my_agent(input_text):
            ...

        @agentwatchx.trace(service="order-agent", llm_model="gpt-4")
        def my_agent(input_text):
            ...

    Args:
        func: The decorated function when used bare; None when called
            with keyword options.
        service: Service name recorded on each trace.
        llm_model: Optional model name recorded on each trace.

    Notes:
        - Signature annotations are strings so the module imports on
          Python 3.9 (the declared minimum), where evaluating ``X | None``
          in a def raises TypeError.
        - Reporting is best-effort: any failure while building or sending
          the trace is swallowed so tracing can never break the caller.
    """
    def decorator(fn: Callable) -> Callable:
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            # Best-effort textual capture of the call's input.
            input_text = str(args[0]) if args else str(kwargs) if kwargs else None
            start = time.perf_counter()
            error = None
            result = None  # only meaningful when error is None

            try:
                result = fn(*args, **kwargs)
                return result
            except Exception as e:
                error = e
                raise
            finally:
                duration_ms = (time.perf_counter() - start) * 1000
                # BUG FIX: previously detected a completed call via the
                # fragile `'result' in dir()`; `error is None` is explicit
                # and equivalent (result is assigned iff fn returned).
                output_text = str(result) if error is None else None

                # BUG FIX: trace construction/sending used to run unguarded
                # inside finally (only RuntimeError was caught around
                # capture), so a validation or import failure here would
                # mask the caller's return value or in-flight exception.
                try:
                    import agentwatchx

                    trace_data = TraceData(
                        service=service,
                        input_text=input_text,
                        output_text=output_text,
                        llm_model=llm_model,
                        duration_ms=round(duration_ms, 2),
                    )
                    agentwatchx.get_client().capture(trace_data)
                except Exception:
                    pass  # SDK not initialized or reporting failed — skip

        return wrapper

    # Support both @trace and @trace(...) forms.
    if func is not None:
        return decorator(func)
    return decorator
@@ -0,0 +1,59 @@
1
+ """
2
+ Auto-instrumentation registry.
3
+
4
+ Detects installed LLM libraries and patches them to capture traces
5
+ automatically after agentwatchx.init() is called.
6
+ """
7
+
8
+ import importlib
9
+ import logging
10
+
11
logger = logging.getLogger("agentwatchx.integrations")

# Maps an importable library name to the dotted path of the patch module
# that instruments it. Each patch module is expected to expose patch(client)
# and may optionally expose unpatch() (see unpatch_all below).
_PATCHES = {
    "openai": "agentwatchx.integrations.openai_patch",
    "anthropic": "agentwatchx.integrations.anthropic_patch",
    "langchain_core": "agentwatchx.integrations.langchain_cb",
    "llama_index": "agentwatchx.integrations.llamaindex_cb",
}

# Library names that were successfully patched in this session.
_applied: list[str] = []
21
+
22
+
23
def auto_instrument(client):
    """Detect installed libraries and apply patches. Called by init().

    Args:
        client: The AgentWatchXClient instance that patch modules will
            report traces to.

    Detection uses importlib.util.find_spec instead of importing each
    library: the previous importlib.import_module probe eagerly imported
    every supported (often heavy) SDK just to test availability, and any
    import-time error other than ImportError propagated and broke init().
    """
    import importlib.util  # explicit: `import importlib` alone need not load .util

    for lib_name, patch_module in _PATCHES.items():
        if lib_name in _applied:
            continue  # already patched, skip

        try:
            spec = importlib.util.find_spec(lib_name)
        except Exception:
            spec = None  # broken/partial install — treat as absent
        if spec is None:
            continue  # library not installed, skip

        try:
            mod = importlib.import_module(patch_module)
            mod.patch(client)
            _applied.append(lib_name)
            logger.info("Instrumented %s", lib_name)
        except Exception as exc:
            logger.warning("Failed to instrument %s: %s", lib_name, exc)
41
+
42
+
43
def get_instrumented() -> list[str]:
    """Return the libraries instrumented so far (a copy, safe to mutate)."""
    return _applied[:]
46
+
47
+
48
def unpatch_all():
    """Remove all patches (for testing / shutdown).

    Best-effort teardown: a patch module without an unpatch() hook, or one
    that fails during unpatching, is skipped silently.
    """
    for lib_name, patch_module in _PATCHES.items():
        if lib_name not in _applied:
            continue
        try:
            patch_mod = importlib.import_module(patch_module)
            undo = getattr(patch_mod, "unpatch", None)
            if undo is not None:
                undo()
        except Exception:
            pass
    _applied.clear()
+ _applied.clear()