maev-sdk 0.3.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- maev_sdk-0.3.0/.gitignore +39 -0
- maev_sdk-0.3.0/PKG-INFO +132 -0
- maev_sdk-0.3.0/README.md +106 -0
- maev_sdk-0.3.0/pyproject.toml +38 -0
- maev_sdk-0.3.0/src/maev/__init__.py +377 -0
- maev_sdk-0.3.0/src/maev/_instrumentors/__init__.py +5 -0
- maev_sdk-0.3.0/src/maev/py.typed +0 -0
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
|
|
2
|
+
|
|
3
|
+
# dependencies
|
|
4
|
+
node_modules
|
|
5
|
+
/.pnp
|
|
6
|
+
.pnp.js
|
|
7
|
+
.yarn/install-state.gz
|
|
8
|
+
|
|
9
|
+
# testing
|
|
10
|
+
/coverage
|
|
11
|
+
|
|
12
|
+
# next.js
|
|
13
|
+
.next/
|
|
14
|
+
/out/
|
|
15
|
+
|
|
16
|
+
# production
|
|
17
|
+
/build
|
|
18
|
+
|
|
19
|
+
# misc
|
|
20
|
+
.DS_Store
|
|
21
|
+
*.pem
|
|
22
|
+
|
|
23
|
+
# debug
|
|
24
|
+
npm-debug.log*
|
|
25
|
+
yarn-debug.log*
|
|
26
|
+
yarn-error.log*
|
|
27
|
+
|
|
28
|
+
# local env files
|
|
29
|
+
.env*.local
|
|
30
|
+
|
|
31
|
+
# vercel
|
|
32
|
+
.vercel
|
|
33
|
+
|
|
34
|
+
# typescript
|
|
35
|
+
*.tsbuildinfo
|
|
36
|
+
next-env.d.ts
|
|
37
|
+
scripts/test_e2e.py
|
|
38
|
+
scripts/seed-test-org.ts
|
|
39
|
+
scripts/test_failing_agent.py
|
maev_sdk-0.3.0/PKG-INFO
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: maev-sdk
|
|
3
|
+
Version: 0.3.0
|
|
4
|
+
Summary: Maev AI Agent Observability SDK — one-liner instrumentation for AI agents
|
|
5
|
+
Project-URL: Homepage, https://maev.dev
|
|
6
|
+
Project-URL: Documentation, https://docs.maev.dev
|
|
7
|
+
Project-URL: Repository, https://github.com/ujjwalpreenja1308-web/veil
|
|
8
|
+
Author-email: Maev <eng@maev.dev>
|
|
9
|
+
License-Expression: MIT
|
|
10
|
+
Keywords: agents,ai,observability,telemetry
|
|
11
|
+
Classifier: Development Status :: 3 - Alpha
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
20
|
+
Classifier: Topic :: Software Development :: Libraries
|
|
21
|
+
Classifier: Topic :: System :: Monitoring
|
|
22
|
+
Classifier: Typing :: Typed
|
|
23
|
+
Requires-Python: >=3.9
|
|
24
|
+
Requires-Dist: openlit>=1.33.0
|
|
25
|
+
Description-Content-Type: text/markdown
|
|
26
|
+
|
|
27
|
+
# Maev Python SDK
|
|
28
|
+
|
|
29
|
+
One-liner observability for AI agents.
|
|
30
|
+
|
|
31
|
+
## Installation
|
|
32
|
+
|
|
33
|
+
```bash
|
|
34
|
+
pip install maev-sdk
|
|
35
|
+
```
|
|
36
|
+
|
|
37
|
+
## Quickstart
|
|
38
|
+
|
|
39
|
+
```python
|
|
40
|
+
import maev
|
|
41
|
+
|
|
42
|
+
maev.init(api_key="vl_xxx")
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
That's it. Add this before your agent runs. Maev automatically instruments
|
|
46
|
+
OpenAI, Anthropic, LangChain, LlamaIndex, and other popular LLM libraries,
|
|
47
|
+
and sends all telemetry to your Maev dashboard.
|
|
48
|
+
|
|
49
|
+
## Serverless functions (Lambda, Cloud Functions, Vercel, etc.)
|
|
50
|
+
|
|
51
|
+
In long-running processes (servers, scripts, notebooks), Maev automatically
|
|
52
|
+
sends all telemetry when the process exits. No extra code needed.
|
|
53
|
+
|
|
54
|
+
In **serverless functions**, the process doesn't exit cleanly — it gets frozen
|
|
55
|
+
or killed by the platform the moment your handler returns. Any telemetry still
|
|
56
|
+
in the buffer is silently dropped, and your session never closes in the
|
|
57
|
+
dashboard.
|
|
58
|
+
|
|
59
|
+
Call `maev.flush()` at the end of your handler to force delivery before the
|
|
60
|
+
freeze:
|
|
61
|
+
|
|
62
|
+
```python
|
|
63
|
+
import maev
|
|
64
|
+
|
|
65
|
+
maev.init(api_key="vl_xxx", agent_name="My Lambda")
|
|
66
|
+
|
|
67
|
+
def handler(event, context):
|
|
68
|
+
# ... your agent logic ...
|
|
69
|
+
|
|
70
|
+
maev.flush() # send everything before Lambda freezes
|
|
71
|
+
return result
|
|
72
|
+
```
|
|
73
|
+
|
|
74
|
+
`flush()` does two things in order:
|
|
75
|
+
1. Forces the OpenTelemetry exporter to drain any buffered spans (LLM call data)
|
|
76
|
+
2. Sends a `session.end` event so Maev closes and classifies the session
|
|
77
|
+
|
|
78
|
+
It is safe to call multiple times — only the first call does anything.
|
|
79
|
+
|
|
80
|
+
**Environments where you must call `flush()`:**
|
|
81
|
+
|
|
82
|
+
| Platform | Why |
|
|
83
|
+
|---|---|
|
|
84
|
+
| AWS Lambda | Handler returns → process frozen immediately |
|
|
85
|
+
| Google Cloud Functions | Same — process suspended after return |
|
|
86
|
+
| Vercel / Netlify Functions | Execution context torn down after response |
|
|
87
|
+
| Azure Functions | Consumption plan freezes after invocation |
|
|
88
|
+
|
|
89
|
+
**Environments where `flush()` is optional** (but harmless):
|
|
90
|
+
|
|
91
|
+
- Long-running servers (FastAPI, Flask, Django)
|
|
92
|
+
- CLI scripts
|
|
93
|
+
- Jupyter notebooks
|
|
94
|
+
- Docker containers
|
|
95
|
+
|
|
96
|
+
## How it works
|
|
97
|
+
|
|
98
|
+
- Telemetry is collected and sent asynchronously — zero impact on your agent's performance.
|
|
99
|
+
- Sessions are automatically tracked from the first LLM call through to process exit.
|
|
100
|
+
- Failures are detected and classified server-side — no configuration required.
|
|
101
|
+
- Every failure triggers an alert in your Maev dashboard and via email.
|
|
102
|
+
|
|
103
|
+
## Supported Libraries
|
|
104
|
+
|
|
105
|
+
Maev auto-instruments all major LLM frameworks including:
|
|
106
|
+
|
|
107
|
+
- OpenAI
|
|
108
|
+
- Anthropic
|
|
109
|
+
- LangChain
|
|
110
|
+
- LlamaIndex
|
|
111
|
+
- Cohere
|
|
112
|
+
- Mistral
|
|
113
|
+
- Google Gemini
|
|
114
|
+
- AWS Bedrock
|
|
115
|
+
- And more
|
|
116
|
+
|
|
117
|
+
## Requirements
|
|
118
|
+
|
|
119
|
+
- Python >= 3.9
|
|
120
|
+
|
|
121
|
+
## Your API Key
|
|
122
|
+
|
|
123
|
+
Find your API key in the [Maev Dashboard](https://home.maev.dev/dashboard/settings) under Settings.
|
|
124
|
+
Keys follow the format `vl_` followed by 64 hex characters.
|
|
125
|
+
|
|
126
|
+
## Self-hosting
|
|
127
|
+
|
|
128
|
+
If you are running Maev on your own infrastructure, pass the `endpoint` parameter:
|
|
129
|
+
|
|
130
|
+
```python
|
|
131
|
+
maev.init(api_key="vl_xxx", endpoint="https://your-maev-instance.com")
|
|
132
|
+
```
|
maev_sdk-0.3.0/README.md
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
# Maev Python SDK
|
|
2
|
+
|
|
3
|
+
One-liner observability for AI agents.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
pip install maev-sdk
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Quickstart
|
|
12
|
+
|
|
13
|
+
```python
|
|
14
|
+
import maev
|
|
15
|
+
|
|
16
|
+
maev.init(api_key="vl_xxx")
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
That's it. Add this before your agent runs. Maev automatically instruments
|
|
20
|
+
OpenAI, Anthropic, LangChain, LlamaIndex, and other popular LLM libraries,
|
|
21
|
+
and sends all telemetry to your Maev dashboard.
|
|
22
|
+
|
|
23
|
+
## Serverless functions (Lambda, Cloud Functions, Vercel, etc.)
|
|
24
|
+
|
|
25
|
+
In long-running processes (servers, scripts, notebooks), Maev automatically
|
|
26
|
+
sends all telemetry when the process exits. No extra code needed.
|
|
27
|
+
|
|
28
|
+
In **serverless functions**, the process doesn't exit cleanly — it gets frozen
|
|
29
|
+
or killed by the platform the moment your handler returns. Any telemetry still
|
|
30
|
+
in the buffer is silently dropped, and your session never closes in the
|
|
31
|
+
dashboard.
|
|
32
|
+
|
|
33
|
+
Call `maev.flush()` at the end of your handler to force delivery before the
|
|
34
|
+
freeze:
|
|
35
|
+
|
|
36
|
+
```python
|
|
37
|
+
import maev
|
|
38
|
+
|
|
39
|
+
maev.init(api_key="vl_xxx", agent_name="My Lambda")
|
|
40
|
+
|
|
41
|
+
def handler(event, context):
|
|
42
|
+
# ... your agent logic ...
|
|
43
|
+
|
|
44
|
+
maev.flush() # send everything before Lambda freezes
|
|
45
|
+
return result
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
`flush()` does two things in order:
|
|
49
|
+
1. Forces the OpenTelemetry exporter to drain any buffered spans (LLM call data)
|
|
50
|
+
2. Sends a `session.end` event so Maev closes and classifies the session
|
|
51
|
+
|
|
52
|
+
It is safe to call multiple times — only the first call does anything.
|
|
53
|
+
|
|
54
|
+
**Environments where you must call `flush()`:**
|
|
55
|
+
|
|
56
|
+
| Platform | Why |
|
|
57
|
+
|---|---|
|
|
58
|
+
| AWS Lambda | Handler returns → process frozen immediately |
|
|
59
|
+
| Google Cloud Functions | Same — process suspended after return |
|
|
60
|
+
| Vercel / Netlify Functions | Execution context torn down after response |
|
|
61
|
+
| Azure Functions | Consumption plan freezes after invocation |
|
|
62
|
+
|
|
63
|
+
**Environments where `flush()` is optional** (but harmless):
|
|
64
|
+
|
|
65
|
+
- Long-running servers (FastAPI, Flask, Django)
|
|
66
|
+
- CLI scripts
|
|
67
|
+
- Jupyter notebooks
|
|
68
|
+
- Docker containers
|
|
69
|
+
|
|
70
|
+
## How it works
|
|
71
|
+
|
|
72
|
+
- Telemetry is collected and sent asynchronously — zero impact on your agent's performance.
|
|
73
|
+
- Sessions are automatically tracked from the first LLM call through to process exit.
|
|
74
|
+
- Failures are detected and classified server-side — no configuration required.
|
|
75
|
+
- Every failure triggers an alert in your Maev dashboard and via email.
|
|
76
|
+
|
|
77
|
+
## Supported Libraries
|
|
78
|
+
|
|
79
|
+
Maev auto-instruments all major LLM frameworks including:
|
|
80
|
+
|
|
81
|
+
- OpenAI
|
|
82
|
+
- Anthropic
|
|
83
|
+
- LangChain
|
|
84
|
+
- LlamaIndex
|
|
85
|
+
- Cohere
|
|
86
|
+
- Mistral
|
|
87
|
+
- Google Gemini
|
|
88
|
+
- AWS Bedrock
|
|
89
|
+
- And more
|
|
90
|
+
|
|
91
|
+
## Requirements
|
|
92
|
+
|
|
93
|
+
- Python >= 3.9
|
|
94
|
+
|
|
95
|
+
## Your API Key
|
|
96
|
+
|
|
97
|
+
Find your API key in the [Maev Dashboard](https://home.maev.dev/dashboard/settings) under Settings.
|
|
98
|
+
Keys follow the format `vl_` followed by 64 hex characters.
|
|
99
|
+
|
|
100
|
+
## Self-hosting
|
|
101
|
+
|
|
102
|
+
If you are running Maev on your own infrastructure, pass the `endpoint` parameter:
|
|
103
|
+
|
|
104
|
+
```python
|
|
105
|
+
maev.init(api_key="vl_xxx", endpoint="https://your-maev-instance.com")
|
|
106
|
+
```
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["hatchling"]
|
|
3
|
+
build-backend = "hatchling.build"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "maev-sdk"
|
|
7
|
+
version = "0.3.0"
|
|
8
|
+
description = "Maev AI Agent Observability SDK — one-liner instrumentation for AI agents"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
license = "MIT"
|
|
11
|
+
requires-python = ">=3.9"
|
|
12
|
+
authors = [{ name = "Maev", email = "eng@maev.dev" }]
|
|
13
|
+
keywords = ["observability", "ai", "agents", "telemetry"]
|
|
14
|
+
classifiers = [
|
|
15
|
+
"Development Status :: 3 - Alpha",
|
|
16
|
+
"Intended Audience :: Developers",
|
|
17
|
+
"License :: OSI Approved :: MIT License",
|
|
18
|
+
"Programming Language :: Python :: 3",
|
|
19
|
+
"Programming Language :: Python :: 3.9",
|
|
20
|
+
"Programming Language :: Python :: 3.10",
|
|
21
|
+
"Programming Language :: Python :: 3.11",
|
|
22
|
+
"Programming Language :: Python :: 3.12",
|
|
23
|
+
"Programming Language :: Python :: 3.13",
|
|
24
|
+
"Topic :: Software Development :: Libraries",
|
|
25
|
+
"Topic :: System :: Monitoring",
|
|
26
|
+
"Typing :: Typed",
|
|
27
|
+
]
|
|
28
|
+
dependencies = [
|
|
29
|
+
"openlit>=1.33.0",
|
|
30
|
+
]
|
|
31
|
+
|
|
32
|
+
[project.urls]
|
|
33
|
+
Homepage = "https://maev.dev"
|
|
34
|
+
Documentation = "https://docs.maev.dev"
|
|
35
|
+
Repository = "https://github.com/ujjwalpreenja1308-web/veil"
|
|
36
|
+
|
|
37
|
+
[tool.hatch.build.targets.wheel]
|
|
38
|
+
packages = ["src/maev"]
|
|
@@ -0,0 +1,377 @@
|
|
|
1
|
+
"""Maev Python SDK - one-liner observability for AI agents.
|
|
2
|
+
|
|
3
|
+
Usage::
|
|
4
|
+
|
|
5
|
+
import maev
|
|
6
|
+
maev.init(api_key="vl_xxx")
|
|
7
|
+
|
|
8
|
+
# Optional: name your agent so it shows up correctly in the dashboard
|
|
9
|
+
maev.init(api_key="vl_xxx", agent_name="Sales Agent")
|
|
10
|
+
|
|
11
|
+
# In serverless functions, call flush() before the handler returns
|
|
12
|
+
# to guarantee telemetry is delivered before the process is frozen.
|
|
13
|
+
maev.flush()
|
|
14
|
+
|
|
15
|
+
That's it. Active Maev Fixes are automatically fetched and injected into
|
|
16
|
+
every LLM call - no additional setup required.
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
from __future__ import annotations
|
|
20
|
+
|
|
21
|
+
import atexit
|
|
22
|
+
import json
|
|
23
|
+
import sys
|
|
24
|
+
import threading
|
|
25
|
+
import urllib.request
|
|
26
|
+
import warnings
|
|
27
|
+
from datetime import datetime, timezone
|
|
28
|
+
from typing import List, Optional
|
|
29
|
+
|
|
30
|
+
__all__ = ["init", "flush"]
|
|
31
|
+
|
|
32
|
+
_initialized: bool = False
|
|
33
|
+
_session_id: Optional[str] = None
|
|
34
|
+
_api_key: Optional[str] = None
|
|
35
|
+
_endpoint: Optional[str] = None
|
|
36
|
+
_agent_name: Optional[str] = None
|
|
37
|
+
_flushed: bool = False # guard: flush sends session.end exactly once
|
|
38
|
+
|
|
39
|
+
# Active corrective prompts fetched from the registry on init.
|
|
40
|
+
# These are prepended to every LLM system prompt via the OpenLIT hook.
|
|
41
|
+
_active_fixes: List[str] = []
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def init(
    api_key: str,
    *,
    agent_name: str = "default",
    endpoint: str = "https://maevhq.com",
) -> None:
    """Initialize the Maev SDK.

    Safe to call once per process; duplicate calls emit a warning and are
    ignored. Registers an ``atexit`` hook so telemetry is flushed and the
    session closed when the process exits normally.

    Parameters
    ----------
    api_key:
        Your Maev API key (starts with ``vl_``).
    agent_name:
        Human-readable name for this agent. Shown in the Maev dashboard.
        Defaults to ``"default"``. Use a descriptive name like ``"Sales Agent"``.
    endpoint:
        Override the ingest URL. Leave unset unless self-hosting.

    Raises
    ------
    ValueError
        If ``api_key`` is empty, not a string, or lacks the ``vl_`` prefix.
    """
    global _initialized, _session_id, _api_key, _endpoint, _agent_name  # noqa: PLW0603

    if _initialized:
        warnings.warn(
            "maev.init() already called. Ignoring duplicate.",
            stacklevel=2,
        )
        return

    if not api_key or not isinstance(api_key, str) or not api_key.startswith("vl_"):
        raise ValueError(
            "Invalid Maev API key - must start with 'vl_'. "
            "Get your key from https://maevhq.com/dashboard/settings"
        )

    # Local import keeps module import time minimal; uuid is only needed here.
    import uuid
    _session_id = str(uuid.uuid4())
    _api_key = api_key
    # Normalize so URL concatenation elsewhere never yields double slashes.
    _endpoint = endpoint.rstrip("/")
    _agent_name = agent_name

    # 1. Fetch active corrective prompts from the registry (background thread,
    # non-blocking - if it fails, agent runs without fixes, never crashes).
    _fetch_active_fixes_async()

    # 2. Start OpenLIT instrumentation with Maev as the OTLP backend.
    _start(
        api_key=api_key,
        agent_name=agent_name,
        endpoint=_endpoint,
        session_id=_session_id,
    )

    # 3. Register the LLM pre-call hook to inject fixes into system prompts.
    # NOTE(review): step 1 populates _active_fixes on a daemon thread, so the
    # list is very likely still empty when this call checks it - confirm
    # whether hook registration is expected to race the fetch.
    _register_fix_hook()

    atexit.register(_shutdown)
    _initialized = True
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def flush(timeout: float = 5.0) -> None:
    """Flush all pending telemetry and close the current session.

    Serverless platforms (AWS Lambda, Google Cloud Functions, Vercel Edge,
    etc.) may freeze or kill the process as soon as the handler returns, so
    the ``atexit`` hook never fires there - call this explicitly before your
    handler returns.

    Idempotent: only the first call sends the ``session.end`` event; every
    later call returns immediately.

    Parameters
    ----------
    timeout:
        Seconds to wait for the HTTP request to complete. Default is 5.
    """
    global _flushed  # noqa: PLW0603

    if not _initialized:
        warnings.warn(
            "maev.flush() called before maev.init() - nothing to flush.",
            stacklevel=2,
        )
        return

    if _flushed:  # idempotency guard: only the first call does real work
        return
    _flushed = True

    # Drain buffered OTLP spans first so the LLM call data arrives before
    # the session is closed server-side.
    _flush_openlit()

    # Then tell Maev's pipeline to close and classify the session.
    _send_session_end(timeout=timeout)
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
# --- Fix Registry -------------------------------------------------------------
|
|
139
|
+
|
|
140
|
+
def _fetch_active_fixes_async() -> None:
    """Kick off the corrective-prompt fetch on a background daemon thread.

    Called once from :func:`init`. Never blocks the caller; on any failure
    ``_active_fixes`` simply stays empty and the agent runs without fixes.
    """
    # Daemon thread: never keeps the process alive at exit.
    threading.Thread(target=_fetch_active_fixes, daemon=True).start()
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
def _fetch_active_fixes() -> None:
    """Synchronously fetch active fixes from the Maev registry.

    Runs on the daemon thread started by ``_fetch_active_fixes_async``.
    On success, replaces the module-level ``_active_fixes`` list with the
    registry's corrective prompts. Any failure (network, auth, bad JSON)
    is logged to stderr and swallowed - the agent keeps running.
    """
    global _active_fixes  # noqa: PLW0603

    # init() may not have populated these yet (or init was never called).
    if not _api_key or not _endpoint or not _agent_name:
        return

    try:
        import urllib.parse
        # Agent name is user-supplied; quote it so it is URL-safe.
        url = (
            f"{_endpoint}/api/veil-fix/active"
            f"?agent_name={urllib.parse.quote(_agent_name)}"
        )
        req = urllib.request.Request(
            url,
            headers={"x-api-key": _api_key},
            method="GET",
        )
        # Short timeout: this is best-effort and must not stall startup.
        with urllib.request.urlopen(req, timeout=3.0) as resp:
            data = json.loads(resp.read().decode("utf-8"))
        prompts = data.get("corrective_prompts", [])
        if isinstance(prompts, list):
            # Coerce to str and drop falsy entries defensively.
            _active_fixes = [str(p) for p in prompts if p]
            if _active_fixes:
                print(
                    f"[maev] {len(_active_fixes)} active fix(es) loaded for agent '{_agent_name}'",
                    file=sys.stderr,
                )
    except Exception as exc:  # noqa: BLE001
        # Non-fatal - agent runs without fixes
        print(f"[maev] Could not fetch active fixes (non-fatal): {exc}", file=sys.stderr)
|
|
181
|
+
|
|
182
|
+
|
|
183
|
+
def _build_fix_prefix() -> str:
|
|
184
|
+
"""Build the corrective prompt prefix to prepend to any system prompt."""
|
|
185
|
+
if not _active_fixes:
|
|
186
|
+
return ""
|
|
187
|
+
joined = "\n".join(f"- {p}" for p in _active_fixes)
|
|
188
|
+
return f"[Maev Corrections - follow these instructions at all times]\n{joined}\n\n"
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
# --- OpenLIT Hook -------------------------------------------------------------
|
|
192
|
+
|
|
193
|
+
def _register_fix_hook() -> None:
    """Register a pre-call hook into the OpenLIT-patched LLM clients.

    OpenLIT monkey-patches openai.ChatCompletion.create (and equivalents) to
    capture spans. We wrap the same call to prepend active fixes to the
    system prompt - same interception point, zero extra setup for the engineer.

    If no fixes are active, the hook is a no-op and adds zero overhead.

    NOTE(review): _active_fixes is populated by a daemon thread started just
    before this runs during init(), so it may still be empty here and the
    hook would then never be registered for this process - confirm whether
    that race is intended.
    """
    if not _active_fixes:
        # No fixes - nothing to inject, skip hook registration entirely
        return

    _patch_openai()
    _patch_anthropic()
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
def _patch_openai() -> None:
    """Prepend fixes to OpenAI chat completion system messages.

    No-op when the ``openai`` package is not installed or no fixes are
    active. The prefix is captured once at patch time, so fixes that arrive
    later are not picked up by already-patched entry points.
    """
    try:
        import openai  # type: ignore[import-untyped]
    except ImportError:
        return

    prefix = _build_fix_prefix()
    if not prefix:
        return

    # Patch the synchronous client
    _wrap_openai_client(openai, prefix)

    # Also patch the module-level create if used directly
    # (AttributeError is expected on openai versions that do not expose
    # openai.chat.completions at module level - then only the client-class
    # patch above applies).
    try:
        original_create = openai.chat.completions.create

        def _patched_create(*args, **kwargs):  # type: ignore[no-untyped-def]
            # Inject fixes into kwargs["messages"] before delegating.
            kwargs = _inject_system_prefix(kwargs, prefix)
            return original_create(*args, **kwargs)

        openai.chat.completions.create = _patched_create
    except AttributeError:
        pass
|
|
235
|
+
|
|
236
|
+
|
|
237
|
+
def _wrap_openai_client(openai_module, prefix: str) -> None:  # type: ignore[no-untyped-def]
    """Wrap OpenAI.Chat.Completions.create on the module-level client.

    Replaces ``openai_module.OpenAI`` with a subclass whose instances rebind
    ``chat.completions.create`` to inject *prefix* into the messages list on
    every call. Only clients constructed AFTER this runs are affected.
    Failures (missing/unexpected client class) are silently ignored.
    """
    try:
        OriginalClient = openai_module.OpenAI

        class PatchedOpenAI(OriginalClient):  # type: ignore[misc]
            def __init__(self, *args, **kwargs):  # type: ignore[no-untyped-def]
                super().__init__(*args, **kwargs)
                # Capture the bound create for this instance, then shadow it.
                orig = self.chat.completions.create

                def _create(*a, **kw):  # type: ignore[no-untyped-def]
                    kw = _inject_system_prefix(kw, prefix)
                    return orig(*a, **kw)

                self.chat.completions.create = _create  # type: ignore[method-assign]

        openai_module.OpenAI = PatchedOpenAI
    except (AttributeError, TypeError):
        # Best-effort: leave the client untouched if the shape differs.
        pass
|
|
256
|
+
|
|
257
|
+
|
|
258
|
+
def _patch_anthropic() -> None:
    """Prepend fixes to Anthropic Messages.create system prompts.

    No-op when the ``anthropic`` package is not installed or no fixes are
    active. Replaces ``anthropic.Anthropic`` with a subclass whose instances
    rebind ``messages.create`` to inject the fix prefix into the top-level
    ``system`` kwarg (Anthropic does not use a "system" message entry).
    """
    try:
        import anthropic  # type: ignore[import-untyped]
    except ImportError:
        return

    prefix = _build_fix_prefix()
    if not prefix:
        return

    try:
        OriginalClient = anthropic.Anthropic

        class PatchedAnthropic(OriginalClient):  # type: ignore[misc]
            def __init__(self, *args, **kwargs):  # type: ignore[no-untyped-def]
                super().__init__(*args, **kwargs)
                orig = self.messages.create

                def _create(*a, **kw):  # type: ignore[no-untyped-def]
                    if "system" in kw and isinstance(kw["system"], str):
                        # Existing string system prompt: prepend the fixes.
                        # (Non-str "system" values are left untouched.)
                        kw["system"] = prefix + kw["system"]
                    elif "system" not in kw:
                        # No system prompt: the fixes become the whole system
                        # prompt, with trailing blank lines stripped.
                        kw["system"] = prefix.strip()
                    return orig(*a, **kw)

                self.messages.create = _create  # type: ignore[method-assign]

        anthropic.Anthropic = PatchedAnthropic
    except (AttributeError, TypeError):
        # Best-effort: leave the client untouched if the shape differs.
        pass
|
|
289
|
+
|
|
290
|
+
|
|
291
|
+
def _inject_system_prefix(kwargs: dict, prefix: str) -> dict:
|
|
292
|
+
"""Inject the fix prefix into the messages list as/before the system message."""
|
|
293
|
+
messages = kwargs.get("messages", [])
|
|
294
|
+
if not messages:
|
|
295
|
+
return kwargs
|
|
296
|
+
|
|
297
|
+
new_messages = list(messages)
|
|
298
|
+
# Check if first message is a system message
|
|
299
|
+
if new_messages and new_messages[0].get("role") == "system":
|
|
300
|
+
existing = new_messages[0].get("content", "")
|
|
301
|
+
new_messages[0] = {**new_messages[0], "content": prefix + existing}
|
|
302
|
+
else:
|
|
303
|
+
# Prepend a new system message
|
|
304
|
+
new_messages.insert(0, {"role": "system", "content": prefix.strip()})
|
|
305
|
+
|
|
306
|
+
return {**kwargs, "messages": new_messages}
|
|
307
|
+
|
|
308
|
+
|
|
309
|
+
# --- Internal -----------------------------------------------------------------
|
|
310
|
+
|
|
311
|
+
def _flush_openlit() -> None:
|
|
312
|
+
"""Best-effort flush of the OpenTelemetry SDK's span processors."""
|
|
313
|
+
try:
|
|
314
|
+
from opentelemetry import trace # type: ignore[import-untyped]
|
|
315
|
+
provider = trace.get_tracer_provider()
|
|
316
|
+
# TracerProvider has force_flush(); ProxyTracerProvider does not.
|
|
317
|
+
flush_fn = getattr(provider, "force_flush", None)
|
|
318
|
+
if callable(flush_fn):
|
|
319
|
+
flush_fn(timeout_millis=4_000)
|
|
320
|
+
except Exception as exc: # noqa: BLE001
|
|
321
|
+
print(f"[maev] OpenTelemetry flush warning: {exc}", file=sys.stderr)
|
|
322
|
+
|
|
323
|
+
|
|
324
|
+
def _send_session_end(timeout: float = 5.0) -> None:
    """HTTP POST a ``session.end`` event to the Maev ingest endpoint.

    Silently returns when the SDK was never initialized (missing key,
    endpoint, or session id). Network failures are logged to stderr and
    swallowed - flushing must never crash the host process.

    Parameters
    ----------
    timeout:
        Seconds to wait for the HTTP request. Default is 5.
    """
    if not _api_key or not _endpoint or not _session_id:
        return

    payload = json.dumps({
        "session_id": _session_id,
        "step": 0,
        "type": "session.end",
        "payload": {},
        "timestamp": datetime.now(timezone.utc).isoformat(),
    }).encode("utf-8")

    req = urllib.request.Request(
        _endpoint + "/api/ingest",
        data=payload,
        headers={
            "Content-Type": "application/json",
            "x-api-key": _api_key,
        },
        method="POST",
    )
    try:
        # Use a context manager so the HTTPResponse (and its socket) is
        # closed; the original discarded the response object without closing
        # it, leaking the connection.
        with urllib.request.urlopen(req, timeout=timeout):
            pass
    except Exception as exc:  # noqa: BLE001
        print(f"[maev] session.end flush failed: {exc}", file=sys.stderr)
|
|
350
|
+
|
|
351
|
+
|
|
352
|
+
def _start(api_key: str, agent_name: str, endpoint: str, session_id: str) -> None:
    """Internal: configure and start OpenLIT with Maev as the OTLP backend.

    Parameters
    ----------
    api_key:
        Maev API key, forwarded as the ``x-api-key`` OTLP header.
    agent_name:
        Shown as the agent name in the dashboard (via application_name).
    endpoint:
        Base ingest URL, already stripped of any trailing slash.
    session_id:
        UUID grouping every span from this process into one session.

    Raises
    ------
    ImportError
        If the ``openlit`` dependency is not installed.
    """
    try:
        import openlit  # type: ignore[import-untyped]
    except ImportError as exc:
        raise ImportError(
            "maev-sdk requires 'openlit'. Install it with: pip install maev-sdk"
        ) from exc

    # All OTLP traffic is routed to Maev's ingest endpoint.
    otlp_endpoint = endpoint + "/api/ingest/otlp"

    openlit.init(
        otlp_endpoint=otlp_endpoint,
        otlp_headers=f"x-api-key={api_key}",
        environment="production",
        # application_name -> gen_ai.system in OTLP spans -> agent name in dashboard
        application_name=agent_name,
        # session_id groups all spans from this process into one session
        session_id=session_id,
        collect_gpu_stats=False,
    )
|
|
373
|
+
|
|
374
|
+
|
|
375
|
+
def _shutdown() -> None:
    """atexit handler - delegates to flush() so session.end fires exactly once."""
    # flush() is idempotent (guarded by _flushed), so this is a no-op when the
    # user already called maev.flush() manually (e.g. in a serverless handler).
    flush()
|
|
File without changes
|