agentguard-ram 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,29 @@
1
+ # Python
2
+ __pycache__/
3
+ *.pyc
4
+ *.pyo
5
+ *.pyd
6
+ .venv/
7
+ *.egg-info/
8
+ dist/
9
+ build/
10
+
11
+ # Env files
12
+ .env
13
+ .env.local
14
+ .env*.local
15
+
16
+ # Next.js
17
+ .next/
18
+ node_modules/
19
+
20
+ # Logs
21
+ *.log
22
+ logs/
23
+
24
+ # OS
25
+ .DS_Store
26
+
27
+ # IDE
28
+ .serena/
29
+ .vscode/
@@ -0,0 +1,93 @@
1
+ Metadata-Version: 2.4
2
+ Name: agentguard-ram
3
+ Version: 0.1.0
4
+ Summary: Real-time cost observability and guardrails for AI agents
5
+ Project-URL: Homepage, https://agent-guard-nine.vercel.app
6
+ Project-URL: Repository, https://github.com/RambabuArabandi/agent-guard
7
+ Project-URL: Bug Tracker, https://github.com/RambabuArabandi/agent-guard/issues
8
+ License: MIT
9
+ Keywords: agents,ai,cost,guardrails,langchain,llm,observability
10
+ Classifier: Development Status :: 4 - Beta
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: License :: OSI Approved :: MIT License
13
+ Classifier: Programming Language :: Python :: 3
14
+ Classifier: Programming Language :: Python :: 3.11
15
+ Classifier: Programming Language :: Python :: 3.12
16
+ Classifier: Topic :: Software Development :: Libraries
17
+ Requires-Python: >=3.11
18
+ Requires-Dist: httpx>=0.27.0
19
+ Requires-Dist: pydantic>=2.7.0
20
+ Provides-Extra: langchain
21
+ Requires-Dist: langchain-core>=0.2.0; extra == 'langchain'
22
+ Description-Content-Type: text/markdown
23
+
24
+ # AgentGuard
25
+
26
+ Real-time cost observability and guardrails for AI agents.
27
+
28
+ Track LLM spend, set budget thresholds, and auto-pause agents before they blow up your bill.
29
+
30
+ ## Install
31
+
32
+ ```bash
33
+ pip install agentguard-ram
34
+ ```
35
+
36
+ ## Quick Start
37
+
38
+ ```python
39
+ from agentguard import AgentGuard, AgentEvent
40
+
41
+ guard = AgentGuard(
42
+ api_key="ag_...",
43
+ agent_id="my-sales-agent",
44
+ cost_threshold=10.0, # auto-pause at $10/day
45
+ slack_webhook="https://...", # optional alert
46
+ )
47
+
48
+ # Custom agent — wrap with decorator
49
+ @guard.track
50
+ def run_my_agent():
51
+ # your agent logic here
52
+ alive = guard.send_event(AgentEvent(
53
+ agent_id="my-sales-agent",
54
+ event_type="llm_call",
55
+ model="gpt-4o",
56
+ input_tokens=500,
57
+ output_tokens=300,
58
+ ))
59
+ if not alive:
60
+ print("Agent paused — cost threshold reached")
61
+ return
62
+ ```
63
+
64
+ ## LangChain / LangGraph
65
+
66
+ ```python
67
+ from agentguard import AgentGuard
68
+
69
+ guard = AgentGuard(api_key="ag_...", agent_id="my-agent", cost_threshold=5.0)
70
+
71
+ # Drop-in callback — tracks every LLM call automatically
72
+ chain = my_chain.with_config(callbacks=[guard.callback])
73
+ result = chain.invoke({"input": "do something"})
74
+ ```
75
+
76
+ ## Supported Models
77
+
78
+ | Model | Input (per 1M) | Output (per 1M) |
79
+ |-------|---------------|----------------|
80
+ | gpt-4o | $2.50 | $10.00 |
81
+ | gpt-4o-mini | $0.15 | $0.60 |
82
+ | claude-3-5-sonnet | $3.00 | $15.00 |
83
+ | claude-3-5-haiku | $0.80 | $4.00 |
84
+ | gemini-1.5-pro | $1.25 | $5.00 |
85
+
86
+ ## Dashboard
87
+
88
+ View live costs, event feed, and manage agents at [agent-guard-nine.vercel.app](https://agent-guard-nine.vercel.app).
89
+
90
+ ## Links
91
+
92
+ - Dashboard: https://agent-guard-nine.vercel.app
93
+ - Backend API: https://agent-guard-production-dcdd.up.railway.app/docs
@@ -0,0 +1,70 @@
1
+ # AgentGuard
2
+
3
+ Real-time cost observability and guardrails for AI agents.
4
+
5
+ Track LLM spend, set budget thresholds, and auto-pause agents before they blow up your bill.
6
+
7
+ ## Install
8
+
9
+ ```bash
10
+ pip install agentguard-ram
11
+ ```
12
+
13
+ ## Quick Start
14
+
15
+ ```python
16
+ from agentguard import AgentGuard, AgentEvent
17
+
18
+ guard = AgentGuard(
19
+ api_key="ag_...",
20
+ agent_id="my-sales-agent",
21
+ cost_threshold=10.0, # auto-pause at $10/day
22
+ slack_webhook="https://...", # optional alert
23
+ )
24
+
25
+ # Custom agent — wrap with decorator
26
+ @guard.track
27
+ def run_my_agent():
28
+ # your agent logic here
29
+ alive = guard.send_event(AgentEvent(
30
+ agent_id="my-sales-agent",
31
+ event_type="llm_call",
32
+ model="gpt-4o",
33
+ input_tokens=500,
34
+ output_tokens=300,
35
+ ))
36
+ if not alive:
37
+ print("Agent paused — cost threshold reached")
38
+ return
39
+ ```
40
+
41
+ ## LangChain / LangGraph
42
+
43
+ ```python
44
+ from agentguard import AgentGuard
45
+
46
+ guard = AgentGuard(api_key="ag_...", agent_id="my-agent", cost_threshold=5.0)
47
+
48
+ # Drop-in callback — tracks every LLM call automatically
49
+ chain = my_chain.with_config(callbacks=[guard.callback])
50
+ result = chain.invoke({"input": "do something"})
51
+ ```
52
+
53
+ ## Supported Models
54
+
55
+ | Model | Input (per 1M) | Output (per 1M) |
56
+ |-------|---------------|----------------|
57
+ | gpt-4o | $2.50 | $10.00 |
58
+ | gpt-4o-mini | $0.15 | $0.60 |
59
+ | claude-3-5-sonnet | $3.00 | $15.00 |
60
+ | claude-3-5-haiku | $0.80 | $4.00 |
61
+ | gemini-1.5-pro | $1.25 | $5.00 |
62
+
63
+ ## Dashboard
64
+
65
+ View live costs, event feed, and manage agents at [agent-guard-nine.vercel.app](https://agent-guard-nine.vercel.app).
66
+
67
+ ## Links
68
+
69
+ - Dashboard: https://agent-guard-nine.vercel.app
70
+ - Backend API: https://agent-guard-production-dcdd.up.railway.app/docs
@@ -0,0 +1,42 @@
1
+ """AgentGuard — AI agent cost observability SDK."""
2
+
3
+ from .tracker import AgentGuardTracker
4
+ from .models import AgentEvent
5
+
6
+ try:
7
+ from .callbacks import AgentGuardCallback
8
+ except ImportError:
9
+ AgentGuardCallback = None # type: ignore
10
+
11
+
12
class AgentGuard(AgentGuardTracker):
    """
    Main entry point for the AgentGuard SDK.

    Inherits all tracking behavior (``send_event``, ``track``, ``is_paused``)
    from ``AgentGuardTracker`` and adds an optional LangChain bridge via the
    ``callback`` property.

    Usage::

        guard = AgentGuard(
            api_key="ag_...",
            agent_id="my-sales-agent",
            cost_threshold=10.0,
            slack_webhook="https://hooks.slack.com/...",
        )

        # LangChain / LangGraph
        chain = my_chain.with_config(callbacks=[guard.callback])

        # Custom agent
        @guard.track
        def run_my_agent():
            ...
    """

    @property
    def callback(self):
        # AgentGuardCallback is None when langchain-core is not installed
        # (see the guarded import at module top); fail loudly with a hint.
        # NOTE(review): a fresh handler instance is created on every access.
        if AgentGuardCallback is None:
            raise ImportError("Install langchain-core to use guard.callback")
        return AgentGuardCallback(self)
39
+
40
+
41
# Public API surface. AgentGuardCallback is included even though it may be
# None at runtime when langchain-core is absent (see guarded import above).
__all__ = ["AgentGuard", "AgentGuardTracker", "AgentEvent", "AgentGuardCallback"]
# Keep in sync with the version declared in pyproject.toml.
__version__ = "0.1.0"
@@ -0,0 +1,94 @@
1
+ """LangChain callback handler for AgentGuard."""
2
+
3
+ from typing import Any
4
+ from uuid import UUID
5
+
6
+ from .models import AgentEvent
7
+ from .tracker import AgentGuardTracker
8
+
9
try:
    from langchain_core.callbacks.base import BaseCallbackHandler
    from langchain_core.outputs import LLMResult

    class AgentGuardCallback(BaseCallbackHandler):
        """Drop-in LangChain callback that tracks every LLM call."""

        def __init__(self, tracker: AgentGuardTracker):
            super().__init__()
            # Tracker used to ship events; typically the AgentGuard instance.
            self.tracker = tracker

        # ── LLM events ────────────────────────────────────────────────────────

        def on_llm_end(
            self,
            response: LLMResult,
            *,
            run_id: UUID,
            parent_run_id: UUID | None = None,
            **kwargs: Any,
        ) -> None:
            """Emit one ``llm_call`` event per generation with best-effort token counts.

            Token usage location varies by provider: some integrations put it
            in per-generation ``generation_info``, while others (e.g. OpenAI)
            report an aggregate under ``response.llm_output["token_usage"]``.
            Check both; prefer per-generation values when present.
            """
            llm_output = response.llm_output or {}
            aggregate_usage = (
                llm_output.get("token_usage") or llm_output.get("usage") or {}
            )
            # Only use the aggregate as a fallback when there is a single
            # generation; otherwise the aggregate would be counted once per
            # generation and inflate costs.
            total_gens = sum(len(gens) for gens in response.generations)
            fallback = aggregate_usage if total_gens == 1 else {}

            for generations in response.generations:
                for gen in generations:
                    usage = getattr(gen, "generation_info", None) or {}
                    # Different LLM providers use different keys
                    input_tokens = (
                        usage.get("input_tokens")
                        or usage.get("prompt_tokens")
                        or usage.get("prompt_token_count")
                        or fallback.get("prompt_tokens")
                        or fallback.get("input_tokens")
                        or 0
                    )
                    output_tokens = (
                        usage.get("output_tokens")
                        or usage.get("completion_tokens")
                        or usage.get("candidates_token_count")
                        or fallback.get("completion_tokens")
                        or fallback.get("output_tokens")
                        or 0
                    )
                    model = (
                        usage.get("model_name")
                        or llm_output.get("model_name")
                        or kwargs.get("invocation_params", {}).get("model_name", "unknown")
                    )

                    self.tracker.send_event(
                        AgentEvent(
                            agent_id=self.tracker.agent_id,
                            event_type="llm_call",
                            model=model,
                            input_tokens=input_tokens,
                            output_tokens=output_tokens,
                            metadata={"run_id": str(run_id)},
                        )
                    )

        # ── Tool events ───────────────────────────────────────────────────────

        def on_tool_start(
            self,
            serialized: dict[str, Any],
            input_str: str,
            *,
            run_id: UUID,
            **kwargs: Any,
        ) -> None:
            """Emit a ``tool_call`` event when a LangChain tool starts."""
            tool_name = serialized.get("name", "unknown")
            self.tracker.send_event(
                AgentEvent(
                    agent_id=self.tracker.agent_id,
                    event_type="tool_call",
                    tool_name=tool_name,
                    metadata={"run_id": str(run_id)},
                )
            )

        # ── Chain/agent lifecycle ─────────────────────────────────────────────

        def on_chain_start(self, *args, **kwargs) -> None:
            pass  # tracked at decorator level if desired

        def on_chain_end(self, *args, **kwargs) -> None:
            pass

except ImportError:
    # langchain not installed — AgentGuardCallback unavailable
    class AgentGuardCallback:  # type: ignore
        def __init__(self, *args, **kwargs):
            raise ImportError(
                "LangChain is not installed. "
                "Run: pip install langchain-core"
            )
@@ -0,0 +1,12 @@
1
+ from pydantic import BaseModel
2
+ from typing import Literal
3
+
4
+
5
class AgentEvent(BaseModel):
    """A single telemetry event reported to the AgentGuard backend."""

    # Identifier of the agent that produced the event.
    agent_id: str
    # Kind of event: an LLM call, a tool call, or agent lifecycle markers.
    event_type: Literal["llm_call", "tool_call", "agent_start", "agent_end"]
    # Model name for llm_call events (e.g. "gpt-4o"); None for other kinds.
    model: str | None = None
    # Token counts for llm_call events; 0 when unknown or not applicable.
    input_tokens: int = 0
    output_tokens: int = 0
    # Tool name for tool_call events; None for other kinds.
    tool_name: str | None = None
    # Free-form extra context (e.g. {"run_id": ...}); sent as-is to the backend.
    metadata: dict | None = None
@@ -0,0 +1,92 @@
1
+ """Core tracking logic — sends events to the AgentGuard backend."""
2
+
3
+ import httpx
4
+ from .models import AgentEvent
5
+
6
+
7
+ class AgentGuardTracker:
8
+ def __init__(
9
+ self,
10
+ api_key: str,
11
+ agent_id: str,
12
+ backend_url: str = "https://agent-guard-production-dcdd.up.railway.app",
13
+ cost_threshold: float | None = None,
14
+ slack_webhook: str | None = None,
15
+ ):
16
+ self.api_key = api_key
17
+ self.agent_id = agent_id
18
+ self.backend_url = backend_url.rstrip("/")
19
+ self._paused = False
20
+
21
+ # Register config if threshold provided
22
+ if cost_threshold is not None:
23
+ self._register_config(cost_threshold, slack_webhook)
24
+
25
+ def _register_config(self, threshold: float, slack_webhook: str | None):
26
+ try:
27
+ with httpx.Client() as client:
28
+ client.post(
29
+ f"{self.backend_url}/config",
30
+ json={
31
+ "agent_id": self.agent_id,
32
+ "cost_threshold_usd": threshold,
33
+ "slack_webhook_url": slack_webhook,
34
+ },
35
+ headers={"x-api-key": self.api_key},
36
+ timeout=5,
37
+ )
38
+ except Exception:
39
+ pass # Don't block agent startup
40
+
41
+ def send_event(self, event: AgentEvent) -> bool:
42
+ """
43
+ Send event to backend. Returns False if agent is paused (caller should stop).
44
+ """
45
+ if self._paused:
46
+ return False
47
+ try:
48
+ with httpx.Client() as client:
49
+ resp = client.post(
50
+ f"{self.backend_url}/events",
51
+ json=event.model_dump(),
52
+ headers={"x-api-key": self.api_key},
53
+ timeout=5,
54
+ )
55
+ data = resp.json()
56
+ if data.get("paused"):
57
+ self._paused = True
58
+ return False
59
+ except Exception:
60
+ pass # Don't crash the agent on network errors
61
+ return True
62
+
63
+ @property
64
+ def is_paused(self) -> bool:
65
+ return self._paused
66
+
67
+ def track(self, fn):
68
+ """Decorator — wrap any function, emit agent_start/agent_end events."""
69
+ import functools
70
+
71
+ @functools.wraps(fn)
72
+ def wrapper(*args, **kwargs):
73
+ self.send_event(
74
+ AgentEvent(agent_id=self.agent_id, event_type="agent_start")
75
+ )
76
+ try:
77
+ result = fn(*args, **kwargs)
78
+ self.send_event(
79
+ AgentEvent(agent_id=self.agent_id, event_type="agent_end")
80
+ )
81
+ return result
82
+ except Exception as exc:
83
+ self.send_event(
84
+ AgentEvent(
85
+ agent_id=self.agent_id,
86
+ event_type="agent_end",
87
+ metadata={"error": str(exc)},
88
+ )
89
+ )
90
+ raise
91
+
92
+ return wrapper
@@ -0,0 +1,36 @@
1
+ [build-system]
2
+ requires = ["hatchling"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [tool.hatch.build.targets.wheel]
6
+ packages = ["agentguard"]
7
+
8
+ [project]
9
+ name = "agentguard-ram"
10
+ version = "0.1.0"
11
+ description = "Real-time cost observability and guardrails for AI agents"
12
+ readme = "README.md"
13
+ license = { text = "MIT" }
14
+ requires-python = ">=3.11"
15
+ keywords = ["ai", "agents", "llm", "observability", "cost", "guardrails", "langchain"]
16
+ classifiers = [
17
+ "Development Status :: 4 - Beta",
18
+ "Intended Audience :: Developers",
19
+ "License :: OSI Approved :: MIT License",
20
+ "Programming Language :: Python :: 3",
21
+ "Programming Language :: Python :: 3.11",
22
+ "Programming Language :: Python :: 3.12",
23
+ "Topic :: Software Development :: Libraries",
24
+ ]
25
+ dependencies = [
26
+ "httpx>=0.27.0",
27
+ "pydantic>=2.7.0",
28
+ ]
29
+
30
+ [project.optional-dependencies]
31
+ langchain = ["langchain-core>=0.2.0"]
32
+
33
+ [project.urls]
34
+ Homepage = "https://agent-guard-nine.vercel.app"
35
+ Repository = "https://github.com/RambabuArabandi/agent-guard"
36
+ "Bug Tracker" = "https://github.com/RambabuArabandi/agent-guard/issues"
@@ -0,0 +1,59 @@
1
+ """
2
+ Fake agent test — validates SDK → backend flow without LangChain.
3
+
4
+ Usage (from agentguard/sdk/ directory):
5
+ uv pip install -e .
6
+ AGENTGUARD_API_KEY=ag_test uv run python test_fake_agent.py
7
+
8
+ Or with custom backend:
9
+ AGENTGUARD_BACKEND_URL=http://localhost:8000 \
10
+ AGENTGUARD_API_KEY=ag_test \
11
+ uv run python test_fake_agent.py
12
+ """
13
+
14
+ import os
15
+ import sys
16
+ import time
17
+
18
+ # Allow running without installing the package (local dev)
19
+ sys.path.insert(0, os.path.dirname(__file__))
20
+ from agentguard import AgentGuard
21
+
22
# Module-level guard: constructing it registers the budget config with the
# backend (best-effort; failures are swallowed inside the SDK).
guard = AgentGuard(
    api_key=os.environ.get("AGENTGUARD_API_KEY", "ag_test"),
    agent_id="fake-test-agent",
    backend_url=os.environ.get("AGENTGUARD_BACKEND_URL", "http://localhost:8000"),
    cost_threshold=1.0,  # low threshold for testing
    slack_webhook=os.environ.get("SLACK_WEBHOOK_URL"),  # None when env var unset
)
29
+
30
+
31
@guard.track
def fake_agent():
    """Emit three simulated gpt-4o llm_call events, stopping early if paused."""
    from agentguard.models import AgentEvent

    for step in range(1, 4):
        print(f" [call {step}] sending llm_call event…")
        event = AgentEvent(
            agent_id="fake-test-agent",
            event_type="llm_call",
            model="gpt-4o",
            input_tokens=500,
            output_tokens=300,
            metadata={"step": step},
        )
        if not guard.send_event(event):
            # Backend signaled the cost threshold was hit — bail out.
            print(" !! Agent paused by AgentGuard — stopping.")
            return
        time.sleep(0.3)

    print(" Agent finished normally.")
54
+
55
+
56
if __name__ == "__main__":
    print("Running fake agent…")
    fake_agent()
    # is_paused is True only if the backend signaled a pause during the run.
    print(f"Final paused state: {guard.is_paused}")