agentguard-tech 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 AgentGuard
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,245 @@
1
+ Metadata-Version: 2.4
2
+ Name: agentguard-tech
3
+ Version: 0.1.0
4
+ Summary: Runtime security for AI agents — policy engine, audit trail, and kill switch
5
+ Author-email: AgentGuard <hello@agentguard.tech>
6
+ License: MIT
7
+ Project-URL: Homepage, https://agentguard.tech
8
+ Project-URL: Documentation, https://agentguard.tech
9
+ Project-URL: Repository, https://github.com/koshaji/agentguard
10
+ Project-URL: Demo, https://demo.agentguard.tech
11
+ Keywords: ai,agents,security,governance,policy,langchain,openai
12
+ Classifier: Development Status :: 3 - Alpha
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Topic :: Security
17
+ Classifier: Topic :: Software Development :: Libraries
18
+ Requires-Python: >=3.8
19
+ Description-Content-Type: text/markdown
20
+ License-File: LICENSE
21
+ Dynamic: license-file
22
+
23
+ # agentguard
24
+
25
+ **Runtime security for AI agents** — policy engine, audit trail, and kill switch.
26
+
27
+ [![PyPI version](https://img.shields.io/pypi/v/agentguard)](https://pypi.org/project/agentguard/)
28
+ [![Python versions](https://img.shields.io/pypi/pyversions/agentguard)](https://pypi.org/project/agentguard/)
29
+ [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
30
+
31
+ ## Overview
32
+
33
+ AgentGuard gives AI agents production-grade guardrails:
34
+
35
+ - 🛡️ **Policy evaluation** — check every tool call before execution
36
+ - 📋 **Audit trail** — tamper-evident hash chain of every action
37
+ - 🔴 **Kill switch** — instantly halt all agents
38
+ - 🔍 **Audit verification** — cryptographically verify the audit chain
39
+ - ⚡ **Zero dependencies** — pure Python stdlib, works anywhere
40
+
41
+ ---
42
+
43
+ ## Installation
44
+
45
+ ```bash
46
+ pip install agentguard
47
+ ```
48
+
49
+ Requires Python 3.8+. No external dependencies.
50
+
51
+ ---
52
+
53
+ ## Quick Start
54
+
55
+ ```python
56
+ from agentguard import AgentGuard
57
+
58
+ guard = AgentGuard(api_key="ag_your_api_key")
59
+
60
+ # Evaluate an agent action before executing it
61
+ decision = guard.evaluate(
62
+ tool="send_email",
63
+ params={"to": "user@example.com", "subject": "Hello"}
64
+ )
65
+
66
+ if decision["result"] == "allow":
67
+ print("Action allowed, risk score:", decision["riskScore"])
68
+ # proceed with tool execution
69
+ elif decision["result"] == "block":
70
+ print("Action blocked:", decision["reason"])
71
+ elif decision["result"] == "require_approval":
72
+ print("Waiting for human approval...")
73
+ elif decision["result"] == "monitor":
74
+ print("Action monitored (allowed but logged):", decision["reason"])
75
+ ```
76
+
77
+ ---
78
+
79
+ ## API Reference
80
+
81
+ ### `AgentGuard(api_key, base_url=...)`
82
+
83
+ Create a client instance.
84
+
85
+ ```python
86
+ guard = AgentGuard(
87
+ api_key="ag_your_api_key",
88
+ base_url="https://api.agentguard.tech" # optional, default shown
89
+ )
90
+ ```
91
+
92
+ ---
93
+
94
+ ### `evaluate(tool, params=None) → dict`
95
+
96
+ Evaluate a tool call against your policy. Call this **before** every tool execution.
97
+
98
+ ```python
99
+ decision = guard.evaluate("read_file", {"path": "/data/report.csv"})
100
+ # Returns:
101
+ # {
102
+ # "result": "allow", # allow | block | monitor | require_approval
103
+ # "riskScore": 5, # 0-1000
104
+ # "reason": "Matched allow-read rule",
105
+ # "durationMs": 1.2,
106
+ # "matchedRuleId": "allow-read" # optional
107
+ # }
108
+ ```
109
+
110
+ **Integration pattern:**
111
+
112
+ ```python
113
+ def safe_tool_call(tool_name, tool_func, **params):
114
+ decision = guard.evaluate(tool_name, params)
115
+ if decision["result"] in ("allow", "monitor"):
116
+ return tool_func(**params)
117
+ elif decision["result"] == "block":
118
+ raise PermissionError(f"Blocked by policy: {decision['reason']}")
119
+ elif decision["result"] == "require_approval":
120
+ raise PermissionError("Awaiting human approval")
121
+ ```
122
+
123
+ ---
124
+
125
+ ### `get_usage() → dict`
126
+
127
+ Get usage statistics for your tenant.
128
+
129
+ ```python
130
+ usage = guard.get_usage()
131
+ print(usage)
132
+ # {
133
+ # "requestsToday": 142,
134
+ # "requestsThisMonth": 3891,
135
+ # "plan": "pro",
136
+ # "limits": { "requestsPerDay": 10000 }
137
+ # }
138
+ ```
139
+
140
+ ---
141
+
142
+ ### `get_audit(limit=50, offset=0) → dict`
143
+
144
+ Get audit trail events with pagination.
145
+
146
+ ```python
147
+ audit = guard.get_audit(limit=100, offset=0)
148
+ for event in audit["events"]:
149
+ print(f"{event['timestamp']} | {event['tool']} | {event['decision']}")
150
+ ```
151
+
152
+ ---
153
+
154
+ ### `kill_switch(active) → dict`
155
+
156
+ Activate or deactivate the global kill switch.
157
+
158
+ ```python
159
+ # Emergency halt — stop all agents immediately
160
+ guard.kill_switch(True)
161
+
162
+ # Resume operations
163
+ guard.kill_switch(False)
164
+ ```
165
+
166
+ ---
167
+
168
+ ### `verify_audit() → dict`
169
+
170
+ Verify the cryptographic integrity of the audit hash chain.
171
+
172
+ ```python
173
+ result = guard.verify_audit()
174
+ if result["valid"]:
175
+ print("Audit chain is intact")
176
+ else:
177
+ print(f"Chain broken at event index: {result['invalidAt']}")
178
+ ```
179
+
180
+ ---
181
+
182
+ ## Complete Example — LangChain-style Agent
183
+
184
+ ```python
185
+ from agentguard import AgentGuard
186
+
187
+ guard = AgentGuard(api_key="ag_your_api_key")
188
+
189
+ def run_tool(name: str, func, **params):
190
+ """Execute a tool with AgentGuard policy enforcement."""
191
+ decision = guard.evaluate(name, params)
192
+
193
+ result = decision["result"]
194
+ if result == "block":
195
+ raise PermissionError(f"Policy blocked {name}: {decision['reason']}")
196
+ if result == "require_approval":
197
+ raise PermissionError(f"Human approval required for {name}")
198
+
199
+ # "allow" or "monitor" — proceed
200
+ return func(**params)
201
+
202
+
203
+ # Your tools
204
+ def send_email(to: str, subject: str, body: str) -> str:
205
+ # ... send the email
206
+ return f"Email sent to {to}"
207
+
208
+ def read_file(path: str) -> str:
209
+ with open(path) as f:
210
+ return f.read()
211
+
212
+
213
+ # Use with policy enforcement
214
+ content = run_tool("read_file", read_file, path="/data/report.csv")
215
+ run_tool("send_email", send_email, to="boss@company.com", subject="Report", body=content)
216
+ ```
217
+
218
+ ---
219
+
220
+ ## Error Handling
221
+
222
+ ```python
223
+ from agentguard import AgentGuard
224
+
225
+ guard = AgentGuard(api_key="ag_your_key")
226
+
227
+ try:
228
+ decision = guard.evaluate("dangerous_tool", {"target": "production_db"})
229
+ except RuntimeError as e:
230
+ print(f"API error: {e}")
231
+ # RuntimeError: AgentGuard API error: 401 Unauthorized
232
+ ```
233
+
234
+ ---
235
+
236
+ ## Links
237
+
238
+ - 🌐 [agentguard.tech](https://agentguard.tech)
239
+ - 🎮 [Live Demo](https://demo.agentguard.tech)
240
+ - 📦 [GitHub](https://github.com/koshaji/agentguard)
241
+ - 📘 [npm SDK](https://www.npmjs.com/package/@agentguard/sdk)
242
+
243
+ ## License
244
+
245
+ MIT
@@ -0,0 +1,223 @@
1
+ # agentguard
2
+
3
+ **Runtime security for AI agents** — policy engine, audit trail, and kill switch.
4
+
5
+ [![PyPI version](https://img.shields.io/pypi/v/agentguard)](https://pypi.org/project/agentguard/)
6
+ [![Python versions](https://img.shields.io/pypi/pyversions/agentguard)](https://pypi.org/project/agentguard/)
7
+ [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
8
+
9
+ ## Overview
10
+
11
+ AgentGuard gives AI agents production-grade guardrails:
12
+
13
+ - 🛡️ **Policy evaluation** — check every tool call before execution
14
+ - 📋 **Audit trail** — tamper-evident hash chain of every action
15
+ - 🔴 **Kill switch** — instantly halt all agents
16
+ - 🔍 **Audit verification** — cryptographically verify the audit chain
17
+ - ⚡ **Zero dependencies** — pure Python stdlib, works anywhere
18
+
19
+ ---
20
+
21
+ ## Installation
22
+
23
+ ```bash
24
+ pip install agentguard
25
+ ```
26
+
27
+ Requires Python 3.8+. No external dependencies.
28
+
29
+ ---
30
+
31
+ ## Quick Start
32
+
33
+ ```python
34
+ from agentguard import AgentGuard
35
+
36
+ guard = AgentGuard(api_key="ag_your_api_key")
37
+
38
+ # Evaluate an agent action before executing it
39
+ decision = guard.evaluate(
40
+ tool="send_email",
41
+ params={"to": "user@example.com", "subject": "Hello"}
42
+ )
43
+
44
+ if decision["result"] == "allow":
45
+ print("Action allowed, risk score:", decision["riskScore"])
46
+ # proceed with tool execution
47
+ elif decision["result"] == "block":
48
+ print("Action blocked:", decision["reason"])
49
+ elif decision["result"] == "require_approval":
50
+ print("Waiting for human approval...")
51
+ elif decision["result"] == "monitor":
52
+ print("Action monitored (allowed but logged):", decision["reason"])
53
+ ```
54
+
55
+ ---
56
+
57
+ ## API Reference
58
+
59
+ ### `AgentGuard(api_key, base_url=...)`
60
+
61
+ Create a client instance.
62
+
63
+ ```python
64
+ guard = AgentGuard(
65
+ api_key="ag_your_api_key",
66
+ base_url="https://api.agentguard.tech" # optional, default shown
67
+ )
68
+ ```
69
+
70
+ ---
71
+
72
+ ### `evaluate(tool, params=None) → dict`
73
+
74
+ Evaluate a tool call against your policy. Call this **before** every tool execution.
75
+
76
+ ```python
77
+ decision = guard.evaluate("read_file", {"path": "/data/report.csv"})
78
+ # Returns:
79
+ # {
80
+ # "result": "allow", # allow | block | monitor | require_approval
81
+ # "riskScore": 5, # 0-1000
82
+ # "reason": "Matched allow-read rule",
83
+ # "durationMs": 1.2,
84
+ # "matchedRuleId": "allow-read" # optional
85
+ # }
86
+ ```
87
+
88
+ **Integration pattern:**
89
+
90
+ ```python
91
+ def safe_tool_call(tool_name, tool_func, **params):
92
+ decision = guard.evaluate(tool_name, params)
93
+ if decision["result"] in ("allow", "monitor"):
94
+ return tool_func(**params)
95
+ elif decision["result"] == "block":
96
+ raise PermissionError(f"Blocked by policy: {decision['reason']}")
97
+ elif decision["result"] == "require_approval":
98
+ raise PermissionError("Awaiting human approval")
99
+ ```
100
+
101
+ ---
102
+
103
+ ### `get_usage() → dict`
104
+
105
+ Get usage statistics for your tenant.
106
+
107
+ ```python
108
+ usage = guard.get_usage()
109
+ print(usage)
110
+ # {
111
+ # "requestsToday": 142,
112
+ # "requestsThisMonth": 3891,
113
+ # "plan": "pro",
114
+ # "limits": { "requestsPerDay": 10000 }
115
+ # }
116
+ ```
117
+
118
+ ---
119
+
120
+ ### `get_audit(limit=50, offset=0) → dict`
121
+
122
+ Get audit trail events with pagination.
123
+
124
+ ```python
125
+ audit = guard.get_audit(limit=100, offset=0)
126
+ for event in audit["events"]:
127
+ print(f"{event['timestamp']} | {event['tool']} | {event['decision']}")
128
+ ```
129
+
130
+ ---
131
+
132
+ ### `kill_switch(active) → dict`
133
+
134
+ Activate or deactivate the global kill switch.
135
+
136
+ ```python
137
+ # Emergency halt — stop all agents immediately
138
+ guard.kill_switch(True)
139
+
140
+ # Resume operations
141
+ guard.kill_switch(False)
142
+ ```
143
+
144
+ ---
145
+
146
+ ### `verify_audit() → dict`
147
+
148
+ Verify the cryptographic integrity of the audit hash chain.
149
+
150
+ ```python
151
+ result = guard.verify_audit()
152
+ if result["valid"]:
153
+ print("Audit chain is intact")
154
+ else:
155
+ print(f"Chain broken at event index: {result['invalidAt']}")
156
+ ```
157
+
158
+ ---
159
+
160
+ ## Complete Example — LangChain-style Agent
161
+
162
+ ```python
163
+ from agentguard import AgentGuard
164
+
165
+ guard = AgentGuard(api_key="ag_your_api_key")
166
+
167
+ def run_tool(name: str, func, **params):
168
+ """Execute a tool with AgentGuard policy enforcement."""
169
+ decision = guard.evaluate(name, params)
170
+
171
+ result = decision["result"]
172
+ if result == "block":
173
+ raise PermissionError(f"Policy blocked {name}: {decision['reason']}")
174
+ if result == "require_approval":
175
+ raise PermissionError(f"Human approval required for {name}")
176
+
177
+ # "allow" or "monitor" — proceed
178
+ return func(**params)
179
+
180
+
181
+ # Your tools
182
+ def send_email(to: str, subject: str, body: str) -> str:
183
+ # ... send the email
184
+ return f"Email sent to {to}"
185
+
186
+ def read_file(path: str) -> str:
187
+ with open(path) as f:
188
+ return f.read()
189
+
190
+
191
+ # Use with policy enforcement
192
+ content = run_tool("read_file", read_file, path="/data/report.csv")
193
+ run_tool("send_email", send_email, to="boss@company.com", subject="Report", body=content)
194
+ ```
195
+
196
+ ---
197
+
198
+ ## Error Handling
199
+
200
+ ```python
201
+ from agentguard import AgentGuard
202
+
203
+ guard = AgentGuard(api_key="ag_your_key")
204
+
205
+ try:
206
+ decision = guard.evaluate("dangerous_tool", {"target": "production_db"})
207
+ except RuntimeError as e:
208
+ print(f"API error: {e}")
209
+ # RuntimeError: AgentGuard API error: 401 Unauthorized
210
+ ```
211
+
212
+ ---
213
+
214
+ ## Links
215
+
216
+ - 🌐 [agentguard.tech](https://agentguard.tech)
217
+ - 🎮 [Live Demo](https://demo.agentguard.tech)
218
+ - 📦 [GitHub](https://github.com/koshaji/agentguard)
219
+ - 📘 [npm SDK](https://www.npmjs.com/package/@agentguard/sdk)
220
+
221
+ ## License
222
+
223
+ MIT
@@ -0,0 +1,5 @@
1
+ """AgentGuard — Runtime security for AI agents."""
2
+ from .client import AgentGuard
3
+
4
+ __version__ = "0.1.0"
5
+ __all__ = ["AgentGuard"]
@@ -0,0 +1,79 @@
1
+ """AgentGuard Python SDK — Runtime security for AI agents."""
2
+ import json
3
+ from typing import Any, Optional
4
+ from urllib.request import Request, urlopen
5
+ from urllib.error import HTTPError
6
+
7
+
8
class AgentGuard:
    """Client for the AgentGuard HTTP API.

    Thin stdlib-only wrapper: each public method issues one HTTPS request
    authenticated via the ``X-API-Key`` header and returns the decoded
    JSON body as a dict.
    """

    def __init__(
        self,
        api_key: str,
        base_url: str = "https://api.agentguard.tech",
        timeout: float = 30.0,
    ):
        """Create a client.

        Args:
            api_key: Tenant API key sent in the X-API-Key header.
            base_url: API root; a trailing slash is stripped so path
                joining below never produces "//".
            timeout: Socket timeout in seconds for each request. Without
                it, urlopen can block indefinitely on a stalled server.
        """
        self.api_key = api_key
        self.base_url = base_url.rstrip("/")
        self.timeout = timeout

    def _request(self, method: str, path: str, body: Optional[dict] = None) -> dict:
        """Send one JSON request and return the decoded JSON response.

        Raises:
            RuntimeError: on any non-2xx HTTP response, with the status
                code and response body in the message.
        """
        url = f"{self.base_url}{path}"
        headers = {
            "X-API-Key": self.api_key,
            "Content-Type": "application/json",
        }
        # Check against None, not truthiness: an explicitly supplied empty
        # dict ({}) is a valid JSON payload and must still be sent.
        data = json.dumps(body).encode() if body is not None else None
        req = Request(url, data=data, headers=headers, method=method)
        try:
            with urlopen(req, timeout=self.timeout) as resp:
                return json.loads(resp.read().decode())
        except HTTPError as e:
            raise RuntimeError(f"AgentGuard API error: {e.code} {e.read().decode()}") from e

    def evaluate(self, tool: str, params: Optional[dict] = None) -> dict:
        """Evaluate an agent action against the policy engine.

        Args:
            tool: Name of the tool being called (e.g. "send_email", "read_file")
            params: Optional dict of parameters passed to the tool

        Returns:
            dict with keys: result, riskScore, reason, durationMs, matchedRuleId (optional)
            result is one of: "allow", "block", "monitor", "require_approval"
        """
        return self._request("POST", "/api/v1/evaluate", {"tool": tool, "params": params or {}})

    def get_usage(self) -> dict:
        """Get usage statistics for your tenant.

        Returns:
            dict with usage data including request counts and limits
        """
        return self._request("GET", "/api/v1/usage")

    def get_audit(self, limit: int = 50, offset: int = 0) -> dict:
        """Get audit trail events.

        Args:
            limit: Maximum number of events to return (default 50)
            offset: Pagination offset (default 0)

        Returns:
            dict with 'events' list and pagination metadata
        """
        return self._request("GET", f"/api/v1/audit?limit={limit}&offset={offset}")

    def kill_switch(self, active: bool) -> dict:
        """Activate or deactivate the kill switch.

        Args:
            active: True to halt all agents, False to resume operations

        Returns:
            dict with confirmation of the kill switch state
        """
        return self._request("POST", "/api/v1/killswitch", {"active": active})

    def verify_audit(self) -> dict:
        """Verify audit trail hash chain integrity.

        Returns:
            dict with 'valid' boolean and optional 'invalidAt' index
        """
        return self._request("GET", "/api/v1/audit/verify")
@@ -0,0 +1,245 @@
1
+ Metadata-Version: 2.4
2
+ Name: agentguard-tech
3
+ Version: 0.1.0
4
+ Summary: Runtime security for AI agents — policy engine, audit trail, and kill switch
5
+ Author-email: AgentGuard <hello@agentguard.tech>
6
+ License: MIT
7
+ Project-URL: Homepage, https://agentguard.tech
8
+ Project-URL: Documentation, https://agentguard.tech
9
+ Project-URL: Repository, https://github.com/koshaji/agentguard
10
+ Project-URL: Demo, https://demo.agentguard.tech
11
+ Keywords: ai,agents,security,governance,policy,langchain,openai
12
+ Classifier: Development Status :: 3 - Alpha
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Topic :: Security
17
+ Classifier: Topic :: Software Development :: Libraries
18
+ Requires-Python: >=3.8
19
+ Description-Content-Type: text/markdown
20
+ License-File: LICENSE
21
+ Dynamic: license-file
22
+
23
+ # agentguard
24
+
25
+ **Runtime security for AI agents** — policy engine, audit trail, and kill switch.
26
+
27
+ [![PyPI version](https://img.shields.io/pypi/v/agentguard)](https://pypi.org/project/agentguard/)
28
+ [![Python versions](https://img.shields.io/pypi/pyversions/agentguard)](https://pypi.org/project/agentguard/)
29
+ [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
30
+
31
+ ## Overview
32
+
33
+ AgentGuard gives AI agents production-grade guardrails:
34
+
35
+ - 🛡️ **Policy evaluation** — check every tool call before execution
36
+ - 📋 **Audit trail** — tamper-evident hash chain of every action
37
+ - 🔴 **Kill switch** — instantly halt all agents
38
+ - 🔍 **Audit verification** — cryptographically verify the audit chain
39
+ - ⚡ **Zero dependencies** — pure Python stdlib, works anywhere
40
+
41
+ ---
42
+
43
+ ## Installation
44
+
45
+ ```bash
46
+ pip install agentguard-tech
47
+ ```
48
+
49
+ Requires Python 3.8+. No external dependencies.
50
+
51
+ ---
52
+
53
+ ## Quick Start
54
+
55
+ ```python
56
+ from agentguard import AgentGuard
57
+
58
+ guard = AgentGuard(api_key="ag_your_api_key")
59
+
60
+ # Evaluate an agent action before executing it
61
+ decision = guard.evaluate(
62
+ tool="send_email",
63
+ params={"to": "user@example.com", "subject": "Hello"}
64
+ )
65
+
66
+ if decision["result"] == "allow":
67
+ print("Action allowed, risk score:", decision["riskScore"])
68
+ # proceed with tool execution
69
+ elif decision["result"] == "block":
70
+ print("Action blocked:", decision["reason"])
71
+ elif decision["result"] == "require_approval":
72
+ print("Waiting for human approval...")
73
+ elif decision["result"] == "monitor":
74
+ print("Action monitored (allowed but logged):", decision["reason"])
75
+ ```
76
+
77
+ ---
78
+
79
+ ## API Reference
80
+
81
+ ### `AgentGuard(api_key, base_url=...)`
82
+
83
+ Create a client instance.
84
+
85
+ ```python
86
+ guard = AgentGuard(
87
+ api_key="ag_your_api_key",
88
+ base_url="https://api.agentguard.tech" # optional, default shown
89
+ )
90
+ ```
91
+
92
+ ---
93
+
94
+ ### `evaluate(tool, params=None) → dict`
95
+
96
+ Evaluate a tool call against your policy. Call this **before** every tool execution.
97
+
98
+ ```python
99
+ decision = guard.evaluate("read_file", {"path": "/data/report.csv"})
100
+ # Returns:
101
+ # {
102
+ # "result": "allow", # allow | block | monitor | require_approval
103
+ # "riskScore": 5, # 0-1000
104
+ # "reason": "Matched allow-read rule",
105
+ # "durationMs": 1.2,
106
+ # "matchedRuleId": "allow-read" # optional
107
+ # }
108
+ ```
109
+
110
+ **Integration pattern:**
111
+
112
+ ```python
113
+ def safe_tool_call(tool_name, tool_func, **params):
114
+ decision = guard.evaluate(tool_name, params)
115
+ if decision["result"] in ("allow", "monitor"):
116
+ return tool_func(**params)
117
+ elif decision["result"] == "block":
118
+ raise PermissionError(f"Blocked by policy: {decision['reason']}")
119
+ elif decision["result"] == "require_approval":
120
+ raise PermissionError("Awaiting human approval")
121
+ ```
122
+
123
+ ---
124
+
125
+ ### `get_usage() → dict`
126
+
127
+ Get usage statistics for your tenant.
128
+
129
+ ```python
130
+ usage = guard.get_usage()
131
+ print(usage)
132
+ # {
133
+ # "requestsToday": 142,
134
+ # "requestsThisMonth": 3891,
135
+ # "plan": "pro",
136
+ # "limits": { "requestsPerDay": 10000 }
137
+ # }
138
+ ```
139
+
140
+ ---
141
+
142
+ ### `get_audit(limit=50, offset=0) → dict`
143
+
144
+ Get audit trail events with pagination.
145
+
146
+ ```python
147
+ audit = guard.get_audit(limit=100, offset=0)
148
+ for event in audit["events"]:
149
+ print(f"{event['timestamp']} | {event['tool']} | {event['decision']}")
150
+ ```
151
+
152
+ ---
153
+
154
+ ### `kill_switch(active) → dict`
155
+
156
+ Activate or deactivate the global kill switch.
157
+
158
+ ```python
159
+ # Emergency halt — stop all agents immediately
160
+ guard.kill_switch(True)
161
+
162
+ # Resume operations
163
+ guard.kill_switch(False)
164
+ ```
165
+
166
+ ---
167
+
168
+ ### `verify_audit() → dict`
169
+
170
+ Verify the cryptographic integrity of the audit hash chain.
171
+
172
+ ```python
173
+ result = guard.verify_audit()
174
+ if result["valid"]:
175
+ print("Audit chain is intact")
176
+ else:
177
+ print(f"Chain broken at event index: {result['invalidAt']}")
178
+ ```
179
+
180
+ ---
181
+
182
+ ## Complete Example — LangChain-style Agent
183
+
184
+ ```python
185
+ from agentguard import AgentGuard
186
+
187
+ guard = AgentGuard(api_key="ag_your_api_key")
188
+
189
+ def run_tool(name: str, func, **params):
190
+ """Execute a tool with AgentGuard policy enforcement."""
191
+ decision = guard.evaluate(name, params)
192
+
193
+ result = decision["result"]
194
+ if result == "block":
195
+ raise PermissionError(f"Policy blocked {name}: {decision['reason']}")
196
+ if result == "require_approval":
197
+ raise PermissionError(f"Human approval required for {name}")
198
+
199
+ # "allow" or "monitor" — proceed
200
+ return func(**params)
201
+
202
+
203
+ # Your tools
204
+ def send_email(to: str, subject: str, body: str) -> str:
205
+ # ... send the email
206
+ return f"Email sent to {to}"
207
+
208
+ def read_file(path: str) -> str:
209
+ with open(path) as f:
210
+ return f.read()
211
+
212
+
213
+ # Use with policy enforcement
214
+ content = run_tool("read_file", read_file, path="/data/report.csv")
215
+ run_tool("send_email", send_email, to="boss@company.com", subject="Report", body=content)
216
+ ```
217
+
218
+ ---
219
+
220
+ ## Error Handling
221
+
222
+ ```python
223
+ from agentguard import AgentGuard
224
+
225
+ guard = AgentGuard(api_key="ag_your_key")
226
+
227
+ try:
228
+ decision = guard.evaluate("dangerous_tool", {"target": "production_db"})
229
+ except RuntimeError as e:
230
+ print(f"API error: {e}")
231
+ # RuntimeError: AgentGuard API error: 401 Unauthorized
232
+ ```
233
+
234
+ ---
235
+
236
+ ## Links
237
+
238
+ - 🌐 [agentguard.tech](https://agentguard.tech)
239
+ - 🎮 [Live Demo](https://demo.agentguard.tech)
240
+ - 📦 [GitHub](https://github.com/koshaji/agentguard)
241
+ - 📘 [npm SDK](https://www.npmjs.com/package/@agentguard/sdk)
242
+
243
+ ## License
244
+
245
+ MIT
@@ -0,0 +1,9 @@
1
+ LICENSE
2
+ README.md
3
+ pyproject.toml
4
+ agentguard/__init__.py
5
+ agentguard/client.py
6
+ agentguard_tech.egg-info/PKG-INFO
7
+ agentguard_tech.egg-info/SOURCES.txt
8
+ agentguard_tech.egg-info/dependency_links.txt
9
+ agentguard_tech.egg-info/top_level.txt
@@ -0,0 +1,27 @@
1
+ [build-system]
2
+ requires = ["setuptools>=68.0", "wheel"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "agentguard-tech"
7
+ version = "0.1.0"
8
+ description = "Runtime security for AI agents — policy engine, audit trail, and kill switch"
9
+ readme = "README.md"
10
+ license = {text = "MIT"}
11
+ requires-python = ">=3.8"
12
+ authors = [{name = "AgentGuard", email = "hello@agentguard.tech"}]
13
+ keywords = ["ai", "agents", "security", "governance", "policy", "langchain", "openai"]
14
+ classifiers = [
15
+ "Development Status :: 3 - Alpha",
16
+ "Intended Audience :: Developers",
17
+ "License :: OSI Approved :: MIT License",
18
+ "Programming Language :: Python :: 3",
19
+ "Topic :: Security",
20
+ "Topic :: Software Development :: Libraries",
21
+ ]
22
+
23
+ [project.urls]
24
+ Homepage = "https://agentguard.tech"
25
+ Documentation = "https://agentguard.tech"
26
+ Repository = "https://github.com/koshaji/agentguard"
27
+ Demo = "https://demo.agentguard.tech"
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+