safentic 1.0.5__py3-none-any.whl → 1.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,116 @@
+ import time
+ from typing import (
+     Any,
+     Dict,
+     Optional,
+ )
+
+ from .policy_engine import PolicyEngine
+ from .logger.audit import AuditLogger
+ from ._internal.errors import EnforcementError
+ from .helper.helper import deny_response, is_tool_blocked
+
+
+ class PolicyEnforcer:
+     """
+     Runtime wrapper to evaluate and enforce tool usage policies.
+     Tracks agent-specific violations, supports audit logging,
+     and handles TTL-based tool blocks.
+
+     Requires a PolicyEngine instance at construction (no implicit defaults).
+     """
+
+     TOOL_BLOCK_TTL = 60  # seconds
+
+     def __init__(
+         self, policy_engine: PolicyEngine, audit_logger: Optional[AuditLogger] = None
+     ) -> None:
+         if not isinstance(policy_engine, PolicyEngine):
+             raise EnforcementError(
+                 "PolicyEnforcer requires a valid PolicyEngine instance"
+             )
+
+         self.policy_engine = policy_engine
+         self.agent_states: Dict[str, Dict[str, Any]] = {}
+         self.audit_logger = audit_logger or AuditLogger()
+
+     def enforce(
+         self, agent_id: str, tool_name: str, tool_args: dict[Any, Any]
+     ) -> dict[Any, Any]:
+         """
+         Evaluates a tool action for a given agent.
+         Returns a dict with 'allowed', 'reason', and agent state metadata.
+         """
+         try:
+             state = self.agent_states.setdefault(
+                 agent_id,
+                 {
+                     "blocked_tools": {},  # tool_name -> timestamp of block
+                     "violation_count": 0,
+                     "last_violation": None,
+                 },
+             )
+
+             # 1) TTL check
+             if is_tool_blocked(tool_name, state, self.TOOL_BLOCK_TTL):
+                 reason = "Tool is temporarily blocked due to a prior violation."
+                 self.audit_logger.log(
+                     agent_id=agent_id, tool=tool_name, allowed=False, reason=reason
+                 )
+                 return deny_response(tool_name, state, reason)
+
+             # 2) Policy evaluation
+             violation = self.policy_engine.evaluate_policy(
+                 tool_name, tool_args, agent_id=agent_id
+             )
+             if violation:
+                 level = violation.get("level", "block")
+                 reason = violation.get("reason", "Policy violation")
+
+                 # --- FIX: handle boolean violation reasons gracefully ---
+                 if isinstance(reason, bool):
+                     reason = "Policy violation (boolean trigger)"
+
+                 if level == "warn":
+                     self.audit_logger.log(
+                         agent_id=agent_id,
+                         tool=tool_name,
+                         allowed=True,
+                         reason=f"Warning: {reason}",
+                     )
+                     return {
+                         "allowed": True,
+                         "reason": f"Warning: {reason}",
+                         "violation": violation,
+                         "agent_state": state,
+                     }
+
+                 # Block
+                 state["blocked_tools"][tool_name] = time.time()
+                 state["violation_count"] += 1
+                 state["last_violation"] = violation
+
+                 self.audit_logger.log(
+                     agent_id=agent_id, tool=tool_name, allowed=False, reason=reason
+                 )
+                 return deny_response(tool_name, state, reason, violation)
+
+             # 3) Allowed
+             self.audit_logger.log(agent_id=agent_id, tool=tool_name, allowed=True)
+             return {
+                 "allowed": True,
+                 "reason": "Action permitted",
+                 "agent_state": state,
+             }
+
+         except Exception as e:
+             raise EnforcementError(
+                 f"Failed to enforce policy for {tool_name}: {e}"
+             ) from e
+
+     def reset(self, agent_id: Optional[str] = None) -> None:
+         """Clears violation state for one or all agents."""
+         if agent_id:
+             self.agent_states.pop(agent_id, None)
+         else:
+             self.agent_states.clear()
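For orientation, the enforcer above composes with the `PolicyEngine` from the next hunk roughly as follows. A minimal sketch, not part of the diff — the import paths are assumed from the package layout, and `OPENAI_API_KEY` must be set because `PolicyEngine` constructs an `LLMVerifier` at init:

```
from safentic.policy_engine import PolicyEngine   # import path assumed
from safentic.enforcer import PolicyEnforcer      # module name assumed

engine = PolicyEngine(policy_path="config/policy.yaml")
enforcer = PolicyEnforcer(policy_engine=engine)

decision = enforcer.enforce(
    agent_id="demo-agent",
    tool_name="sample_tool",
    tool_args={"body": "example input"},
)
print(decision["allowed"], decision["reason"])
```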
@@ -0,0 +1,141 @@
+ import os
+ import yaml
+ from typing import Optional, Dict, Any
+
+ from .verifiers.llm_verifier import LLMVerifier
+ from .logger.audit import AuditLogger
+ from ._internal.errors import PolicyValidationError
+ from .helper.helper import require, get_text_fields, ReferenceLoader
+
+
+ class PolicyEngine:
+     """
+     Loads and evaluates policies defined in YAML.
+     Orchestrates verifiers and returns unified decision objects.
+     """
+
+     VALID_RULE_TYPES = {"llm_verifier"}
+     VALID_LEVELS = {"block", "warn"}  # enforcement levels
+     VALID_RESP_FORMAT = {"boolean"}
+     VALID_MATCH_MODE = {"exact", "contains"}
+
+     def __init__(
+         self,
+         policy_path: str,
+         logger: Optional[AuditLogger] = None,
+         dry_run: bool = False,
+         no_llm: bool = False,  # <-- Added flag
+     ):
+         if not policy_path:
+             raise PolicyValidationError("Policy path must be provided to PolicyEngine")
+
+         self.policy_path = policy_path
+         self.logger = logger or AuditLogger()
+         self.dry_run = dry_run
+         self.no_llm = no_llm  # <-- Store flag
+
+         self.policy_cfg = self._load_policy()
+         self._validate_policy_cfg()
+
+         reference_dir = os.path.dirname(os.path.abspath(self.policy_path)) or "."
+         self.ref_loader = ReferenceLoader(reference_dir)
+         self.llm = LLMVerifier()
+
+     def _load_policy(self) -> Dict[str, Any]:
+         try:
+             with open(self.policy_path, "r", encoding="utf-8") as f:
+                 return yaml.safe_load(f) or {}
+         except FileNotFoundError:
+             raise PolicyValidationError(f"Policy file not found: {self.policy_path}")
+         except yaml.YAMLError as e:
+             raise PolicyValidationError(f"Invalid YAML in {self.policy_path}: {e}")
+
+     def _validate_policy_cfg(self) -> None:
+         tools = self.policy_cfg.get("tools")
+         if not isinstance(tools, dict):
+             raise PolicyValidationError(
+                 "policy.tools must be a mapping of tool names to configs"
+             )
+
+         for tool_name, cfg in tools.items():
+             rules = cfg.get("rules", [])
+             if not isinstance(rules, list):
+                 raise PolicyValidationError(f"{tool_name}.rules must be a list")
+
+             for idx, rule in enumerate(rules):
+                 rid = f"{tool_name}[{idx}]"
+                 rtype = rule.get("type")
+                 if rtype not in self.VALID_RULE_TYPES:
+                     raise PolicyValidationError(f"{rid}: unknown type '{rtype}'")
+
+                 if rtype == "llm_verifier":
+                     require(rule, rid, "instruction")
+                     require(rule, rid, "fields")
+                     require(rule, rid, "reference_file")
+
+                     fields = rule["fields"]
+                     if not isinstance(fields, list) or not all(
+                         isinstance(x, str) for x in fields
+                     ):
+                         raise PolicyValidationError(
+                             f"{rid}: 'fields' must be a list[str]"
+                         )
+
+                     if (
+                         rule.get("response_format", "boolean")
+                         not in self.VALID_RESP_FORMAT
+                     ):
+                         raise PolicyValidationError(f"{rid}: invalid response_format")
+
+                     if rule.get("match_mode", "exact") not in self.VALID_MATCH_MODE:
+                         raise PolicyValidationError(f"{rid}: invalid match_mode")
+
+                     if rule.get("level", "block") not in self.VALID_LEVELS:
+                         raise PolicyValidationError(f"{rid}: invalid level")
+
+     def evaluate_policy(
+         self, tool_name: str, tool_input: Dict[str, Any], agent_id: str
+     ) -> Optional[Dict[str, Any]]:
+         tools_cfg = self.policy_cfg.get("tools", {})
+         tool_cfg = tools_cfg.get(tool_name, {})
+         rules = tool_cfg.get("rules", [])
+
+         for rule in rules:
+             rtype = rule.get("type")
+             if rtype not in self.VALID_RULE_TYPES:
+                 continue
+
+             if rtype == "llm_verifier":
+                 if self.no_llm:
+                     continue  # <-- Skip LLM checks if flag is set
+
+                 fields = rule.get("fields", [])
+                 rid = f"{tool_name}:{rule.get('description','llm_verifier')}"
+                 text = get_text_fields(tool_input, fields, rid=rid, logger=self.logger)
+                 if not text:
+                     continue
+
+                 reference = self.ref_loader.load(rule["reference_file"])
+                 result = self.llm.evaluate(
+                     instruction=rule["instruction"],
+                     agent_output=text,
+                     reference_text=reference,
+                     rule=rule,
+                     tool=tool_name,
+                     agent_id=agent_id,
+                 )
+
+                 if result:
+                     if self.dry_run:
+                         result["reason"] = "[DRY_RUN] " + result["reason"]
+                         self.logger.log(
+                             agent_id,
+                             tool_name,
+                             allowed=True,
+                             reason=result["reason"],
+                             extra=result,
+                         )
+                         return None
+                     return result
+
+         return None
@@ -0,0 +1,238 @@
+ from __future__ import annotations
+
+ import json
+ import os
+ import re
+ from typing import Any, Dict, Optional, Union
+
+ from openai import OpenAI
+ from ..logger.audit import AuditLogger
+
+
+ JSONDict = Dict[str, Any]
+ LLMOut = Union[str, bool, JSONDict]
+
+
+ class LLMVerifier:
+     """
+     Evaluates agent tool output using an LLM to determine policy compliance.
+     Controlled entirely by developer-authored rules in policy.yaml.
+     """
+
+     def __init__(self, api_key: Optional[str] = None) -> None:
+         self.api_key = api_key or os.getenv("OPENAI_API_KEY", "")
+         if not self.api_key:
+             raise ValueError("OPENAI_API_KEY not set and no api_key provided.")
+         self.client: OpenAI = OpenAI(api_key=self.api_key)
+         self.logger: AuditLogger = AuditLogger()
+
+     def set_api_key(self, api_key: str) -> None:
+         """Allows runtime injection of the API key (e.g., from SafetyLayer)."""
+         self.api_key = api_key
+         self.client = OpenAI(api_key=api_key)
+
+     # -------------------------
+     # Formatting helpers
+     # -------------------------
+
+     def max_tokens_for_format(self, fmt: str) -> int:
+         fmt_l = (fmt or "").lower()
+         if fmt_l == "boolean":
+             return 5
+         if fmt_l == "string":
+             return 50
+         if fmt_l == "json":
+             return 80
+         return 40  # default fallback
+
+     def _to_bool(self, value: Union[str, bool]) -> bool:
+         """Normalize a string/bool into a boolean."""
+         if isinstance(value, bool):
+             return value
+         v = value.strip().lower()
+         return v in ("yes", "true", "1")
+
+     def _normalize_output(self, raw: str, response_format: str) -> LLMOut:
+         """
+         Coerce the model output into the expected response_format.
+
+         - boolean: returns True/False
+         - string: returns a trimmed string
+         - json: returns a dict (if parseable), else raises
+         """
+         fmt = (response_format or "string").lower()
+
+         if fmt == "boolean":
+             # accept "yes"/"true"/"1" as True; "no"/"false"/"0" as False
+             return self._to_bool(raw)
+
+         if fmt == "json":
+             try:
+                 parsed = json.loads(raw)
+             except Exception as e:
+                 raise ValueError(f"Expected JSON from model but failed to parse: {e}")
+             if not isinstance(parsed, dict):
+                 raise ValueError("Expected a JSON object (dict) from model.")
+             return parsed
+
+         # default string
+         return raw.strip()
+
+     # -------------------------
+     # Matching helpers
+     # -------------------------
+
+     def _check_match(
+         self,
+         llm_output: LLMOut,
+         trigger: str,
+         mode: str,
+         response_format: str,
+     ) -> bool:
+         """
+         Determine if the normalized model output matches the trigger using the chosen mode.
+
+         - For boolean response_format, booleans are compared directly.
+         - For string, exact/contains/regex are supported.
+         - For json, 'jsonpath' can be used (optional), with a fallback to string
+           contains on the canonicalized JSON.
+         """
+         fmt = (response_format or "string").lower()
+         mode_l = (mode or "exact").lower()
+
+         if fmt == "boolean":
+             # Map trigger -> boolean and compare directly.
+             out_bool = (
+                 llm_output
+                 if isinstance(llm_output, bool)
+                 else self._to_bool(str(llm_output))
+             )
+             trig_bool = self._to_bool(str(trigger))
+             return out_bool is trig_bool
+
+         if fmt == "json":
+             if mode_l == "jsonpath":
+                 try:
+                     from jsonpath_ng.ext import parse
+
+                     results = parse(trigger).find(
+                         llm_output if isinstance(llm_output, dict) else {}
+                     )
+                     return bool(results)
+                 except Exception:
+                     return False
+             # fallback: contains over a stable string form of the json
+             s = (
+                 json.dumps(llm_output, separators=(",", ":"), ensure_ascii=False)
+                 if isinstance(llm_output, dict)
+                 else str(llm_output)
+             )
+             return str(trigger).lower() in s.lower()
+
+         # string-like matching
+         s = str(llm_output).strip()
+         trig = str(trigger).strip()
+         if mode_l == "exact":
+             return s.lower() == trig.lower()
+         if mode_l == "contains":
+             return trig.lower() in s.lower()
+         if mode_l == "regex":
+             try:
+                 return re.search(trig, s, re.IGNORECASE) is not None
+             except re.error:
+                 return False
+         # default conservative
+         return False
+
+     # -------------------------
+     # Public API
+     # -------------------------
+
+     def evaluate(
+         self,
+         instruction: str,
+         agent_output: str,
+         reference_text: str,
+         rule: Dict[str, Any],
+         tool: str,
+         agent_id: str,
+     ) -> Dict[str, Any]:
+         """
+         Performs LLM evaluation and returns a structured result.
+         Never raises for trigger mismatches; only raises for fatal formatting errors.
+         """
+         response_format: str = rule.get("response_format", "boolean")
+         match_mode: str = rule.get("match_mode", "exact")
+         response_trigger: str = rule.get("response_trigger", "yes")
+         level: str = rule.get("level", "block")
+         severity: str = rule.get("severity", "medium")
+         reference_filename: Optional[str] = rule.get("reference_file")
+         model_name: str = rule.get("model", "gpt-4")
+
+         prompt = f"""{instruction}
+
+ Agent Response:
+ {agent_output}
+
+ Company Policy:
+ {reference_text}
+
+ Respond only with: {response_format}"""
+
+         try:
+             res = self.client.chat.completions.create(
+                 model=model_name,
+                 messages=[
+                     {
+                         "role": "system",
+                         "content": "You are a compliance checker for internal company policy.",
+                     },
+                     {"role": "user", "content": prompt},
+                 ],
+                 max_tokens=self.max_tokens_for_format(response_format),
+                 temperature=0,
+             )
+
+             raw_text = (res.choices[0].message.content or "").strip()
+
+             # Normalize into the declared format
+             normalized = self._normalize_output(raw_text, response_format)
+
+             # Perform trigger check safely for str/bool/json
+             matched = self._check_match(
+                 llm_output=normalized,
+                 trigger=response_trigger,
+                 mode=match_mode,
+                 response_format=response_format,
+             )
+
+         except Exception as e:
+             # Surface a clear, single-line reason to upstream callers
+             raise RuntimeError(f"LLM evaluation failed: {e}") from e
+
+         if matched:
+             reason = (
+                 f"LLM rule triggered (trigger='{response_trigger}', "
+                 f"mode='{match_mode}', fmt='{response_format}')"
+             )
+             self.logger.log(
+                 agent_id=agent_id, tool=tool, allowed=(level != "block"), reason=reason
+             )
+             return {
+                 "level": level,
+                 "severity": severity,
+                 "matched_value": (
+                     normalized if not isinstance(normalized, dict) else "[json]"
+                 ),
+                 "reference_file": reference_filename,
+                 "description": rule.get("description", ""),
+                 "tags": rule.get("tags", []),
+                 "reason": reason,
+             }
+
+         # No match → allow
+         self.logger.log(
+             agent_id=agent_id,
+             tool=tool,
+             allowed=True,
+             reason="No match",
+         )
+         return {}
@@ -0,0 +1,193 @@
+ Metadata-Version: 2.4
+ Name: safentic
+ Version: 1.0.7
+ Summary: Safentic SDK for AI agent runtime enforcement interception.
+ Author-email: Safentic <contact@safentic.com>
+ License-Expression: LicenseRef-Proprietary
+ Project-URL: Homepage, https://safentic.com
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.10
+ Description-Content-Type: text/markdown
+ License-File: LICENSE.txt
+ Requires-Dist: PyYAML
+ Requires-Dist: langchain
+ Requires-Dist: langchain-community
+ Requires-Dist: langchain-openai
+ Requires-Dist: openai
+ Requires-Dist: python-dotenv
+ Requires-Dist: sentence-transformers==3.2.1
+ Requires-Dist: requests
+ Requires-Dist: SQLAlchemy
+ Provides-Extra: dev
+ Requires-Dist: pytest; extra == "dev"
+ Requires-Dist: coverage; extra == "dev"
+ Requires-Dist: black; extra == "dev"
+ Requires-Dist: isort; extra == "dev"
+ Requires-Dist: ruff; extra == "dev"
+ Requires-Dist: mypy; extra == "dev"
+ Requires-Dist: pip-audit-extra; extra == "dev"
+ Requires-Dist: types-requests; extra == "dev"
+ Requires-Dist: types-PyYAML; extra == "dev"
+ Dynamic: license-file
+
+ # Safentic SDK
+
+ - **Safentic is a runtime guardrail SDK for agentic AI systems.**
+ - It intercepts and evaluates tool calls between agent intent and execution, enforcing custom safety policies and generating structured audit logs for compliance.
+
+ ## Installation
+ `pip install safentic`
+
+ # Quickstart: Wrap Your Agent
+
+ - Safentic works at the action boundary, not inside the model itself. You wrap your agent with SafetyLayer:
+
+ ```
+ from safentic.layer import SafetyLayer
+ from agent import AgentClassInstance  # your existing agent
+
+ agent = AgentClassInstance()
+ ```
+
+ ## Wrap with Safentic
+ ```
+ layer = SafetyLayer(agent=agent, api_key="your-api-key", agent_id="demo-agent")
+ ```
+
+ ## Example tool call
+ ```
+ try:
+     result = layer.call_tool("some_tool", {"body": "example input"})
+     print(result)
+ except Exception as e:
+     print("Blocked:", e)
+ ```
+
+ ## Output
+
+ - Blocked: Blocked by policy
+
69
+ # Configuring Your Policy File
70
+
71
+ - Safentic enforces rules defined in a YAML configuration file (e.g. policy.yaml).
72
+ - By default, it looks for config/policy.yaml, or you can set the path with:
73
+
74
+ ```
75
+ export SAFENTIC_POLICY_PATH=/path/to/policy.yaml
76
+ ```
77
+
78
+ ## Schema
79
+
80
+ - At the moment, Safentic supports the llm_verifier rule type.
81
+
82
+ ```
83
+ tools:
84
+ <tool_name>:
85
+ rules:
86
+ - type: llm_verifier
87
+ description: "<short description of what this rule enforces>"
88
+ instruction: "<prompt instruction given to the verifier LLM>"
89
+ model: "<llm model name, e.g. gpt-4>"
90
+ fields: [<list of input fields to check>]
91
+ reference_file: "<path to reference text file, optional>"
92
+ response_format: boolean
93
+ response_trigger: yes
94
+ match_mode: exact
95
+ level: block # enforcement level: block | warn
96
+ severity: high # severity: low | medium | high
97
+ tags: [<labels for filtering/searching logs>]
98
+
99
+ logging:
100
+ level: INFO
101
+ destination: "safentic/logs/txt_logs/safentic_audit.log"
102
+ jsonl: "safentic/logs/json_logs/safentic_audit.jsonl"
103
+
104
+ Example Policy (obfuscated)
105
+ tools:
106
+ sample_tool:
107
+ rules:
108
+ - type: llm_verifier
109
+ description: "Block outputs that contain disallowed terms"
110
+ instruction: "Does this text contain disallowed terms or references?"
111
+ model: gpt-4
112
+ fields: [body]
113
+ reference_file: sample_guidelines.txt
114
+ response_format: boolean
115
+ response_trigger: yes
116
+ match_mode: exact
117
+ level: block
118
+ severity: high
119
+ tags: [sample, denylist]
120
+
121
+ another_tool:
122
+ rules: [] # Explicitly allow all actions for this tool
123
+
124
+ logging:
125
+ level: INFO
126
+ destination: "safentic/logs/txt_logs/safentic_audit.log"
127
+ jsonl: "safentic/logs/json_logs/safentic_audit.jsonl"
128
+ ```
129
+
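A policy file can also be loaded and validated programmatically with the `PolicyEngine` class shipped in this release. A minimal sketch — the import path is assumed from the package layout, and `OPENAI_API_KEY` must be set because the engine constructs its LLM verifier at init even when `no_llm=True`:

```
from safentic.policy_engine import PolicyEngine  # import path assumed

# Raises PolicyValidationError if the YAML is missing, malformed,
# or fails schema checks (unknown rule type, bad fields, etc.).
engine = PolicyEngine(
    policy_path="config/policy.yaml",
    dry_run=True,   # triggered rules are logged but reported as allowed
    no_llm=False,   # set True to skip llm_verifier rules entirely
)

violation = engine.evaluate_policy(
    "sample_tool", {"body": "some text"}, agent_id="demo-agent"
)
print(violation)  # None if nothing triggered (always None in dry-run mode)
```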
+ ## Audit Logs
+
+ - Every decision is logged with context for compliance and debugging (a short parsing sketch follows the field list below):
+
+ ```
+ {
+   "timestamp": "2025-09-09T14:25:11Z",
+   "agent_id": "demo-agent",
+   "tool": "sample_tool",
+   "allowed": false,
+   "reason": "Blocked by policy",
+   "rule": "sample_tool:denylist_check",
+   "severity": "high",
+   "level": "block",
+   "tags": ["sample", "denylist"]
+ }
+ ```
+
+ ### Log Fields
+
+ - timestamp – when the action was evaluated
+ - agent_id – the agent issuing the action
+ - tool – tool name
+ - allowed – whether the action was permitted
+ - reason – why it was allowed or blocked
+ - rule – the rule that applied (if any)
+ - severity – severity of the violation
+ - level – enforcement level (block, warn)
+ - tags – categories attached to the rule
+ - extra – additional metadata (e.g., missing fields, matched text)
+
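Because each line of the JSONL log is a standalone JSON object, the audit trail is easy to post-process. A minimal sketch, assuming the default JSONL path shown above:

```
import json

blocked = []
with open("safentic/logs/json_logs/safentic_audit.jsonl", encoding="utf-8") as f:
    for line in f:
        entry = json.loads(line)
        if not entry.get("allowed"):
            blocked.append(entry)

# Print a compact summary of every blocked action.
for entry in blocked:
    print(entry["timestamp"], entry["agent_id"], entry["tool"], entry["reason"])
```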
+ # CLI Commands
+
+ - Safentic ships with a CLI for validating policies, running one-off checks, and inspecting logs:
+
+ ## Validate a policy file
+ ```
+ safentic validate-policy --policy config/policy.yaml --strict
+ ```
+
+ ## Run a one-off tool check
+ ```
+ safentic check-tool --tool sample_tool \
+     --input-json '{"body": "some text"}' \
+     --policy config/policy.yaml
+ ```
+
+ ## Tail the audit log (JSONL by default)
+ ```
+ safentic logs tail --path safentic/logs/json_logs/safentic_audit.jsonl -f
+ ```
+
+ ## Environment Variables
+
+ Set these before running Safentic:
+
+ - `OPENAI_API_KEY` – **required** for rules that use llm_verifier (e.g., GPT-4).
+ - `SAFENTIC_POLICY_PATH` – path to your policy.yaml (default: config/policy.yaml).
+ - `SAFENTIC_LOG_PATH` – override the default text audit log path.
+ - `SAFENTIC_JSON_LOG_PATH` – override the default JSONL audit log path.
+ - `LOG_LEVEL` – optional, sets verbosity (DEBUG, INFO, etc.).
+
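Since python-dotenv is a declared dependency, these variables can also be kept in a local `.env` file and loaded at startup. A minimal sketch; the `.env` file and its contents are assumptions:

```
import os

from dotenv import load_dotenv  # provided by the python-dotenv dependency

load_dotenv()  # reads OPENAI_API_KEY, SAFENTIC_POLICY_PATH, etc. from ./.env

assert os.getenv("OPENAI_API_KEY"), "OPENAI_API_KEY is required for llm_verifier rules"
os.environ.setdefault("SAFENTIC_POLICY_PATH", "config/policy.yaml")
```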
+ # Supported Stacks
+
+ - Safentic integrates with frameworks like LangChain, AutoGen, and MCP by wrapping the tool dispatcher rather than modifying the model or prompts.
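For example, a LangChain tool can point its `func` at the SafetyLayer wrapper instead of the underlying implementation. A rough sketch only — `Tool` construction details vary across LangChain versions, and the tool and field names here are placeholders:

```
from langchain.tools import Tool  # exact import varies by LangChain version

from safentic.layer import SafetyLayer
from agent import AgentClassInstance  # your existing agent

layer = SafetyLayer(agent=AgentClassInstance(), api_key="your-api-key", agent_id="demo-agent")

def guarded_sample_tool(text: str):
    # Route the call through Safentic so policy checks run before execution.
    return layer.call_tool("sample_tool", {"body": text})

sample_tool = Tool(
    name="sample_tool",
    func=guarded_sample_tool,
    description="Sample tool guarded by Safentic policies",
)
```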