safentic 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- safentic/LICENSE.txt +36 -0
- safentic/__init__.py +8 -0
- safentic/config.py +2 -0
- safentic/engine.py +80 -0
- safentic/helper/__init__.py +0 -0
- safentic/helper/auth.py +12 -0
- safentic/layer.py +50 -0
- safentic/logger/__init__.py +0 -0
- safentic/logger/audit.py +83 -0
- safentic/policies/.gitkeep +0 -0
- safentic/policies/__init__.py +3 -0
- safentic/policies/example_policy.txt +33 -0
- safentic/policies/policy.yaml +25 -0
- safentic/policy.py +106 -0
- safentic/verifiers/__init__.py +0 -0
- safentic/verifiers/sentence_verifier.py +69 -0
- safentic-0.0.1.dist-info/METADATA +54 -0
- safentic-0.0.1.dist-info/RECORD +22 -0
- safentic-0.0.1.dist-info/WHEEL +5 -0
- safentic-0.0.1.dist-info/top_level.txt +2 -0
- tests/__init__.py +0 -0
- tests/test_all.py +132 -0
safentic/LICENSE.txt
ADDED
@@ -0,0 +1,36 @@
|
|
1
|
+
Safentic SDK Commercial License Agreement
|
2
|
+
=========================================
|
3
|
+
|
4
|
+
IMPORTANT – READ CAREFULLY:
|
5
|
+
|
6
|
+
This license governs the use of the Safentic SDK (“Software”) developed and owned by Safentic. By installing or using this Software, you (“Licensee”) agree to the following terms and conditions:
|
7
|
+
|
8
|
+
1. GRANT OF LICENSE
|
9
|
+
Licensor grants Licensee a non-exclusive, non-transferable, non-sublicensable license to use the Software solely for internal business purposes and only in accordance with the terms of the commercial agreement executed between the parties. This license may include limited evaluation rights, subject to expiration or usage restrictions.
|
10
|
+
|
11
|
+
2. RESTRICTIONS
|
12
|
+
Licensee shall NOT:
|
13
|
+
- Use the Software without a valid, active license key or subscription.
|
14
|
+
- Reverse engineer, decompile, or disassemble the Software.
|
15
|
+
- Modify, copy, or create derivative works based on the Software.
|
16
|
+
- Distribute, sublicense, lease, or otherwise make the Software available to any third party.
|
17
|
+
- Circumvent or attempt to disable any license verification, telemetry, or access control mechanisms.
|
18
|
+
|
19
|
+
3. OWNERSHIP
|
20
|
+
The Software is licensed, not sold. Safentic retains all rights, title, and interest in and to the Software, including all intellectual property rights.
|
21
|
+
|
22
|
+
4. TERMINATION
|
23
|
+
This license is effective until terminated. It will terminate automatically without notice if Licensee breaches any term of this agreement. Upon termination, Licensee must cease all use and destroy all copies of the Software.
|
24
|
+
|
25
|
+
5. NO WARRANTY
|
26
|
+
The Software is provided "as is" without warranty of any kind. Licensor disclaims all warranties, express or implied, including but not limited to warranties of merchantability and fitness for a particular purpose.
|
27
|
+
|
28
|
+
6. LIMITATION OF LIABILITY
|
29
|
+
In no event shall Licensor be liable for any damages arising out of the use or inability to use the Software, including but not limited to incidental, special, or consequential damages.
|
30
|
+
|
31
|
+
7. GOVERNING LAW
|
32
|
+
This agreement shall be governed by and construed in accordance with the laws of Ireland, without regard to its conflict of law principles.
|
33
|
+
|
34
|
+
For licensing inquiries, please contact: contact@safentic.com
|
35
|
+
|
36
|
+
Copyright © 2025, Safentic. All rights reserved.
|
safentic/__init__.py
ADDED
safentic/config.py
ADDED
safentic/engine.py
ADDED
@@ -0,0 +1,80 @@
|
|
1
|
+
from .policy import PolicyEngine
|
2
|
+
from .logger.audit import AuditLogger
|
3
|
+
|
4
|
+
class PolicyEnforcer:
    """
    Runtime wrapper that evaluates and enforces tool-usage policies.

    Keeps per-agent state (blocked tools, violation counts) so an agent
    that has already violated a policy for a tool is denied on every
    subsequent attempt without re-running the policy checks.
    """

    def __init__(self, policy_engine: "PolicyEngine | None" = None):
        # Fall back to a default engine when none is injected.
        self.policy_engine = policy_engine or PolicyEngine()
        # agent_id -> {"blocked_tools": set, "violation_count": int,
        #              "last_violation": str | None}
        self.agent_states: dict = {}
        self.audit_logger = AuditLogger()

    def enforce(self, agent_id: str, tool_name: str, tool_args: dict) -> dict:
        """
        Evaluate a tool action for a given agent.

        Returns a dict with 'allowed', 'reason', and agent state metadata.
        Denied tools are remembered per agent and short-circuit future calls.
        """
        state = self.agent_states.setdefault(agent_id, {
            "blocked_tools": set(),
            "violation_count": 0,
            "last_violation": None,
        })

        # Short-circuit: repeat attempts on an already-denied tool are
        # blocked without consulting the policy engine again.
        if tool_name in state["blocked_tools"]:
            reason = "Tool previously blocked for this agent."
            self.audit_logger.log(
                agent_id=agent_id,
                tool=tool_name,
                allowed=False,
                reason=reason,
            )
            return self._deny(tool_name, state, reason)

        # Run policy evaluation; a truthy result is the human-readable
        # reason for blocking, None means the action is permitted.
        violation = self.policy_engine.evaluate_policy(tool_name, tool_args)

        if violation:
            state["blocked_tools"].add(tool_name)
            state["violation_count"] += 1
            state["last_violation"] = violation
            self.audit_logger.log(
                agent_id=agent_id,
                tool=tool_name,
                allowed=False,
                reason=violation,
            )
            return self._deny(tool_name, state, violation)

        # Log allowed action.
        self.audit_logger.log(
            agent_id=agent_id,
            tool=tool_name,
            allowed=True,
        )

        return {
            "allowed": True,
            "reason": "Action permitted",
            "agent_state": state,
        }

    def reset(self, agent_id: "str | None" = None):
        """Clear violation state for one agent, or for all agents when None."""
        if agent_id:
            self.agent_states.pop(agent_id, None)
        else:
            self.agent_states.clear()

    def _deny(self, tool_name: str, state: dict, reason: str) -> dict:
        """Build the standard denial payload returned by enforce()."""
        return {
            "allowed": False,
            "reason": reason,
            "tool": tool_name,
            "agent_state": state,
        }
|
File without changes
|
safentic/helper/auth.py
ADDED
@@ -0,0 +1,12 @@
|
|
1
|
+
import requests
|
2
|
+
from ..config import *
|
3
|
+
|
4
|
+
def validate_api_key(key: str) -> dict:
    """
    Validate an API key against the Safentic licensing endpoint.

    Returns {"valid": True, ...server fields...} on success, or
    {"valid": False} on any HTTP error, network failure, or malformed
    response body. Never raises: callers treat validation as best-effort.
    """
    try:
        response = requests.post(
            BASE_API_PATH + API_KEY_ENDPOINT,
            json={"api_key": key},
            timeout=10,  # avoid hanging forever on an unresponsive server
        )
        if response.status_code != 200:
            return {"valid": False}
        data = response.json()
        return {"valid": True, **data}
    except (requests.RequestException, ValueError):
        # RequestException: connection/timeout/protocol problems.
        # ValueError: response body was not valid JSON.
        return {"valid": False}
|
safentic/layer.py
ADDED
@@ -0,0 +1,50 @@
|
|
1
|
+
from .engine import PolicyEnforcer
|
2
|
+
from .logger.audit import AuditLogger
|
3
|
+
from .helper.auth import validate_api_key
|
4
|
+
|
5
|
+
class SafenticError(Exception):
    """Raised when Safentic blocks an action."""
|
8
|
+
|
9
|
+
|
10
|
+
class SafetyLayer:
    """
    Safentic runtime enforcement wrapper for agent actions.

    Construct with your API key and agent id, then guard each tool call:

        safety.protect("send_email", {"body": "..."})
        # Raises SafenticError if blocked
    """

    def __init__(self, api_key: str, agent_id: str, enforcer: "PolicyEnforcer | None" = None, raise_on_block: bool = True):
        self.agent_id = agent_id
        self.raise_on_block = raise_on_block
        self.logger = AuditLogger()

        # If no custom enforcer is provided, instantiate a default one.
        self.enforcer = enforcer or PolicyEnforcer()
        # NOTE(review): the validation response is stored but its "valid"
        # flag is never checked, so an invalid key does not prevent use —
        # confirm whether construction should fail on invalid keys.
        self.api_key = validate_api_key(api_key)
        # Start this agent with a clean violation slate.
        self.enforcer.reset(agent_id)

    def protect(self, tool_name: str, tool_args: dict) -> dict:
        """
        Check whether a tool action is allowed.

        Raises SafenticError if blocked (default), or returns the result
        dict when raise_on_block=False.
        """
        result = self.enforcer.enforce(self.agent_id, tool_name, tool_args)

        # Log structured event.
        # NOTE(review): the enforcer also logs every decision, so each
        # call is currently audited twice — confirm this is intentional.
        self.logger.log(
            agent_id=self.agent_id,
            tool=tool_name,
            allowed=result["allowed"],
            reason=result["reason"] if not result["allowed"] else None,
        )

        # Raise or return based on outcome and configuration.
        if not result["allowed"]:
            if self.raise_on_block:
                raise SafenticError(result["reason"])
            return result

        return result
|
File without changes
|
safentic/logger/audit.py
ADDED
@@ -0,0 +1,83 @@
|
|
1
|
+
import logging
|
2
|
+
from datetime import datetime
|
3
|
+
import os
|
4
|
+
import json
|
5
|
+
|
6
|
+
class AuditLogger:
    """
    Structured audit logger for Safentic policy decisions.

    Emits human-readable lines to stderr and a text log file, plus one
    JSON object per decision to a .jsonl file. Logging can be disabled
    with config {"enabled": False} or the env var SAFE_AUDIT_LOG=0.
    """

    # Single source of truth for level-name lookups, shared by
    # __init__ and set_level (previously duplicated in both).
    _LEVEL_MAP = {
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
        "WARNING": logging.WARNING,
        "ERROR": logging.ERROR,
        "CRITICAL": logging.CRITICAL,
    }

    def __init__(self, config: "dict | None" = None):
        """
        Recognized config keys (all optional):
            enabled:     bool, default True
            destination: text log file path
            jsonl:       structured JSON-lines log file path
            level:       log level name, default "INFO"
        """
        config = config or {}

        # Allow disabling via config or env (the env kill-switch wins).
        self.enabled = config.get("enabled", True)
        if os.getenv("SAFE_AUDIT_LOG") == "0":
            self.enabled = False

        # File paths from config or defaults.
        self.txt_log_path = config.get("destination", "safentic/logs/txt_logs/safentic_audit.log")
        self.jsonl_path = config.get("jsonl", "safentic/logs/json_logs/safentic_audit.jsonl")

        # Ensure target directories exist.
        os.makedirs(os.path.dirname(self.txt_log_path), exist_ok=True)
        os.makedirs(os.path.dirname(self.jsonl_path), exist_ok=True)

        # Named logger is process-wide: every AuditLogger shares it.
        self.logger = logging.getLogger("safentic.audit")

        level_str = config.get("level", "INFO").upper()
        self.logger.setLevel(self._LEVEL_MAP.get(level_str, logging.INFO))

        # Prevent duplicate handlers (e.g. repeated construction in notebooks).
        if not self.logger.handlers:
            formatter = logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")

            stream_handler = logging.StreamHandler()
            stream_handler.setFormatter(formatter)
            self.logger.addHandler(stream_handler)

            file_handler = logging.FileHandler(self.txt_log_path)
            file_handler.setFormatter(formatter)
            self.logger.addHandler(file_handler)

    def log(self, agent_id: str, tool: str, allowed: bool, reason: "str | None" = None):
        """Record one policy decision (no-op when logging is disabled)."""
        if not self.enabled:
            return

        entry = {
            # Local time; NOTE(review): confirm whether UTC is required here.
            "timestamp": datetime.now().isoformat(),
            "agent_id": agent_id,
            "tool": tool,
            "allowed": allowed,
            "reason": reason or "No violation",
        }

        # Denied actions are logged at WARNING so they stand out.
        log_level = logging.INFO if allowed else logging.WARNING
        self.logger.log(log_level, f"[AUDIT] {entry}")

        # Best-effort structured write: audit logging must never crash the caller.
        try:
            with open(self.jsonl_path, "a", encoding="utf-8") as f:
                f.write(json.dumps(entry) + "\n")
        except Exception as e:
            self.logger.error(f"Failed to write structured audit log: {e}")

    def set_level(self, level: str):
        """Change the shared logger's level; raises ValueError for unknown names."""
        level = level.upper()
        if level not in self._LEVEL_MAP:
            raise ValueError(f"Unsupported log level: {level}")
        self.logger.setLevel(self._LEVEL_MAP[level])
|
File without changes
|
@@ -0,0 +1,33 @@
|
|
1
|
+
Standard Refund Policy
|
2
|
+
|
3
|
+
Last Updated: February 1, 2025
|
4
|
+
|
5
|
+
1. Overview
|
6
|
+
At XYZ, we strive to ensure your complete satisfaction with our platform. If you are not entirely happy with your purchase, you may be eligible for a refund as described below.
|
7
|
+
|
8
|
+
2. Eligibility
|
9
|
+
- Monthly Subscriptions: You may request a full refund within 30 days of your initial subscription charge.
|
10
|
+
- Annual Subscriptions: You may request a pro-rated refund (the remaining unused portion of your term) within 30 days of your initial annual subscription charge.
|
11
|
+
- One-Time Purchases and Credits: Refunds on one-time feature purchases or in-app credits are handled on a case-by-case basis and must be requested within 14 days of purchase.
|
12
|
+
|
13
|
+
3. How to Request a Refund
|
14
|
+
1. Log in to your XYZ account at https://app.XYZ.ai.
|
15
|
+
2. Go to Settings -> Billing -> Refunds.
|
16
|
+
3. Select the transaction you wish to refund and click Request Refund.
|
17
|
+
4. In the "Reason" field, briefly describe why you are requesting a refund.
|
18
|
+
5. Submit your request. You will receive an email confirmation within one business day.
|
19
|
+
|
20
|
+
4. Processing Time
|
21
|
+
- Refunds for credit-card or PayPal payments will be credited back to your original payment method within 5-7 business days after approval.
|
22
|
+
- In-app credits or wallet balances will be restored within 24 hours of approval.
|
23
|
+
|
24
|
+
5. Exceptions and Limitations
|
25
|
+
- No refunds will be issued for subscription renewals after the initial 30-day period.
|
26
|
+
- Promotional, gift, or trial subscriptions are non-refundable.
|
27
|
+
- Refund requests made more than 30 days after the initial charge are subject to review and may be denied.
|
28
|
+
- We reserve the right to refuse a refund if we suspect abuse of the refund policy.
|
29
|
+
|
30
|
+
6. Contact Us
|
31
|
+
If you have any questions or need assistance, please contact our Support Team at support@XYZ.ai or via the in-app chat.
|
32
|
+
|
33
|
+
Thank you for choosing XYZ! We appreciate your business and are committed to delivering a great experience.
|
@@ -0,0 +1,25 @@
|
|
1
|
+
tools:
|
2
|
+
send_email:
|
3
|
+
checks:
|
4
|
+
- type: deny_phrase
|
5
|
+
phrases:
|
6
|
+
- "one device per subscription"
|
7
|
+
- "24 hours"
|
8
|
+
|
9
|
+
- type: semantic
|
10
|
+
trigger_phrases:
|
11
|
+
- "refund policy"
|
12
|
+
- "guarantee"
|
13
|
+
- "our policy"
|
14
|
+
reference_file: "example_policy.txt"
|
15
|
+
|
16
|
+
update_ticket:
|
17
|
+
checks: [] # Explicitly allow all — no policy checks
|
18
|
+
|
19
|
+
log_to_crm:
|
20
|
+
checks: [] # Explicitly allow all — no policy checks
|
21
|
+
|
22
|
+
logging:
|
23
|
+
level: INFO
|
24
|
+
destination: "safentic/logs/txt_logs/safentic_audit.log"
|
25
|
+
jsonl: "safentic/logs/json_logs/safentic_audit.jsonl"
|
safentic/policy.py
ADDED
@@ -0,0 +1,106 @@
|
|
1
|
+
import os
|
2
|
+
import yaml
|
3
|
+
from typing import Optional
|
4
|
+
from .verifiers.sentence_verifier import SentenceTransformerVerifier
|
5
|
+
from .logger.audit import AuditLogger
|
6
|
+
|
7
|
+
|
8
|
+
class PolicyEngine:
    """
    Evaluates whether a given tool action complies with safety policies.
    Uses rule types such as deny_phrase and semantic checks.

    Policy configuration is loaded once from a YAML file; reference
    texts for semantic checks are read from the policies directory on
    each evaluation.
    """

    # Rule types this engine understands; anything else is logged and skipped.
    VALID_RULE_TYPES = {"deny_phrase", "semantic"}

    def __init__(
        self,
        policy_path: Optional[str] = None,
        policy_base_dir: Optional[str] = None
    ):
        # Base directory for the policy YAML and reference text files;
        # defaults to the installed package directory.
        self.base_dir = policy_base_dir or os.path.dirname(__file__)
        policy_path = policy_path or os.path.join(self.base_dir, "policies", "policy.yaml")

        with open(policy_path, encoding="utf-8") as f:
            self.policy_cfg = yaml.safe_load(f)

        # Embedding-based verifier used by "semantic" rules; thresholds
        # split decisions into allow / verify / block bands.
        self.verifier = SentenceTransformerVerifier(
            model_name="all-MiniLM-L6-v2",
            low_threshold=0.50,
            high_threshold=0.75,
        )

        self.audit_logger = AuditLogger()

    def _load_reference_text(self, filename: str) -> str:
        # Reference text is normalized (stripped, lowercased) to match
        # the normalization applied to the candidate text in evaluate_policy.
        path = os.path.join(self.base_dir, "policies", filename)
        with open(path, encoding="utf-8") as f:
            return f.read().strip().lower()

    def evaluate_policy(self, tool_name: str, args: dict, agent_id: str = "unknown") -> Optional[str]:
        """
        Returns None if allowed, or a string reason if blocked.
        Supports modular rule types per tool.

        Only the "body" or "note" argument (first non-empty wins) is
        inspected; every decision is also written to the audit log.
        """
        tool_rules = self.policy_cfg.get("tools", {}).get(tool_name)
        if not tool_rules:
            return None  # No policy = allow

        # Candidate text is lowercased so phrase matching is case-insensitive.
        text = (args.get("body") or args.get("note") or "").strip().lower()
        if not text:
            return None  # Empty input = allow

        for check in tool_rules.get("checks", []):
            rule_type = check.get("type")

            # Unknown rule types are logged (allowed=True) and skipped
            # rather than blocking the action.
            if rule_type not in self.VALID_RULE_TYPES:
                warning = f"Unknown rule type in policy: '{rule_type}' for tool: '{tool_name}'"
                self.audit_logger.log(
                    agent_id=agent_id,
                    tool=tool_name,
                    allowed=True,
                    reason=warning
                )
                continue

            if rule_type == "deny_phrase":
                # Simple substring blacklist: first matching phrase blocks.
                for phrase in check.get("phrases", []):
                    if phrase.lower() in text:
                        reason = f"Blocked: matched deny phrase “{phrase}”"
                        self.audit_logger.log(
                            agent_id=agent_id,
                            tool=tool_name,
                            allowed=False,
                            reason=reason
                        )
                        return reason

            elif rule_type == "semantic":
                # Semantic checks only run when a trigger phrase appears
                # in the text; otherwise the (expensive) model is skipped.
                trigger_phrases = [p.lower() for p in check.get("trigger_phrases", [])]
                if any(p in text for p in trigger_phrases):
                    reference_file = check.get("reference_file")
                    if not reference_file:
                        continue  # malformed rule: nothing to compare against

                    reference_text = self._load_reference_text(reference_file)
                    decision = self.verifier.decision(candidate=text, official=reference_text)

                    if decision == "block":
                        explanation = self.verifier.explain(candidate=text, official=reference_text)
                        reason = f"Blocked by semantic check: {explanation}"
                        self.audit_logger.log(
                            agent_id=agent_id,
                            tool=tool_name,
                            allowed=False,
                            reason=reason
                        )
                        return reason

                    # "allow" and "verify" both fall through as allowed;
                    # the decision is still recorded for traceability.
                    self.audit_logger.log(
                        agent_id=agent_id,
                        tool=tool_name,
                        allowed=True,
                        reason=f"Semantic decision: {decision}"
                    )

        return None
|
File without changes
|
@@ -0,0 +1,69 @@
|
|
1
|
+
from sentence_transformers import SentenceTransformer, util
|
2
|
+
from ..logger.audit import AuditLogger
|
3
|
+
|
4
|
+
|
5
|
+
class SentenceTransformerVerifier:
    """
    Checks a candidate text against an official policy text using
    sentence embeddings.

    Cosine similarity is mapped onto three outcomes via two thresholds:
    at or above the high threshold -> "allow", at or below the low
    threshold -> "block", anything in between -> "verify".
    """

    def __init__(
        self,
        model_name: str = "all-MiniLM-L6-v2",
        low_threshold: float = 0.50,
        high_threshold: float = 0.75,
    ):
        self.embedder = SentenceTransformer(model_name)
        self.low = low_threshold
        self.high = high_threshold
        self.logger = AuditLogger()

    def similarity(self, a: str, b: str) -> float:
        """Cosine similarity between the embeddings of two strings."""
        embeddings = self.embedder.encode([a, b], convert_to_tensor=True)
        return util.cos_sim(embeddings[0], embeddings[1]).item()

    def decision(self, candidate: str, official: str, agent_id: str = "semantic-check") -> str:
        """
        Classify the candidate as "allow", "verify", or "block" based on
        its similarity to the official text, and audit the outcome.
        """
        score = self.similarity(candidate, official)

        # High-confidence matches pass outright; otherwise the score
        # either falls into the block band or the manual-verify band.
        if score >= self.high:
            result = "allow"
        else:
            result = "block" if score <= self.low else "verify"

        self.logger.log(
            agent_id=agent_id,
            tool="semantic_policy_check",
            allowed=(result != "block"),
            reason=f"Semantic decision: {result} (score={score:.2f})"
        )

        return result

    def explain(self, candidate: str, official: str, agent_id: str = "semantic-check") -> str:
        """
        Produce a debug-friendly message with the similarity score and
        both thresholds; the message is also audited for traceability.
        """
        score = self.similarity(candidate, official)
        explanation = f"Semantic similarity = {score:.2f} (low={self.low}, high={self.high})"

        # Informational entry only, so it is recorded as allowed.
        self.logger.log(
            agent_id=agent_id,
            tool="semantic_policy_check",
            allowed=True,
            reason=explanation
        )

        return explanation
|
@@ -0,0 +1,54 @@
|
|
1
|
+
Metadata-Version: 2.4
|
2
|
+
Name: safentic
|
3
|
+
Version: 0.0.1
|
4
|
+
Summary: Safentic SDK for behavior analysis
|
5
|
+
Home-page: https://safentic.com
|
6
|
+
Author: Safentic
|
7
|
+
Author-email: contact@safentic.com
|
8
|
+
License: Proprietary :: Safentic Commercial License
|
9
|
+
Classifier: Programming Language :: Python :: 3
|
10
|
+
Classifier: License :: Other/Proprietary License
|
11
|
+
Classifier: Operating System :: OS Independent
|
12
|
+
Description-Content-Type: text/markdown
|
13
|
+
Dynamic: author
|
14
|
+
Dynamic: author-email
|
15
|
+
Dynamic: classifier
|
16
|
+
Dynamic: description
|
17
|
+
Dynamic: description-content-type
|
18
|
+
Dynamic: home-page
|
19
|
+
Dynamic: license
|
20
|
+
Dynamic: summary
|
21
|
+
|
22
|
+
# Safentic SDK
|
23
|
+
|
24
|
+
Safentic is a runtime guardrail SDK for agentic AI systems.
|
25
|
+
It intercepts and evaluates unsafe tool calls between agent **intent** and **execution**, enforcing custom safety policies.
|
26
|
+
|
27
|
+
---
|
28
|
+
|
29
|
+
## Installation
|
30
|
+
|
31
|
+
Install from PyPI:
|
32
|
+
|
33
|
+
pip install safentic
|
34
|
+
|
35
|
+
---
|
36
|
+
|
37
|
+
## API Key Required
|
38
|
+
|
39
|
+
Safentic requires a valid API key to function.
|
40
|
+
To obtain one, contact: contact@safentic.com
|
41
|
+
|
42
|
+
---
|
43
|
+
|
44
|
+
## Quick Start
|
45
|
+
|
46
|
+
```python
|
47
|
+
from safentic import SafetyLayer, SafenticError
|
48
|
+
|
49
|
+
layer = SafetyLayer(api_key="your-api-key", agent_id="agent-007")
|
50
|
+
|
51
|
+
try:
|
52
|
+
layer.protect("send_email", {"body": "Refund me now!"})
|
53
|
+
except SafenticError as e:
|
54
|
+
print("Blocked by policy:", e)
|
@@ -0,0 +1,22 @@
|
|
1
|
+
safentic/LICENSE.txt,sha256=xl3AZ2rkiOG5qE01SPRBgoW5Ib5YKZQeszh6OlvKePk,2330
|
2
|
+
safentic/__init__.py,sha256=T6sUAvYN7XW5lo3dpzBPnzoUTIokV-W6xuMXhId1iJk,132
|
3
|
+
safentic/config.py,sha256=V6c8Fz0t-Ja278kjCrQMlGPBQ4Hj830t3q7U7oM4Q4k,90
|
4
|
+
safentic/engine.py,sha256=-a90x70SY15WkOIkgxoPVLs_9xGsf4Krj-CmpoMs6tE,2597
|
5
|
+
safentic/layer.py,sha256=LzdAGLa3cX8oheBQEABrTK_X0cte9XTyW1GdtgxI25o,1742
|
6
|
+
safentic/policy.py,sha256=ApAAAxiWb_M5TUTtYKk10BVWEy4xViSbM8ikocIqWoI,4111
|
7
|
+
safentic/helper/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
8
|
+
safentic/helper/auth.py,sha256=evhikKRTtGXYp0YKkdntBM4bUjdUTtvJ2nA5YLt2IIA,391
|
9
|
+
safentic/logger/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
10
|
+
safentic/logger/audit.py,sha256=GAqZFVPERKyuugmGn6G1-fg9QLaHqN1aWl0S_J3dYXI,2947
|
11
|
+
safentic/policies/.gitkeep,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
12
|
+
safentic/policies/__init__.py,sha256=s37JO8wBcNstqvPif1q2Dw46imddeZZJBo-f3mSfd58,69
|
13
|
+
safentic/policies/example_policy.txt,sha256=Vkv5p2Kcyppijl8BD--P1APy2cBgWrYCrKzfo2pW0lo,1955
|
14
|
+
safentic/policies/policy.yaml,sha256=W0_6kPp0VqS568eR_NnYgFEiHVi1YWlIwrk3txd1Lr4,627
|
15
|
+
safentic/verifiers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
16
|
+
safentic/verifiers/sentence_verifier.py,sha256=jgObZ1t4UCYHYIfNayI0n69hVBWOHc9GAhpJsDbiD2c,2376
|
17
|
+
tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
18
|
+
tests/test_all.py,sha256=nIUedvH5ny_Glp5PuxmzHCRW2KvqkmwM3bB8rZvHLSo,5723
|
19
|
+
safentic-0.0.1.dist-info/METADATA,sha256=toBfprV-Pml2wEud51LuESBIQzeWHjJAl0J0bs8PtvA,1303
|
20
|
+
safentic-0.0.1.dist-info/WHEEL,sha256=0CuiUZ_p9E4cD6NyLD6UG80LBXYyiSYZOKDm5lp32xk,91
|
21
|
+
safentic-0.0.1.dist-info/top_level.txt,sha256=8bLyq7i9we7XEcbDYLHTUfR3IIpICeMfG8NWAHCLU5s,15
|
22
|
+
safentic-0.0.1.dist-info/RECORD,,
|
tests/__init__.py
ADDED
File without changes
|
tests/test_all.py
ADDED
@@ -0,0 +1,132 @@
|
|
1
|
+
import os
|
2
|
+
import sys
|
3
|
+
import math
|
4
|
+
import logging
|
5
|
+
import pytest
|
6
|
+
from unittest import mock
|
7
|
+
|
8
|
+
from safentic.policy import PolicyEngine
|
9
|
+
from safentic.engine import PolicyEnforcer
|
10
|
+
from safentic.layer import SafetyLayer, SafenticError
|
11
|
+
from safentic.verifiers.sentence_verifier import SentenceTransformerVerifier
|
12
|
+
from safentic.logger.audit import AuditLogger
|
13
|
+
|
14
|
+
# Disable audit logging for the whole test session so tests do not
# write log files to disk.
os.environ["SAFE_AUDIT_LOG"] = "0"

# Canned e-mail bodies: two that comply with the refund policy and two
# hallucinated claims the policy checks are expected to block.
TESTS = {
    "valid_excerpt": "Here is our refund policy: any cancellation within 30 days of purchase receives a full refund.",
    "hallucination_one_device": "According to our refund policy, every user may only refund one device per subscription.",
    "made_up_timeframe": "Refund policy: you must request a refund within 24 hours of purchase.",
    "generic_refund": "I’m sorry for the trouble—please let us know more about your refund issue.",
}
|
22
|
+
|
23
|
+
@pytest.fixture
def policy_engine():
    # Fresh PolicyEngine per test (loads the packaged policy.yaml).
    return PolicyEngine()

@pytest.fixture
def enforcer():
    # Fresh PolicyEnforcer per test so agent state never leaks between tests.
    return PolicyEnforcer()
|
30
|
+
|
31
|
+
@pytest.mark.parametrize("case, expected_contains", [
    ("valid_excerpt", None),
    ("generic_refund", None),
    ("hallucination_one_device", "one device per subscription"),
    ("made_up_timeframe", "24 hours"),
])
def test_policy_engine_evaluate(policy_engine, case, expected_contains):
    # expected_contains is None for allowed cases, otherwise a fragment
    # that must appear in the block reason.
    reason = policy_engine.evaluate_policy("send_email", {"body": TESTS[case]})
    if expected_contains is None:
        assert reason is None
    else:
        assert expected_contains in reason.lower()

def test_policy_engine_skips_empty(policy_engine):
    # Empty bodies bypass all checks and are allowed.
    assert policy_engine.evaluate_policy("send_email", {"body": ""}) is None

def test_policy_engine_unknown_rule_type(policy_engine):
    # Unknown rule types are logged and skipped, not treated as blocks.
    policy_engine.policy_cfg = {
        "tools": {"send_email": {"checks": [{"type": "unknown_type"}]}}
    }
    assert policy_engine.evaluate_policy("send_email", {"body": "test"}) is None

def test_policy_engine_malformed_semantic(policy_engine):
    # A semantic rule without a reference_file cannot compare anything
    # and must fall through to allow.
    policy_engine.policy_cfg = {
        "tools": {"send_email": {"checks": [{"type": "semantic", "trigger_phrases": ["refund"]}]}}
    }
    assert policy_engine.evaluate_policy("send_email", {"body": "refund policy applies"}) is None
|
58
|
+
|
59
|
+
def test_enforcer_allows_valid_and_generic(enforcer):
    # Compliant and generic messages should pass enforcement repeatedly.
    agent_id = "agent-allow"
    for case in ["valid_excerpt", "generic_refund"]:
        assert enforcer.enforce(agent_id, "send_email", {"body": TESTS[case]})["allowed"]

def test_enforcer_blocks_and_resets(enforcer):
    # A violation blocks the tool; the repeat attempt is denied via the
    # per-agent blocked-tools cache; reset() clears the agent's state.
    agent_id = "agent-block"
    res = enforcer.enforce(agent_id, "send_email", {"body": TESTS["hallucination_one_device"]})
    assert not res["allowed"]
    repeat = enforcer.enforce(agent_id, "send_email", {"body": TESTS["hallucination_one_device"]})
    assert "previously blocked" in repeat["reason"].lower()
    enforcer.reset(agent_id)
    assert agent_id not in enforcer.agent_states
|
72
|
+
|
73
|
+
def test_safety_layer_blocks_and_raises():
    # Default behavior: a blocked action raises SafenticError.
    layer = SafetyLayer(api_key="demo-1234", agent_id="safety-1", raise_on_block=True)
    with pytest.raises(SafenticError):
        layer.protect("send_email", {"body": TESTS["made_up_timeframe"]})

def test_safety_layer_returns_result():
    # With raise_on_block=False the result dict is returned instead.
    layer = SafetyLayer(api_key="demo-1234", agent_id="safety-2", raise_on_block=False)
    assert layer.protect("send_email", {"body": TESTS["generic_refund"]})["allowed"]
|
81
|
+
|
82
|
+
@mock.patch("safentic.verifiers.sentence_verifier.SentenceTransformer")
def test_similarity_score_consistency(mock_model_class):
    # Identical (mocked) embeddings must yield cosine similarity ~1.0.
    mock_model = mock_model_class.return_value
    mock_model.encode.return_value = [[1.0], [1.0]]
    verifier = SentenceTransformerVerifier()
    score = verifier.similarity("a", "a")
    assert math.isclose(score, 1.0, abs_tol=1e-4)

@mock.patch("safentic.verifiers.sentence_verifier.SentenceTransformer")
@pytest.mark.parametrize("a,b,score,expected", [
    ("a", "a", 0.95, "allow"),
    ("a", "b", 0.5, "verify"),
    ("x", "y", 0.1, "block"),
])
def test_verifier_threshold_behavior(mock_model_class, a, b, score, expected):
    # Force cos_sim to return a fixed score and check the decision band
    # (low=0.2, high=0.9): >= high allows, <= low blocks, else verify.
    mock_model = mock_model_class.return_value
    with mock.patch("safentic.verifiers.sentence_verifier.util.cos_sim") as mock_cos_sim:
        mock_cos_sim.return_value = mock.Mock()
        mock_cos_sim.return_value.item.return_value = score

        verifier = SentenceTransformerVerifier(low_threshold=0.2, high_threshold=0.9)
        result = verifier.decision(a, b)
        assert result == expected
|
105
|
+
|
106
|
+
@mock.patch("safentic.verifiers.sentence_verifier.SentenceTransformer")
def test_verifier_explains_score(mock_model_class):
    # explain() must embed the similarity score in its message.
    mock_model = mock_model_class.return_value
    mock_model.encode.return_value = [[1.0], [1.0]]
    verifier = SentenceTransformerVerifier()
    assert "Semantic similarity" in verifier.explain("a", "a")

@mock.patch("safentic.logger.audit.open", new_callable=mock.mock_open)
def test_logger_set_level_and_log(mock_open_file):
    os.environ["SAFE_AUDIT_LOG"] = "1"  # Ensure logger is active

    # AuditLogger reads the "destination"/"jsonl" config keys; the old
    # "txt"/"json" keys were silently ignored and defaults were used.
    logger = AuditLogger(config={"destination": "logs_t/dummy.txt", "jsonl": "logs_t/dummy.jsonl"})
    logger.set_level("DEBUG")
    logger.log(agent_id="mock-agent", tool="send_email", allowed=True)
    logger.log(agent_id="mock-agent", tool="send_email", allowed=False, reason="test")

    handle = mock_open_file()
    handle.write.assert_called()  # Ensure something was written

def test_logger_invalid_level():
    # Unknown level names must raise, not silently no-op.
    with pytest.raises(ValueError):
        AuditLogger().set_level("FAKE")

@mock.patch("safentic.logger.audit.open", side_effect=OSError("mocked failure"))
def test_logger_gracefully_fails_json_write(mock_open):
    # A failing jsonl write must be swallowed (logged), never raised.
    logger = AuditLogger(config={"destination": "logs_t/ok.txt", "jsonl": "logs_t/fail.jsonl"})
    logger.log("agent", "tool", False, reason="test")
|