safentic 1.0.3__tar.gz → 1.0.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29) hide show
  1. {safentic-1.0.3/safentic.egg-info → safentic-1.0.5}/PKG-INFO +1 -1
  2. {safentic-1.0.3 → safentic-1.0.5}/safentic/__init__.py +8 -8
  3. safentic-1.0.5/safentic/engine.py +92 -0
  4. safentic-1.0.5/safentic/layer.py +69 -0
  5. {safentic-1.0.3 → safentic-1.0.5}/safentic/policy.py +26 -30
  6. {safentic-1.0.3 → safentic-1.0.5/safentic.egg-info}/PKG-INFO +1 -1
  7. {safentic-1.0.3 → safentic-1.0.5}/setup.py +27 -27
  8. safentic-1.0.3/safentic/engine.py +0 -80
  9. safentic-1.0.3/safentic/layer.py +0 -50
  10. {safentic-1.0.3 → safentic-1.0.5}/MANIFEST.in +0 -0
  11. {safentic-1.0.3 → safentic-1.0.5}/README.md +0 -0
  12. {safentic-1.0.3 → safentic-1.0.5}/requirements.txt +0 -0
  13. {safentic-1.0.3 → safentic-1.0.5}/safentic/LICENSE.txt +0 -0
  14. {safentic-1.0.3 → safentic-1.0.5}/safentic/config.py +0 -0
  15. {safentic-1.0.3 → safentic-1.0.5}/safentic/helper/__init__.py +0 -0
  16. {safentic-1.0.3 → safentic-1.0.5}/safentic/helper/auth.py +0 -0
  17. {safentic-1.0.3 → safentic-1.0.5}/safentic/logger/__init__.py +0 -0
  18. {safentic-1.0.3 → safentic-1.0.5}/safentic/logger/audit.py +0 -0
  19. {safentic-1.0.3 → safentic-1.0.5}/safentic/policies/.gitkeep +0 -0
  20. {safentic-1.0.3 → safentic-1.0.5}/safentic/policies/__init__.py +0 -0
  21. {safentic-1.0.3 → safentic-1.0.5}/safentic/policies/example_policy.txt +0 -0
  22. {safentic-1.0.3 → safentic-1.0.5}/safentic/policies/policy.yaml +0 -0
  23. {safentic-1.0.3 → safentic-1.0.5}/safentic/verifiers/__init__.py +0 -0
  24. {safentic-1.0.3 → safentic-1.0.5}/safentic/verifiers/sentence_verifier.py +0 -0
  25. {safentic-1.0.3 → safentic-1.0.5}/safentic.egg-info/SOURCES.txt +0 -0
  26. {safentic-1.0.3 → safentic-1.0.5}/safentic.egg-info/dependency_links.txt +0 -0
  27. {safentic-1.0.3 → safentic-1.0.5}/safentic.egg-info/requires.txt +0 -0
  28. {safentic-1.0.3 → safentic-1.0.5}/safentic.egg-info/top_level.txt +0 -0
  29. {safentic-1.0.3 → safentic-1.0.5}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: safentic
3
- Version: 1.0.3
3
+ Version: 1.0.5
4
4
  Summary: Safentic SDK for behavior analysis
5
5
  Home-page: https://safentic.com
6
6
  Author: Safentic
@@ -1,8 +1,8 @@
1
"""Package entry points for safentic (1.0.3 release)."""

from .layer import SafetyLayer, SafenticError

# Explicit public API surface of the package.
__all__ = [
    "SafetyLayer",
    "SafenticError",
]

# Keep in sync with the version declared in setup.py.
__version__ = "1.0.3"
1
"""Package entry points for safentic (1.0.5 release)."""

from .layer import SafetyLayer, SafenticError

# Explicit public API surface of the package.
__all__ = [
    "SafetyLayer",
    "SafenticError",
]

# Keep in sync with the version declared in setup.py.
__version__ = "1.0.5"
@@ -0,0 +1,92 @@
1
+ import time
2
+ from .policy import PolicyEngine
3
+ from .logger.audit import AuditLogger
4
+
5
+
6
class PolicyEnforcer:
    """
    Runtime wrapper to evaluate and enforce tool usage policies.

    Tracks agent-specific violations, supports audit logging, and handles
    TTL-based tool blocks: once a tool triggers a "block"-level violation
    for an agent, further calls to that tool are denied until
    TOOL_BLOCK_TTL seconds have elapsed.
    """

    # Seconds a tool remains blocked for an agent after a violation.
    TOOL_BLOCK_TTL = 60

    def __init__(self, policy_engine: "PolicyEngine" = None):
        # Forward-reference annotation keeps the class importable even when
        # PolicyEngine is not yet bound; None means "build a default engine".
        self.policy_engine = policy_engine or PolicyEngine()
        # agent_id -> {"blocked_tools": {tool_name: blocked_at_timestamp},
        #              "violation_count": int,
        #              "last_violation": violation dict or None}
        self.agent_states = {}
        self.audit_logger = AuditLogger()

    def enforce(self, agent_id: str, tool_name: str, tool_args: dict) -> dict:
        """
        Evaluate a tool action for a given agent.

        Returns a dict with 'allowed' (bool), 'reason' (str), and
        'agent_state' metadata. Policy outcomes never raise.
        """
        state = self.agent_states.setdefault(agent_id, {
            "blocked_tools": {},  # tool_name -> timestamp of block
            "violation_count": 0,
            "last_violation": None,
        })

        # Deny immediately while the tool is inside its block TTL.
        if self._is_tool_blocked(tool_name, state):
            reason = "Tool is temporarily blocked due to a prior violation."
            self.audit_logger.log(agent_id=agent_id, tool=tool_name, allowed=False, reason=reason)
            return self._deny(tool_name, state, reason)

        # Evaluate policy. Forward agent_id so the engine's own audit
        # entries are attributed to the real agent instead of the
        # evaluate_policy default of "unknown".
        violation = self.policy_engine.evaluate_policy(tool_name, tool_args, agent_id=agent_id)

        if violation:
            # Example violation object: {"reason": "...", "level": "block"}
            level = violation.get("level", "block")
            reason = violation.get("reason", "Policy violation")

            if level == "warn":
                # Log a warning but allow the call.
                warn_reason = f"Warning: {reason}"
                self.audit_logger.log(agent_id=agent_id, tool=tool_name, allowed=True, reason=warn_reason)
                return {
                    "allowed": True,
                    "reason": warn_reason,
                    "agent_state": state,
                }

            # Otherwise: enforce the block and start the TTL clock.
            state["blocked_tools"][tool_name] = time.time()
            state["violation_count"] += 1
            state["last_violation"] = violation
            self.audit_logger.log(agent_id=agent_id, tool=tool_name, allowed=False, reason=reason)
            return self._deny(tool_name, state, reason)

        # No violation: allow and log the permitted action.
        self.audit_logger.log(agent_id=agent_id, tool=tool_name, allowed=True)
        return {
            "allowed": True,
            "reason": "Action permitted",
            "agent_state": state,
        }

    def reset(self, agent_id: str = None):
        """Clears violation state for one agent (if given) or all agents."""
        if agent_id:
            self.agent_states.pop(agent_id, None)
        else:
            self.agent_states.clear()

    def _deny(self, tool_name: str, state: dict, reason: str) -> dict:
        """Build the standard denial response payload."""
        return {
            "allowed": False,
            "reason": reason,
            "tool": tool_name,
            "agent_state": state,
        }

    def _is_tool_blocked(self, tool_name: str, state: dict) -> bool:
        """Checks if a tool is still blocked based on TTL; expires stale blocks."""
        blocked_at = state["blocked_tools"].get(tool_name)
        if not blocked_at:
            return False
        if time.time() - blocked_at > self.TOOL_BLOCK_TTL:
            # Tool block expired: drop it so future calls proceed normally.
            del state["blocked_tools"][tool_name]
            return False
        return True
@@ -0,0 +1,69 @@
1
+ from .engine import PolicyEnforcer
2
+ from .logger.audit import AuditLogger
3
+ from .helper.auth import validate_api_key
4
+
5
+
6
class SafenticError(Exception):
    """Raised when Safentic blocks an action."""
9
+
10
+
11
class InvalidAPIKeyError(Exception):
    """Raised when an invalid API key is used."""
14
+
15
+
16
class InvalidAgentInterfaceError(Exception):
    """Raised when the wrapped agent does not implement the required method."""
19
+
20
+
21
class SafetyLayer:
    """
    Wraps an agent with real-time enforcement of Safentic policies.
    All tool calls must go through `call_tool()`.

    Example:
        agent = SafetyLayer(MyAgent(), api_key="...", agent_id="agent-001")
        agent.call_tool("send_email", {"to": "alice@example.com"})
    """

    def __init__(self, agent, api_key: str, agent_id: str = "", enforcer: PolicyEnforcer = None, raise_on_block: bool = True):
        # Reject missing keys up front, then verify against the auth backend.
        if not api_key:
            raise InvalidAPIKeyError("Missing API key")

        auth = validate_api_key(api_key)
        if not auth or auth.get("status") != "valid":
            raise InvalidAPIKeyError("Invalid or unauthorized API key")

        # The wrapped agent must expose a callable `call_tool` hook.
        call_hook = getattr(agent, "call_tool", None)
        if not callable(call_hook):
            raise InvalidAgentInterfaceError("Wrapped agent must implement `call_tool(tool_name: str, **kwargs)`")

        self.agent = agent
        self.api_key = api_key
        self.agent_id = agent_id
        self.raise_on_block = raise_on_block
        self.logger = AuditLogger()
        self.enforcer = enforcer or PolicyEnforcer()
        # Start this agent from a clean violation slate.
        self.enforcer.reset(agent_id)

    def call_tool(self, tool_name: str, tool_args: dict) -> dict:
        """
        Intercepts a tool call and enforces policies before execution.
        If blocked, raises `SafenticError` or returns an error response (configurable).
        """
        verdict = self.enforcer.enforce(self.agent_id, tool_name, tool_args)
        allowed = verdict["allowed"]

        # Structured audit entry for every decision; reason only on denial.
        self.logger.log(
            agent_id=self.agent_id,
            tool=tool_name,
            allowed=allowed,
            reason=None if allowed else verdict["reason"],
        )

        if allowed:
            # Delegate to the wrapped agent only after the policy check passes.
            return self.agent.call_tool(tool_name, **tool_args)

        if self.raise_on_block:
            raise SafenticError(verdict["reason"])
        return {"error": verdict["reason"]}
@@ -1,14 +1,15 @@
1
1
  import os
2
2
  import yaml
3
- from typing import Optional
3
+ from typing import Optional, Dict, Any, Union
4
4
  from .verifiers.sentence_verifier import SentenceTransformerVerifier
5
5
  from .logger.audit import AuditLogger
6
6
 
7
7
 
8
8
  class PolicyEngine:
9
9
  """
10
- Evaluates whether a given tool action complies with safety policies.
11
- Uses rule types such as deny_phrase and semantic checks.
10
+ Evaluates whether a tool action complies with safety policies.
11
+ Supports multiple rule types: deny_phrase, semantic.
12
+ Returns structured violations for downstream enforcement.
12
13
  """
13
14
 
14
15
  VALID_RULE_TYPES = {"deny_phrase", "semantic"}
@@ -37,10 +38,16 @@ class PolicyEngine:
37
38
  with open(path, encoding="utf-8") as f:
38
39
  return f.read().strip().lower()
39
40
 
40
- def evaluate_policy(self, tool_name: str, args: dict, agent_id: str = "unknown") -> Optional[str]:
41
+ def evaluate_policy(
42
+ self,
43
+ tool_name: str,
44
+ args: Dict[str, Any],
45
+ agent_id: str = "unknown"
46
+ ) -> Optional[Dict[str, Union[str, Any]]]:
41
47
  """
42
- Returns None if allowed, or a string reason if blocked.
43
- Supports modular rule types per tool.
48
+ Returns:
49
+ None if allowed,
50
+ dict with 'reason' and 'level' if blocked or warned
44
51
  """
45
52
  tool_rules = self.policy_cfg.get("tools", {}).get(tool_name)
46
53
  if not tool_rules:
@@ -52,50 +59,39 @@ class PolicyEngine:
52
59
 
53
60
  for check in tool_rules.get("checks", []):
54
61
  rule_type = check.get("type")
62
+ level = check.get("level", "block") # Default to block
55
63
 
56
64
  if rule_type not in self.VALID_RULE_TYPES:
57
- warning = f"Unknown rule type in policy: '{rule_type}' for tool: '{tool_name}'"
58
- self.audit_logger.log(
59
- agent_id=agent_id,
60
- tool=tool_name,
61
- allowed=True,
62
- reason=warning
63
- )
65
+ warning = f"Unknown rule type: '{rule_type}' for tool: '{tool_name}'"
66
+ self.audit_logger.log(agent_id=agent_id, tool=tool_name, allowed=True, reason=warning)
64
67
  continue
65
68
 
69
+ # ---- Phrase Matching ----
66
70
  if rule_type == "deny_phrase":
67
71
  for phrase in check.get("phrases", []):
68
72
  if phrase.lower() in text:
69
- reason = f"Blocked: matched deny phrase “{phrase}”"
70
- self.audit_logger.log(
71
- agent_id=agent_id,
72
- tool=tool_name,
73
- allowed=False,
74
- reason=reason
75
- )
76
- return reason
73
+ reason = f"Matched deny phrase: “{phrase}”"
74
+ self.audit_logger.log(agent_id=agent_id, tool=tool_name, allowed=(level == "warn"), reason=reason)
75
+ return {"reason": reason, "level": level}
77
76
 
77
+ # ---- Semantic Check ----
78
78
  elif rule_type == "semantic":
79
79
  trigger_phrases = [p.lower() for p in check.get("trigger_phrases", [])]
80
80
  if any(p in text for p in trigger_phrases):
81
81
  reference_file = check.get("reference_file")
82
82
  if not reference_file:
83
- continue
83
+ continue # Skip if not configured
84
84
 
85
85
  reference_text = self._load_reference_text(reference_file)
86
86
  decision = self.verifier.decision(candidate=text, official=reference_text)
87
87
 
88
88
  if decision == "block":
89
89
  explanation = self.verifier.explain(candidate=text, official=reference_text)
90
- reason = f"Blocked by semantic check: {explanation}"
91
- self.audit_logger.log(
92
- agent_id=agent_id,
93
- tool=tool_name,
94
- allowed=False,
95
- reason=reason
96
- )
97
- return reason
90
+ reason = f"Semantic block: {explanation}"
91
+ self.audit_logger.log(agent_id=agent_id, tool=tool_name, allowed=(level == "warn"), reason=reason)
92
+ return {"reason": reason, "level": level}
98
93
 
94
+ # Log semantic pass
99
95
  self.audit_logger.log(
100
96
  agent_id=agent_id,
101
97
  tool=tool_name,
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: safentic
3
- Version: 1.0.3
3
+ Version: 1.0.5
4
4
  Summary: Safentic SDK for behavior analysis
5
5
  Home-page: https://safentic.com
6
6
  Author: Safentic
@@ -1,27 +1,27 @@
1
from setuptools import find_packages, setup

# Runtime dependencies declared for the 1.0.3 release.
REQUIREMENTS = [
    "requests",
    "PyYAML",
    "sentence-transformers==3.2.1",
    "sqlalchemy",
    "python-dotenv",
]

CLASSIFIERS = [
    "Programming Language :: Python :: 3",
    "License :: Other/Proprietary License",  # Indicates not open source
    "Operating System :: OS Independent",
]

setup(
    name="safentic",
    version="1.0.3",
    packages=find_packages(),
    install_requires=REQUIREMENTS,
    include_package_data=True,
    license="Proprietary :: Safentic Commercial License",  # Custom classifier
    classifiers=CLASSIFIERS,
    author="Safentic",
    author_email="contact@safentic.com",
    description="Safentic SDK for behavior analysis",
    long_description=open("README.md").read(),
    long_description_content_type="text/markdown",
    url="https://safentic.com",
)
1
from setuptools import setup, find_packages

# Read the long description with an explicit encoding so the build does not
# depend on the platform's default locale, and close the handle promptly
# instead of leaking it until garbage collection.
with open("README.md", encoding="utf-8") as readme:
    long_description = readme.read()

setup(
    name="safentic",
    version="1.0.5",
    packages=find_packages(),
    install_requires=[
        "requests",
        "PyYAML",
        "sentence-transformers==3.2.1",
        "sqlalchemy",
        "python-dotenv",
    ],
    include_package_data=True,
    license="Proprietary :: Safentic Commercial License",  # Custom classifier
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: Other/Proprietary License",  # Indicates not open source
        "Operating System :: OS Independent",
    ],
    author="Safentic",
    author_email="contact@safentic.com",
    description="Safentic SDK for behavior analysis",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://safentic.com",
)
@@ -1,80 +0,0 @@
1
- from .policy import PolicyEngine
2
- from .logger.audit import AuditLogger
3
-
4
class PolicyEnforcer:
    """
    Runtime wrapper to evaluate and enforce tool usage policies.
    Tracks agent-specific violations and supports audit logging.
    """

    def __init__(self, policy_engine: PolicyEngine = None):
        self.policy_engine = policy_engine or PolicyEngine()
        self.agent_states = {}
        self.audit_logger = AuditLogger()

    def enforce(self, agent_id: str, tool_name: str, tool_args: dict) -> dict:
        """
        Evaluate a tool action for a given agent and return a dict with
        'allowed', 'reason', and agent-state metadata.
        """
        if agent_id not in self.agent_states:
            self.agent_states[agent_id] = {
                "blocked_tools": set(),
                "violation_count": 0,
                "last_violation": None,
            }
        state = self.agent_states[agent_id]

        # A tool denied once stays denied for this agent (no expiry here).
        if tool_name in state["blocked_tools"]:
            reason = "Tool previously blocked for this agent."
            self.audit_logger.log(agent_id=agent_id, tool=tool_name, allowed=False, reason=reason)
            return self._deny(tool_name, state, reason)

        # evaluate_policy returns None when allowed, or a string reason.
        violation = self.policy_engine.evaluate_policy(tool_name, tool_args)
        if violation:
            # Record the violation and permanently block the tool for this agent.
            state["blocked_tools"].add(tool_name)
            state["violation_count"] += 1
            state["last_violation"] = violation
            self.audit_logger.log(agent_id=agent_id, tool=tool_name, allowed=False, reason=violation)
            return self._deny(tool_name, state, violation)

        # Permitted: log and report success.
        self.audit_logger.log(agent_id=agent_id, tool=tool_name, allowed=True)
        return {
            "allowed": True,
            "reason": "Action permitted",
            "agent_state": state,
        }

    def reset(self, agent_id: str = None):
        """Clear violation state for one agent, or for all agents when omitted."""
        if agent_id:
            self.agent_states.pop(agent_id, None)
        else:
            self.agent_states.clear()

    def _deny(self, tool_name: str, state: dict, reason: str) -> dict:
        """Standard denial payload."""
        return {
            "allowed": False,
            "reason": reason,
            "tool": tool_name,
            "agent_state": state,
        }
@@ -1,50 +0,0 @@
1
- from .engine import PolicyEnforcer
2
- from .logger.audit import AuditLogger
3
- from .helper.auth import validate_api_key
4
-
5
class SafenticError(Exception):
    """Raised when Safentic blocks an action."""
8
-
9
-
10
class SafetyLayer:
    """
    Safentic runtime enforcement wrapper for agent actions.
    Provide your API key first, then run actions through `protect()`.

    Example:
        safety.protect("send_email", {"body": "..."})
        # Raises SafenticError if blocked
    """

    def __init__(self, api_key="", agent_id="", enforcer: PolicyEnforcer = None, raise_on_block: bool = True):
        self.agent_id = agent_id
        self.raise_on_block = raise_on_block
        self.logger = AuditLogger()
        self.enforcer = enforcer or PolicyEnforcer()
        # The value returned by validate_api_key is stored as the effective key.
        self.api_key = validate_api_key(api_key)
        # Start this agent from a clean violation slate.
        self.enforcer.reset(agent_id)

    def protect(self, tool_name: str, tool_args: dict) -> dict:
        """
        Check whether a tool action is allowed.
        Raises SafenticError if blocked (default), or returns the result
        when raise_on_block=False.
        """
        outcome = self.enforcer.enforce(self.agent_id, tool_name, tool_args)
        blocked = not outcome["allowed"]

        # Structured audit entry for every decision; reason only on denial.
        self.logger.log(
            agent_id=self.agent_id,
            tool=tool_name,
            allowed=outcome["allowed"],
            reason=outcome["reason"] if blocked else None,
        )

        if blocked and self.raise_on_block:
            raise SafenticError(outcome["reason"])
        return outcome
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes