safentic-1.0.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,18 @@
+ # Essentials
+ include LICENSE.txt
+ include README.md
+ include setup.py
+ include pyproject.toml
+ include requirements.txt
+
+ # Include everything under safentic, including nested packages
+ recursive-include safentic *.py *.txt *.yaml *.yml
+
+ # Keep these if you ever add data/config dirs
+ recursive-include safentic/data *
+ recursive-include safentic/config *
+ recursive-include safentic/policies *
+
+ # Ignore test folders and caches
+ prune tests
+ global-exclude __pycache__ *.py[cod] *.pyo
@@ -0,0 +1,42 @@
+ Metadata-Version: 2.4
+ Name: safentic
+ Version: 1.0.0
+ Summary: Safentic SDK for behavior analysis
+ Home-page: https://safentic.com
+ Author: Safentic
+ Author-email: contact@safentic.com
+ License: Proprietary :: Safentic Commercial License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: Other/Proprietary License
+ Classifier: Operating System :: OS Independent
+ Description-Content-Type: text/markdown
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: license
+ Dynamic: summary
+
+ # SAFENTIC-V0
+
+ Safentic is a runtime guardrail SDK for agentic AI.
+ It intercepts unsafe tool calls between agent **intent** and **execution**.
+
+ ## To Run Locally with Docker
+
+ ### Prerequisites
+
+ - [Docker Desktop](https://www.docker.com/products/docker-desktop) installed
+
+ ---
+
+ ### Build and Run with Docker
+
+ Make sure Docker Desktop is running on your device, then run the following:
+ ```
+ git clone https://github.com/type0-1/safentic-v0.git
+ cd safentic-v0
+ docker compose up --build
+ ```
@@ -0,0 +1,21 @@
+ # SAFENTIC-V0
+
+ Safentic is a runtime guardrail SDK for agentic AI.
+ It intercepts unsafe tool calls between agent **intent** and **execution**.
+
+ ## To Run Locally with Docker
+
+ ### Prerequisites
+
+ - [Docker Desktop](https://www.docker.com/products/docker-desktop) installed
+
+ ---
+
+ ### Build and Run with Docker
+
+ Make sure Docker Desktop is running on your device, then run the following:
+ ```
+ git clone https://github.com/type0-1/safentic-v0.git
+ cd safentic-v0
+ docker compose up --build
+ ```
@@ -0,0 +1,10 @@
+ PyYAML==6.0
+ langchain
+ langchain-community
+ langchain-openai
+ openai
+ python-dotenv
+ sentence-transformers==3.2.1
+ pyautogen
+ pytest
+ coverage
@@ -0,0 +1,36 @@
+ Safentic SDK Commercial License Agreement
+ =========================================
+
+ IMPORTANT – READ CAREFULLY:
+
+ This license governs the use of the Safentic SDK (“Software”) developed and owned by Safentic. By installing or using this Software, you (“Licensee”) agree to the following terms and conditions:
+
+ 1. GRANT OF LICENSE
+ Licensor grants Licensee a non-exclusive, non-transferable, non-sublicensable license to use the Software solely for internal business purposes and only in accordance with the terms of the commercial agreement executed between the parties. This license may include limited evaluation rights, subject to expiration or usage restrictions.
+
+ 2. RESTRICTIONS
+ Licensee shall NOT:
+ - Use the Software without a valid, active license key or subscription.
+ - Reverse engineer, decompile, or disassemble the Software.
+ - Modify, copy, or create derivative works based on the Software.
+ - Distribute, sublicense, lease, or otherwise make the Software available to any third party.
+ - Circumvent or attempt to disable any license verification, telemetry, or access control mechanisms.
+
+ 3. OWNERSHIP
+ The Software is licensed, not sold. Safentic retains all rights, title, and interest in and to the Software, including all intellectual property rights.
+
+ 4. TERMINATION
+ This license is effective until terminated. It will terminate automatically without notice if Licensee breaches any term of this agreement. Upon termination, Licensee must cease all use and destroy all copies of the Software.
+
+ 5. NO WARRANTY
+ The Software is provided "as is" without warranty of any kind. Licensor disclaims all warranties, express or implied, including but not limited to warranties of merchantability and fitness for a particular purpose.
+
+ 6. LIMITATION OF LIABILITY
+ In no event shall Licensor be liable for any damages arising out of the use or inability to use the Software, including but not limited to incidental, special, or consequential damages.
+
+ 7. GOVERNING LAW
+ This agreement shall be governed by and construed in accordance with the laws of Ireland, without regard to its conflict of law principles.
+
+ For licensing inquiries, please contact: contact@safentic.com
+
+ Copyright © 2025, Safentic. All rights reserved.
@@ -0,0 +1,8 @@
+ from .layer import SafetyLayer, SafenticError
+
+ __all__ = [
+     "SafetyLayer",
+     "SafenticError",
+ ]
+
+ __version__ = "0.3"
@@ -0,0 +1,2 @@
+ BASE_API_PATH = "https://safentic-api.onrender.com/"
+ API_KEY_ENDPOINT = "auth/validate"
@@ -0,0 +1,80 @@
+ from .policy import PolicyEngine
+ from .logger.audit import AuditLogger
+
+ class PolicyEnforcer:
+     """
+     Runtime wrapper to evaluate and enforce tool usage policies.
+     Tracks agent-specific violations and supports audit logging.
+     """
+
+     def __init__(self, policy_engine: PolicyEngine = None):
+         self.policy_engine = policy_engine or PolicyEngine()
+         self.agent_states = {}
+         self.audit_logger = AuditLogger()
+
+     def enforce(self, agent_id: str, tool_name: str, tool_args: dict) -> dict:
+         """
+         Evaluates a tool action for a given agent.
+         Returns a dict with 'allowed', 'reason', and agent state metadata.
+         """
+         state = self.agent_states.setdefault(agent_id, {
+             "blocked_tools": set(),
+             "violation_count": 0,
+             "last_violation": None
+         })
+
+         # Block repeat attempts to use an already-denied tool
+         if tool_name in state["blocked_tools"]:
+             reason = "Tool previously blocked for this agent."
+             self.audit_logger.log(
+                 agent_id=agent_id,
+                 tool=tool_name,
+                 allowed=False,
+                 reason=reason
+             )
+             return self._deny(tool_name, state, reason)
+
+         # Run policy evaluation
+         violation = self.policy_engine.evaluate_policy(tool_name, tool_args)
+
+         if violation:
+             state["blocked_tools"].add(tool_name)
+             state["violation_count"] += 1
+             state["last_violation"] = violation
+             self.audit_logger.log(
+                 agent_id=agent_id,
+                 tool=tool_name,
+                 allowed=False,
+                 reason=violation
+             )
+             return self._deny(tool_name, state, violation)
+
+         # Log allowed action
+         self.audit_logger.log(
+             agent_id=agent_id,
+             tool=tool_name,
+             allowed=True
+         )
+
+         return {
+             "allowed": True,
+             "reason": "Action permitted",
+             "agent_state": state
+         }
+
+     def reset(self, agent_id: str = None):
+         """
+         Clears violation state for one agent or all agents.
+         """
+         if agent_id:
+             self.agent_states.pop(agent_id, None)
+         else:
+             self.agent_states.clear()
+
+     def _deny(self, tool_name: str, state: dict, reason: str) -> dict:
+         return {
+             "allowed": False,
+             "reason": reason,
+             "tool": tool_name,
+             "agent_state": state
+         }
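
For reference, a minimal sketch of driving this enforcer directly. The agent id and email body are made up, and constructing the default `PolicyEngine` loads the bundled `policy.yaml` along with its sentence-transformer model:

```
from safentic.engine import PolicyEnforcer

enforcer = PolicyEnforcer()  # default PolicyEngine + AuditLogger

# The first attempt is evaluated against the policy checks for send_email
result = enforcer.enforce(
    agent_id="agent-1",
    tool_name="send_email",
    tool_args={"body": "Our refund policy guarantees your money back."},
)
print(result["allowed"], result["reason"])

# If that call was denied, later attempts on the same tool are
# short-circuited by the per-agent blocked_tools set, without
# re-running the policy checks.
result = enforcer.enforce("agent-1", "send_email", {"body": "hello"})
print(result["allowed"], result["reason"])
```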
File without changes
@@ -0,0 +1,12 @@
+ import requests
+ from ..config import BASE_API_PATH, API_KEY_ENDPOINT
+
+ def validate_api_key(key: str) -> dict:
+     try:
+         # timeout added so a dead auth service cannot hang the caller
+         response = requests.post(BASE_API_PATH + API_KEY_ENDPOINT, json={"api_key": key}, timeout=10)
+         if response.status_code != 200:
+             return {"valid": False}
+         data = response.json()
+         return {"valid": True, **data}
+     except Exception:
+         return {"valid": False}
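
A usage sketch for the helper above. The key value is hypothetical; any non-200 response, network failure, or JSON error collapses to `{"valid": False}`:

```
from safentic.helper.auth import validate_api_key

result = validate_api_key("sk-example-123")  # hypothetical key
if result["valid"]:
    print("Key accepted:", result)
else:
    print("Key rejected or auth service unreachable")
```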
@@ -0,0 +1,50 @@
+ from .engine import PolicyEnforcer
+ from .logger.audit import AuditLogger
+ from .helper.auth import validate_api_key
+
+ class SafenticError(Exception):
+     """Raised when Safentic blocks an action."""
+     pass
+
+
+ class SafetyLayer:
+     """
+     Safentic runtime enforcement wrapper for agent actions.
+     Provide your API key, then guard each tool call.
+     Example:
+         safety.protect("send_email", {"body": "..."})
+         # Raises SafenticError if blocked
+     """
+
+     def __init__(self, api_key: str, agent_id: str, enforcer: PolicyEnforcer = None, raise_on_block: bool = True):
+         self.agent_id = agent_id
+         self.raise_on_block = raise_on_block
+         self.logger = AuditLogger()
+
+         # If no custom enforcer is provided, instantiate a default one
+         self.enforcer = enforcer or PolicyEnforcer()
+         self.api_key = validate_api_key(api_key)
+         self.enforcer.reset(agent_id)
+
+     def protect(self, tool_name: str, tool_args: dict) -> dict:
+         """
+         Checks whether a tool action is allowed.
+         Raises SafenticError if blocked (default), or returns the result if raise_on_block=False.
+         """
+         result = self.enforcer.enforce(self.agent_id, tool_name, tool_args)
+
+         # Log structured event
+         self.logger.log(
+             agent_id=self.agent_id,
+             tool=tool_name,
+             allowed=result["allowed"],
+             reason=result["reason"] if not result["allowed"] else None
+         )
+
+         # Raise or return based on outcome and config
+         if not result["allowed"]:
+             if self.raise_on_block:
+                 raise SafenticError(result["reason"])
+             return result
+
+         return result
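
Expanding the docstring's example into a fuller sketch; the API key and tool arguments here are illustrative:

```
from safentic import SafetyLayer, SafenticError

safety = SafetyLayer(api_key="sk-example-123", agent_id="support-agent")

try:
    safety.protect("send_email", {"body": "Refunds are guaranteed, always."})
    # ...only send the email if protect() did not raise...
except SafenticError as exc:
    print(f"Blocked: {exc}")

# Or opt out of exceptions and branch on the returned dict instead:
safety = SafetyLayer(api_key="sk-example-123", agent_id="support-agent",
                     raise_on_block=False)
result = safety.protect("send_email", {"body": "Refunds are guaranteed, always."})
if not result["allowed"]:
    print(result["reason"])
```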
File without changes
@@ -0,0 +1,83 @@
+ import logging
+ from datetime import datetime
+ import os
+ import json
+
+ class AuditLogger:
+     def __init__(self, config: dict = None):
+         config = config or {}
+
+         # Allow disabling via config or env
+         self.enabled = config.get("enabled", True)
+         if os.getenv("SAFE_AUDIT_LOG") == "0":
+             self.enabled = False
+
+         # File paths from config or default
+         self.txt_log_path = config.get("destination", "safentic/logs/txt_logs/safentic_audit.log")
+         self.jsonl_path = config.get("jsonl", "safentic/logs/json_logs/safentic_audit.jsonl")
+
+         # Ensure directories exist
+         os.makedirs(os.path.dirname(self.txt_log_path), exist_ok=True)
+         os.makedirs(os.path.dirname(self.jsonl_path), exist_ok=True)
+
+         # Set up logger
+         self.logger = logging.getLogger("safentic.audit")
+
+         level_str = config.get("level", "INFO").upper()
+         level_map = {
+             "DEBUG": logging.DEBUG,
+             "INFO": logging.INFO,
+             "WARNING": logging.WARNING,
+             "ERROR": logging.ERROR,
+             "CRITICAL": logging.CRITICAL
+         }
+         level = level_map.get(level_str, logging.INFO)
+         self.logger.setLevel(level)
+
+         # Prevent duplicate handlers (e.g., in notebooks)
+         if not self.logger.handlers:
+             formatter = logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
+
+             stream_handler = logging.StreamHandler()
+             stream_handler.setFormatter(formatter)
+             self.logger.addHandler(stream_handler)
+
+             file_handler = logging.FileHandler(self.txt_log_path)
+             file_handler.setFormatter(formatter)
+             self.logger.addHandler(file_handler)
+
+     def log(self, agent_id: str, tool: str, allowed: bool, reason: str = None):
+         if not self.enabled:
+             return
+
+         entry = {
+             "timestamp": datetime.now().isoformat(),
+             "agent_id": agent_id,
+             "tool": tool,
+             "allowed": allowed,
+             "reason": reason or "No violation"
+         }
+
+         log_level = logging.INFO if allowed else logging.WARNING
+         self.logger.log(log_level, f"[AUDIT] {entry}")
+
+         try:
+             with open(self.jsonl_path, "a", encoding="utf-8") as f:
+                 f.write(json.dumps(entry) + "\n")
+         except Exception as e:
+             self.logger.error(f"Failed to write structured audit log: {e}")
+
+     def set_level(self, level: str):
+         level_map = {
+             "DEBUG": logging.DEBUG,
+             "INFO": logging.INFO,
+             "WARNING": logging.WARNING,
+             "ERROR": logging.ERROR,
+             "CRITICAL": logging.CRITICAL
+         }
+
+         level = level.upper()
+         if level in level_map:
+             self.logger.setLevel(level_map[level])
+         else:
+             raise ValueError(f"Unsupported log level: {level}")
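
A configuration sketch for the logger above; the override paths are illustrative:

```
from safentic.logger.audit import AuditLogger

logger = AuditLogger(config={
    "enabled": True,
    "level": "DEBUG",
    "destination": "logs/audit.log",  # plain-text log
    "jsonl": "logs/audit.jsonl",      # one JSON object per line
})

logger.log(agent_id="agent-1", tool="send_email",
           allowed=False, reason="deny phrase matched")
# Emits a WARNING line to console and file, and appends the structured
# entry to the .jsonl file. Setting SAFE_AUDIT_LOG=0 disables logging.
```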
File without changes
@@ -0,0 +1,3 @@
+ from ..layer import SafetyLayer, SafenticError
+
+ __version__ = "0.1"
@@ -0,0 +1,33 @@
+ Standard Refund Policy
+
+ Last Updated: February 1, 2025
+
+ 1. Overview
+ At XYZ, we strive to ensure your complete satisfaction with our platform. If you are not entirely happy with your purchase, you may be eligible for a refund as described below.
+
+ 2. Eligibility
+ - Monthly Subscriptions: You may request a full refund within 30 days of your initial subscription charge.
+ - Annual Subscriptions: You may request a pro-rated refund (the remaining unused portion of your term) within 30 days of your initial annual subscription charge.
+ - One-Time Purchases and Credits: Refunds on one-time feature purchases or in-app credits are handled on a case-by-case basis and must be requested within 14 days of purchase.
+
+ 3. How to Request a Refund
+ 1. Log in to your XYZ account at https://app.XYZ.ai.
+ 2. Go to Settings -> Billing -> Refunds.
+ 3. Select the transaction you wish to refund and click Request Refund.
+ 4. In the "Reason" field, briefly describe why you are requesting a refund.
+ 5. Submit your request. You will receive an email confirmation within one business day.
+
+ 4. Processing Time
+ - Refunds for credit-card or PayPal payments will be credited back to your original payment method within 5-7 business days after approval.
+ - In-app credits or wallet balances will be restored within 24 hours of approval.
+
+ 5. Exceptions and Limitations
+ - No refunds will be issued for subscription renewals after the initial 30-day period.
+ - Promotional, gift, or trial subscriptions are non-refundable.
+ - Refund requests made more than 30 days after the initial charge are subject to review and may be denied.
+ - We reserve the right to refuse a refund if we suspect abuse of the refund policy.
+
+ 6. Contact Us
+ If you have any questions or need assistance, please contact our Support Team at support@XYZ.ai or via the in-app chat.
+
+ Thank you for choosing XYZ! We appreciate your business and are committed to delivering a great experience.
@@ -0,0 +1,25 @@
+ tools:
+   send_email:
+     checks:
+       - type: deny_phrase
+         phrases:
+           - "one device per subscription"
+           - "24 hours"
+
+       - type: semantic
+         trigger_phrases:
+           - "refund policy"
+           - "guarantee"
+           - "our policy"
+         reference_file: "example_policy.txt"
+
+   update_ticket:
+     checks: []  # Explicitly allow all — no policy checks
+
+   log_to_crm:
+     checks: []  # Explicitly allow all — no policy checks
+
+ logging:
+   level: INFO
+   destination: "safentic/logs/txt_logs/safentic_audit.log"
+   jsonl: "safentic/logs/json_logs/safentic_audit.jsonl"
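
Read together with the PolicyEngine that follows, this configuration means a `send_email` body is first scanned for literal deny phrases, then semantically compared against `example_policy.txt` whenever a trigger phrase appears. A hedged sketch of the expected behavior (the exact reason strings come from the engine itself):

```
from safentic.policy import PolicyEngine

engine = PolicyEngine()  # loads safentic/policies/policy.yaml by default

# deny_phrase: literal, case-insensitive substring match
print(engine.evaluate_policy(
    "send_email",
    {"body": "Note: we allow one device per subscription."},
))  # -> "Blocked: matched deny phrase ..."

# Tools whose checks list is empty are always allowed
print(engine.evaluate_policy("update_ticket", {"note": "refund policy"}))  # -> None
```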
@@ -0,0 +1,106 @@
+ import os
+ import yaml
+ from typing import Optional
+ from .verifiers.sentence_verifier import SentenceTransformerVerifier
+ from .logger.audit import AuditLogger
+
+
+ class PolicyEngine:
+     """
+     Evaluates whether a given tool action complies with safety policies.
+     Uses rule types such as deny_phrase and semantic checks.
+     """
+
+     VALID_RULE_TYPES = {"deny_phrase", "semantic"}
+
+     def __init__(
+         self,
+         policy_path: Optional[str] = None,
+         policy_base_dir: Optional[str] = None
+     ):
+         self.base_dir = policy_base_dir or os.path.dirname(__file__)
+         policy_path = policy_path or os.path.join(self.base_dir, "policies", "policy.yaml")
+
+         with open(policy_path, encoding="utf-8") as f:
+             self.policy_cfg = yaml.safe_load(f)
+
+         self.verifier = SentenceTransformerVerifier(
+             model_name="all-MiniLM-L6-v2",
+             low_threshold=0.50,
+             high_threshold=0.75,
+         )
+
+         self.audit_logger = AuditLogger()
+
+     def _load_reference_text(self, filename: str) -> str:
+         path = os.path.join(self.base_dir, "policies", filename)
+         with open(path, encoding="utf-8") as f:
+             return f.read().strip().lower()
+
+     def evaluate_policy(self, tool_name: str, args: dict, agent_id: str = "unknown") -> Optional[str]:
+         """
+         Returns None if allowed, or a string reason if blocked.
+         Supports modular rule types per tool.
+         """
+         tool_rules = self.policy_cfg.get("tools", {}).get(tool_name)
+         if not tool_rules:
+             return None  # No policy = allow
+
+         text = (args.get("body") or args.get("note") or "").strip().lower()
+         if not text:
+             return None  # Empty input = allow
+
+         for check in tool_rules.get("checks", []):
+             rule_type = check.get("type")
+
+             if rule_type not in self.VALID_RULE_TYPES:
+                 warning = f"Unknown rule type in policy: '{rule_type}' for tool: '{tool_name}'"
+                 self.audit_logger.log(
+                     agent_id=agent_id,
+                     tool=tool_name,
+                     allowed=True,
+                     reason=warning
+                 )
+                 continue
+
+             if rule_type == "deny_phrase":
+                 for phrase in check.get("phrases", []):
+                     if phrase.lower() in text:
+                         reason = f"Blocked: matched deny phrase “{phrase}”"
+                         self.audit_logger.log(
+                             agent_id=agent_id,
+                             tool=tool_name,
+                             allowed=False,
+                             reason=reason
+                         )
+                         return reason
+
+             elif rule_type == "semantic":
+                 trigger_phrases = [p.lower() for p in check.get("trigger_phrases", [])]
+                 if any(p in text for p in trigger_phrases):
+                     reference_file = check.get("reference_file")
+                     if not reference_file:
+                         continue
+
+                     reference_text = self._load_reference_text(reference_file)
+                     decision = self.verifier.decision(candidate=text, official=reference_text)
+
+                     if decision == "block":
+                         explanation = self.verifier.explain(candidate=text, official=reference_text)
+                         reason = f"Blocked by semantic check: {explanation}"
+                         self.audit_logger.log(
+                             agent_id=agent_id,
+                             tool=tool_name,
+                             allowed=False,
+                             reason=reason
+                         )
+                         return reason
+
+                     self.audit_logger.log(
+                         agent_id=agent_id,
+                         tool=tool_name,
+                         allowed=True,
+                         reason=f"Semantic decision: {decision}"
+                     )
+
+         return None
File without changes
@@ -0,0 +1,69 @@
+ from sentence_transformers import SentenceTransformer, util
+ from ..logger.audit import AuditLogger
+
+
+ class SentenceTransformerVerifier:
+     """
+     Verifies whether a candidate text is semantically aligned with an official policy text.
+     Uses cosine similarity thresholds to decide: allow, verify, or block.
+     """
+
+     def __init__(
+         self,
+         model_name: str = "all-MiniLM-L6-v2",
+         low_threshold: float = 0.50,
+         high_threshold: float = 0.75,
+     ):
+         self.embedder = SentenceTransformer(model_name)
+         self.low = low_threshold
+         self.high = high_threshold
+         self.logger = AuditLogger()
+
+     def similarity(self, a: str, b: str) -> float:
+         """
+         Computes cosine similarity between two strings.
+         """
+         vecs = self.embedder.encode([a, b], convert_to_tensor=True)
+         return util.cos_sim(vecs[0], vecs[1]).item()
+
+     def decision(self, candidate: str, official: str, agent_id: str = "semantic-check") -> str:
+         """
+         Returns one of: "allow", "verify", or "block"
+         based on similarity thresholds.
+         Also logs the similarity score and decision.
+         """
+         score = self.similarity(candidate, official)
+
+         if score >= self.high:
+             result = "allow"
+         elif score <= self.low:
+             result = "block"
+         else:
+             result = "verify"
+
+         self.logger.log(
+             agent_id=agent_id,
+             tool="semantic_policy_check",
+             allowed=(result != "block"),
+             reason=f"Semantic decision: {result} (score={score:.2f})"
+         )
+
+         return result
+
+     def explain(self, candidate: str, official: str, agent_id: str = "semantic-check") -> str:
+         """
+         Returns a debug-friendly explanation string including the similarity score.
+         Also logs it to the structured audit log for traceability.
+         """
+         score = self.similarity(candidate, official)
+         explanation = f"Semantic similarity = {score:.2f} (low={self.low}, high={self.high})"
+
+         # Structured debug logging (allowed=True since it’s informational)
+         self.logger.log(
+             agent_id=agent_id,
+             tool="semantic_policy_check",
+             allowed=True,
+             reason=explanation
+         )
+
+         return explanation
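
A quick sketch of the three-way decision. Actual scores depend on the downloaded all-MiniLM-L6-v2 model, so the comments only describe the thresholds:

```
from safentic.verifiers.sentence_verifier import SentenceTransformerVerifier

verifier = SentenceTransformerVerifier()  # fetches the model on first use

official = "Refunds are available within 30 days of the initial charge."
candidate = "You can get your money back at any time, forever."

score = verifier.similarity(candidate, official)   # cosine similarity
decision = verifier.decision(candidate, official)  # "allow" if >= 0.75,
                                                   # "block" if <= 0.50,
                                                   # "verify" otherwise
print(verifier.explain(candidate, official))       # score + thresholds string
```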
@@ -0,0 +1,42 @@
+ Metadata-Version: 2.4
+ Name: safentic
+ Version: 1.0.0
+ Summary: Safentic SDK for behavior analysis
+ Home-page: https://safentic.com
+ Author: Safentic
+ Author-email: contact@safentic.com
+ License: Proprietary :: Safentic Commercial License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: Other/Proprietary License
+ Classifier: Operating System :: OS Independent
+ Description-Content-Type: text/markdown
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: license
+ Dynamic: summary
+
+ # SAFENTIC-V0
+
+ Safentic is a runtime guardrail SDK for agentic AI.
+ It intercepts unsafe tool calls between agent **intent** and **execution**.
+
+ ## To Run Locally with Docker
+
+ ### Prerequisites
+
+ - [Docker Desktop](https://www.docker.com/products/docker-desktop) installed
+
+ ---
+
+ ### Build and Run with Docker
+
+ Make sure Docker Desktop is running on your device, then run the following:
+ ```
+ git clone https://github.com/type0-1/safentic-v0.git
+ cd safentic-v0
+ docker compose up --build
+ ```
@@ -0,0 +1,24 @@
+ MANIFEST.in
+ README.md
+ requirements.txt
+ setup.py
+ safentic/LICENSE.txt
+ safentic/__init__.py
+ safentic/config.py
+ safentic/engine.py
+ safentic/layer.py
+ safentic/policy.py
+ safentic.egg-info/PKG-INFO
+ safentic.egg-info/SOURCES.txt
+ safentic.egg-info/dependency_links.txt
+ safentic.egg-info/top_level.txt
+ safentic/helper/__init__.py
+ safentic/helper/auth.py
+ safentic/logger/__init__.py
+ safentic/logger/audit.py
+ safentic/policies/.gitkeep
+ safentic/policies/__init__.py
+ safentic/policies/example_policy.txt
+ safentic/policies/policy.yaml
+ safentic/verifiers/__init__.py
+ safentic/verifiers/sentence_verifier.py
@@ -0,0 +1,2 @@
+ safentic
+ tests
@@ -0,0 +1,4 @@
+ [egg_info]
+ tag_build =
+ tag_date = 0
+
@@ -0,0 +1,21 @@
+ from setuptools import setup, find_packages
+
+ # Read the long description up front so the file handle is closed promptly
+ with open("README.md", encoding="utf-8") as f:
+     long_description = f.read()
+
+ setup(
+     name="safentic",
+     version="1.0.0",
+     packages=find_packages(),
+     install_requires=[],
+     include_package_data=True,
+     license="Proprietary :: Safentic Commercial License",  # Custom classifier
+     classifiers=[
+         "Programming Language :: Python :: 3",
+         "License :: Other/Proprietary License",  # Indicates not open source
+         "Operating System :: OS Independent",
+     ],
+     author="Safentic",
+     author_email="contact@safentic.com",
+     description="Safentic SDK for behavior analysis",
+     long_description=long_description,
+     long_description_content_type="text/markdown",
+     url="https://safentic.com",
+ )