sigil-protocol 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,35 @@
1
# GitHub Actions workflow: build and publish sigil-protocol to PyPI on
# every GitHub release, using PyPI Trusted Publishing (OIDC) — no API token.
name: Publish to PyPI

on:
  release:
    types: [published]

permissions:
  contents: read
  id-token: write # required for Trusted Publishing OIDC

jobs:
  build-and-publish:
    name: Build and publish to PyPI
    runs-on: ubuntu-latest

    # The `pypi` environment must be configured as a trusted publisher
    # for this project on pypi.org.
    environment:
      name: pypi
      url: https://pypi.org/project/sigil-protocol/

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"

      - name: Install build
        run: python -m pip install build

      # Produces both the wheel and the sdist under dist/.
      - name: Build wheel and sdist
        run: python -m build

      # No password/token input: authentication happens via the OIDC id-token.
      - name: Publish to PyPI (Trusted Publishing)
        uses: pypa/gh-action-pypi-publish@release/v1
@@ -0,0 +1,208 @@
1
+ Metadata-Version: 2.4
2
+ Name: sigil-protocol
3
+ Version: 0.1.0
4
+ Summary: SIGIL security layer for AI agent tool calls — scans MCP tool arguments for leaked secrets, enforces policies, and writes audit logs
5
+ Project-URL: Homepage, https://sigil-protocol.org
6
+ Project-URL: Repository, https://github.com/sigil-eu/sigil
7
+ Project-URL: Documentation, https://sigil-protocol.org
8
+ Project-URL: Bug Tracker, https://github.com/sigil-eu/sigil/issues
9
+ License: MIT
10
+ Keywords: ai-agent,autogen,crewai,langchain,mcp,secrets-scanning,security,sigil
11
+ Classifier: Development Status :: 4 - Beta
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Programming Language :: Python :: 3
15
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
16
+ Classifier: Topic :: Security
17
+ Requires-Python: >=3.9
18
+ Requires-Dist: httpx>=0.25
19
+ Requires-Dist: pydantic>=2.0
20
+ Provides-Extra: all
21
+ Requires-Dist: crewai>=0.1; extra == 'all'
22
+ Requires-Dist: langchain-core>=0.1; extra == 'all'
23
+ Requires-Dist: mcp>=0.9; extra == 'all'
24
+ Requires-Dist: pyautogen>=0.2; extra == 'all'
25
+ Provides-Extra: autogen
26
+ Requires-Dist: pyautogen>=0.2; extra == 'autogen'
27
+ Provides-Extra: crewai
28
+ Requires-Dist: crewai>=0.1; extra == 'crewai'
29
+ Provides-Extra: langchain
30
+ Requires-Dist: langchain-core>=0.1; extra == 'langchain'
31
+ Provides-Extra: mcp
32
+ Requires-Dist: mcp>=0.9; extra == 'mcp'
33
+ Provides-Extra: openai
34
+ Requires-Dist: openai-agents>=0.1; extra == 'openai'
35
+ Description-Content-Type: text/markdown
36
+
37
+ # sigil-protocol
38
+
39
+ > 🔐 SIGIL security layer for AI agent tool calls — scans MCP tool arguments for leaked secrets, blocks dangerous operations, and writes audit logs.
40
+ > **MIT licensed.** Works with LangChain, CrewAI, AutoGen, mcp-agent, and OpenAI Agents SDK.
41
+
42
+ ```bash
43
+ pip install sigil-protocol
44
+ ```
45
+
46
+ ## 30-second start
47
+
48
+ ```python
49
+ from sigil_protocol import scan
50
+
51
+ result = scan('{"key": "AKIAIOSFODNN7EXAMPLE"}')
52
+ if result.blocked:
53
+ print(f"BLOCKED: {result.pattern} ({result.severity})")
54
+ # → BLOCKED: aws_access_key_id (Critical)
55
+ ```
56
+
57
+ ---
58
+
59
+ ## Framework Adapters
60
+
61
+ ### LangChain
62
+
63
+ ```bash
64
+ pip install 'sigil-protocol[langchain]'
65
+ ```
66
+
67
+ **Option A — Give the LLM an explicit scan tool:**
68
+
69
+ ```python
70
+ from sigil_protocol.langchain import SigilScanTool
71
+ from langchain.agents import initialize_agent
72
+
73
+ agent = initialize_agent(
74
+ tools=[SigilScanTool(), my_db_tool, my_api_tool],
75
+ llm=llm,
76
+ ...
77
+ )
78
+ # The LLM will call sigil_scan before passing data to any backend tool
79
+ ```
80
+
81
+ **Option B — Gate an existing tool transparently:**
82
+
83
+ ```python
84
+ from sigil_protocol.langchain import sigil_tool
85
+ from langchain_core.tools import BaseTool
86
+
87
+ @sigil_tool
88
+ class ExecuteSQLTool(BaseTool):
89
+ name = "execute_sql"
90
+ description = "Runs SQL queries"
91
+ def _run(self, query: str) -> str:
92
+ return db.execute(query)
93
+ # → Raises ValueError on Critical findings before _run is ever called
94
+ ```
95
+
96
+ ---
97
+
98
+ ### CrewAI
99
+
100
+ ```bash
101
+ pip install 'sigil-protocol[crewai]'
102
+ ```
103
+
104
+ ```python
105
+ from sigil_protocol.crewai import sigil_gate, SigilBaseTool
106
+ from crewai import Agent
107
+
108
+ # Explicit scan tool
109
+ agent = Agent(tools=[SigilBaseTool(), ...])
110
+
111
+ # Or gate any existing tool
112
+ @sigil_gate
113
+ class PaymentTool(BaseTool):
114
+ name: str = "initiate_payment"
115
+ ...
116
+ ```
117
+
118
+ ---
119
+
120
+ ### AutoGen
121
+
122
+ ```bash
123
+ pip install 'sigil-protocol[autogen]'
124
+ ```
125
+
126
+ ```python
127
+ from sigil_protocol.autogen import sigil_function
128
+
129
+ @user_proxy.register_for_execution()
130
+ @assistant.register_for_llm(description="Execute a shell command")
131
+ @sigil_function
132
+ def run_shell(cmd: str) -> str:
133
+ return subprocess.check_output(cmd, shell=True).decode()
134
+ # → Raises RuntimeError if cmd contains a leaked secret
135
+ ```
136
+
137
+ ---
138
+
139
+ ### mcp-agent (lastmile-ai)
140
+
141
+ ```bash
142
+ pip install 'sigil-protocol[mcp]'
143
+ ```
144
+
145
+ ```python
146
+ from sigil_protocol.mcp_agent import SigilMiddleware
147
+
148
+ async with app.run() as agent_app:
149
+ agent = Agent(name="my_agent", servers=["filesystem", "github"])
150
+ async with agent.activate() as active_agent:
151
+ llm = await active_agent.attach_llm(OpenAIAugmentedLLM)
152
+ llm.add_middleware(SigilMiddleware()) # ← scans args AND responses
153
+ ```
154
+
155
+ ---
156
+
157
+ ### OpenAI Agents SDK
158
+
159
+ ```bash
160
+ pip install 'sigil-protocol[openai]'
161
+ ```
162
+
163
+ ```python
164
+ from agents import Agent, Runner
165
+ from sigil_protocol.openai_agents import SigilGuardrail
166
+
167
+ agent = Agent(
168
+ name="secure_agent",
169
+ instructions="You are a helpful assistant.",
170
+ input_guardrails=[SigilGuardrail()],
171
+ )
172
+ result = await Runner.run(agent, user_input)
173
+ # → GuardrailTripwireTriggered if input contains leaked secrets
174
+ ```
175
+
176
+ ---
177
+
178
+ ## Pattern Coverage
179
+
180
+ Patterns are fetched from [registry.sigil-protocol.org](https://registry.sigil-protocol.org) (cached 5 min locally). Falls back to built-ins if offline.
181
+
182
+ | Category | Examples |
183
+ |---|---|
184
+ | Cloud credentials | AWS, GCP, Azure, OpenAI, GitHub, npm, Stripe |
185
+ | Cryptographic keys | RSA/EC private keys, SSH keys, JWT secrets |
186
+ | PII (EU GDPR) | IBAN, phone, email, SSN |
187
+ | Dangerous SQL | DROP TABLE, DELETE without WHERE, TRUNCATE |
188
+ | Prompt injection | Jailbreak openers, system prompt leaks |
189
+
190
+ ## Configuration
191
+
192
+ | Env variable | Default | Description |
193
+ |---|---|---|
194
+ | `SIGIL_REGISTRY_URL` | `https://registry.sigil-protocol.org` | Pattern registry endpoint |
195
+ | `SIGIL_BUNDLE_TTL` | `300` | Pattern cache TTL in seconds |
196
+ | `SIGIL_OFFLINE` | `false` | Use built-in patterns only |
197
+ | `SIGIL_MIN_SEVERITY` | `High` | Minimum severity to flag (`Warn`/`High`/`Critical`) |
198
+
199
+ ## License
200
+
201
+ **MIT** — this package. The SIGIL core Rust library is EUPL-1.2.
202
+
203
+ ## Links
204
+
205
+ - 🌐 [sigil-protocol.org](https://sigil-protocol.org)
206
+ - 📦 [PyPI: sigil-protocol](https://pypi.org/project/sigil-protocol/)
207
+ - 🗂 [registry.sigil-protocol.org](https://registry.sigil-protocol.org)
208
+ - 📄 [Protocol spec & Rust crate](https://github.com/sigil-eu/sigil)
@@ -0,0 +1,172 @@
1
+ # sigil-protocol
2
+
3
+ > 🔐 SIGIL security layer for AI agent tool calls — scans MCP tool arguments for leaked secrets, blocks dangerous operations, and writes audit logs.
4
+ > **MIT licensed.** Works with LangChain, CrewAI, AutoGen, mcp-agent, and OpenAI Agents SDK.
5
+
6
+ ```bash
7
+ pip install sigil-protocol
8
+ ```
9
+
10
+ ## 30-second start
11
+
12
+ ```python
13
+ from sigil_protocol import scan
14
+
15
+ result = scan('{"key": "AKIAIOSFODNN7EXAMPLE"}')
16
+ if result.blocked:
17
+ print(f"BLOCKED: {result.pattern} ({result.severity})")
18
+ # → BLOCKED: aws_access_key_id (Critical)
19
+ ```
20
+
21
+ ---
22
+
23
+ ## Framework Adapters
24
+
25
+ ### LangChain
26
+
27
+ ```bash
28
+ pip install 'sigil-protocol[langchain]'
29
+ ```
30
+
31
+ **Option A — Give the LLM an explicit scan tool:**
32
+
33
+ ```python
34
+ from sigil_protocol.langchain import SigilScanTool
35
+ from langchain.agents import initialize_agent
36
+
37
+ agent = initialize_agent(
38
+ tools=[SigilScanTool(), my_db_tool, my_api_tool],
39
+ llm=llm,
40
+ ...
41
+ )
42
+ # The LLM will call sigil_scan before passing data to any backend tool
43
+ ```
44
+
45
+ **Option B — Gate an existing tool transparently:**
46
+
47
+ ```python
48
+ from sigil_protocol.langchain import sigil_tool
49
+ from langchain_core.tools import BaseTool
50
+
51
+ @sigil_tool
52
+ class ExecuteSQLTool(BaseTool):
53
+ name = "execute_sql"
54
+ description = "Runs SQL queries"
55
+ def _run(self, query: str) -> str:
56
+ return db.execute(query)
57
+ # → Raises ValueError on Critical findings before _run is ever called
58
+ ```
59
+
60
+ ---
61
+
62
+ ### CrewAI
63
+
64
+ ```bash
65
+ pip install 'sigil-protocol[crewai]'
66
+ ```
67
+
68
+ ```python
69
+ from sigil_protocol.crewai import sigil_gate, SigilBaseTool
70
+ from crewai import Agent
71
+
72
+ # Explicit scan tool
73
+ agent = Agent(tools=[SigilBaseTool(), ...])
74
+
75
+ # Or gate any existing tool
76
+ @sigil_gate
77
+ class PaymentTool(BaseTool):
78
+ name: str = "initiate_payment"
79
+ ...
80
+ ```
81
+
82
+ ---
83
+
84
+ ### AutoGen
85
+
86
+ ```bash
87
+ pip install 'sigil-protocol[autogen]'
88
+ ```
89
+
90
+ ```python
91
+ from sigil_protocol.autogen import sigil_function
92
+
93
+ @user_proxy.register_for_execution()
94
+ @assistant.register_for_llm(description="Execute a shell command")
95
+ @sigil_function
96
+ def run_shell(cmd: str) -> str:
97
+ return subprocess.check_output(cmd, shell=True).decode()
98
+ # → Raises RuntimeError if cmd contains a leaked secret
99
+ ```
100
+
101
+ ---
102
+
103
+ ### mcp-agent (lastmile-ai)
104
+
105
+ ```bash
106
+ pip install 'sigil-protocol[mcp]'
107
+ ```
108
+
109
+ ```python
110
+ from sigil_protocol.mcp_agent import SigilMiddleware
111
+
112
+ async with app.run() as agent_app:
113
+ agent = Agent(name="my_agent", servers=["filesystem", "github"])
114
+ async with agent.activate() as active_agent:
115
+ llm = await active_agent.attach_llm(OpenAIAugmentedLLM)
116
+ llm.add_middleware(SigilMiddleware()) # ← scans args AND responses
117
+ ```
118
+
119
+ ---
120
+
121
+ ### OpenAI Agents SDK
122
+
123
+ ```bash
124
+ pip install 'sigil-protocol[openai]'
125
+ ```
126
+
127
+ ```python
128
+ from agents import Agent, Runner
129
+ from sigil_protocol.openai_agents import SigilGuardrail
130
+
131
+ agent = Agent(
132
+ name="secure_agent",
133
+ instructions="You are a helpful assistant.",
134
+ input_guardrails=[SigilGuardrail()],
135
+ )
136
+ result = await Runner.run(agent, user_input)
137
+ # → GuardrailTripwireTriggered if input contains leaked secrets
138
+ ```
139
+
140
+ ---
141
+
142
+ ## Pattern Coverage
143
+
144
+ Patterns are fetched from [registry.sigil-protocol.org](https://registry.sigil-protocol.org) (cached 5 min locally). Falls back to built-ins if offline.
145
+
146
+ | Category | Examples |
147
+ |---|---|
148
+ | Cloud credentials | AWS, GCP, Azure, OpenAI, GitHub, npm, Stripe |
149
+ | Cryptographic keys | RSA/EC private keys, SSH keys, JWT secrets |
150
+ | PII (EU GDPR) | IBAN, phone, email, SSN |
151
+ | Dangerous SQL | DROP TABLE, DELETE without WHERE, TRUNCATE |
152
+ | Prompt injection | Jailbreak openers, system prompt leaks |
153
+
154
+ ## Configuration
155
+
156
+ | Env variable | Default | Description |
157
+ |---|---|---|
158
+ | `SIGIL_REGISTRY_URL` | `https://registry.sigil-protocol.org` | Pattern registry endpoint |
159
+ | `SIGIL_BUNDLE_TTL` | `300` | Pattern cache TTL in seconds |
160
+ | `SIGIL_OFFLINE` | `false` | Use built-in patterns only |
161
+ | `SIGIL_MIN_SEVERITY` | `High` | Minimum severity to flag (`Warn`/`High`/`Critical`) |
162
+
163
+ ## License
164
+
165
+ **MIT** — this package. The SIGIL core Rust library is EUPL-1.2.
166
+
167
+ ## Links
168
+
169
+ - 🌐 [sigil-protocol.org](https://sigil-protocol.org)
170
+ - 📦 [PyPI: sigil-protocol](https://pypi.org/project/sigil-protocol/)
171
+ - 🗂 [registry.sigil-protocol.org](https://registry.sigil-protocol.org)
172
+ - 📄 [Protocol spec & Rust crate](https://github.com/sigil-eu/sigil)
@@ -0,0 +1,47 @@
1
+ [build-system]
2
+ requires = ["hatchling"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [project]
6
+ name = "sigil-protocol"
7
+ version = "0.1.0"
8
+ description = "SIGIL security layer for AI agent tool calls — scans MCP tool arguments for leaked secrets, enforces policies, and writes audit logs"
9
+ readme = "README.md"
10
+ license = { text = "MIT" }
11
+ requires-python = ">=3.9"
12
+ keywords = ["sigil", "mcp", "security", "ai-agent", "langchain", "crewai", "autogen", "secrets-scanning"]
13
+ classifiers = [
14
+ "Development Status :: 4 - Beta",
15
+ "Intended Audience :: Developers",
16
+ "License :: OSI Approved :: MIT License",
17
+ "Programming Language :: Python :: 3",
18
+ "Topic :: Security",
19
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
20
+ ]
21
+
22
+ dependencies = [
23
+ "httpx>=0.25",
24
+ "pydantic>=2.0",
25
+ ]
26
+
27
+ [project.optional-dependencies]
28
+ langchain = ["langchain-core>=0.1"]
29
+ crewai = ["crewai>=0.1"]
30
+ autogen = ["pyautogen>=0.2"]
31
+ mcp = ["mcp>=0.9"]
32
+ openai = ["openai-agents>=0.1"]
33
+ all = [
34
+ "langchain-core>=0.1",
35
+ "crewai>=0.1",
36
+ "pyautogen>=0.2",
37
+ "mcp>=0.9",
38
+ ]
39
+
40
+ [project.urls]
41
+ Homepage = "https://sigil-protocol.org"
42
+ Repository = "https://github.com/sigil-eu/sigil"
43
+ Documentation = "https://sigil-protocol.org"
44
+ "Bug Tracker" = "https://github.com/sigil-eu/sigil/issues"
45
+
46
+ [tool.hatch.build.targets.wheel]
47
+ packages = ["sigil_protocol"]
@@ -0,0 +1,29 @@
1
+ """
2
+ sigil-protocol — SIGIL security layer for AI agent tool calls.
3
+
4
+ Quick start:
5
+ from sigil_protocol import scan, ScanResult
6
+
7
+ result = scan('{"key": "AKIAIOSFODNN7EXAMPLE"}')
8
+ if result.blocked:
9
+ raise ValueError(f"SIGIL blocked: {result.pattern} ({result.severity})")
10
+
11
+ Framework adapters:
12
+ from sigil_protocol.langchain import SigilScanTool, sigil_tool
13
+ from sigil_protocol.crewai import sigil_gate, SigilBaseTool
14
+ from sigil_protocol.autogen import sigil_function, SigilFunctionExecutor
15
+ from sigil_protocol.mcp_agent import SigilMiddleware
16
+ from sigil_protocol.openai_agents import SigilGuardrail
17
+ """
18
+
19
+ from .scanner import RemoteScanner, ScanResult, Severity, scan, scanner
20
+
21
+ __all__ = [
22
+ "RemoteScanner",
23
+ "ScanResult",
24
+ "Severity",
25
+ "scan",
26
+ "scanner",
27
+ ]
28
+
29
+ __version__ = "0.1.0"
@@ -0,0 +1,107 @@
1
+ """
2
+ SIGIL adapter for Microsoft AutoGen.
3
+
4
+ Provides:
5
+ - sigil_function — function decorator; wraps a @register_for_execution function
6
+ - SigilProxy — a ConversableAgent that intercepts all outgoing function calls
7
+
8
+ Usage:
9
+ from sigil_protocol.autogen import sigil_function, SigilProxy
10
+ import autogen
11
+
12
+ # Option 1: Per-function decorator
13
+ @user_proxy.register_for_execution()
14
+ @assistant.register_for_llm(description="Execute SQL query")
15
+ @sigil_function
16
+ def execute_sql(query: str) -> str:
17
+ return db.execute(query)
18
+
19
+ # Option 2: Proxy agent that gates ALL tool calls
20
+ sigil_proxy = SigilProxy(name="sigil_guard")
21
+ # Place sigil_proxy in your agent graph between LLM and executor
22
+ """
23
+
24
+ from __future__ import annotations
25
+
26
+ import json
27
+ import logging
28
+ from functools import wraps
29
+ from typing import Any, Callable
30
+
31
+ from .scanner import scanner
32
+
33
+ logger = logging.getLogger("sigil_protocol.autogen")
34
+
35
+
36
def sigil_function(fn: Callable) -> Callable:
    """
    Decorator for AutoGen @register_for_execution functions.

    Serializes every positional and keyword argument to JSON and scans the
    result with SIGIL before the wrapped function runs.

    Raises:
        RuntimeError: on Critical-severity findings (the call is blocked).

    Example:
        @user_proxy.register_for_execution()
        @assistant.register_for_llm(description="Send email")
        @sigil_function
        def send_email(to: str, body: str) -> str:
            ...
    """
    @wraps(fn)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        # default=str keeps serialization best-effort: non-JSON-serializable
        # argument values are stringified so the scan gate itself never
        # crashes the tool call with a TypeError.
        payload = json.dumps({"args": list(args), "kwargs": kwargs}, default=str)
        result = scanner().scan(payload)
        if result.blocked:
            raise RuntimeError(
                f"🔐 SIGIL blocked call to `{fn.__name__}`: "
                f"leaked secret detected ({result.pattern}, severity={result.severity}). "
                "Remove sensitive data from arguments and retry."
            )
        if result.warned:
            # Non-blocking finding: log it and let the call proceed.
            logger.warning(
                "SIGIL warning in call to `%s`: %s (%s)",
                fn.__name__, result.pattern, result.severity,
            )
        return fn(*args, **kwargs)

    return wrapper
67
+
68
+
69
class SigilProxy:
    """
    A lightweight AutoGen-compatible proxy that can be placed in an agent
    graph to intercept all tool/function calls made by any upstream agent.

    Use by wrapping a ConversableAgent's generate_tool_calls_reply or
    by registering it as a middleware in custom agent graphs.

    Minimal usage in a two-agent setup:
        from autogen import AssistantAgent, UserProxyAgent
        from sigil_protocol.autogen import SigilProxy, sigil_function

        # Decorate individual functions (simpler)
        @user_proxy.register_for_execution()
        @assistant.register_for_llm(description="...")
        @sigil_function
        def my_tool(...): ...
    """

    def __call__(self, func_name: str, func_args: dict) -> dict | None:
        """
        Call gate. Returns None to allow the call, or a dict with
        {"content": "SIGIL BLOCKED: ..."} to short-circuit with an error.
        """
        # default=str: stringify non-JSON-serializable argument values instead
        # of letting json.dumps raise TypeError and crash the gate itself.
        payload = json.dumps({"function": func_name, "args": func_args}, default=str)
        result = scanner().scan(payload)
        if result.blocked:
            msg = (
                f"🔐 SIGIL blocked `{func_name}`: "
                f"leaked secret ({result.pattern}, severity={result.severity}). "
                "Retry without the sensitive data."
            )
            logger.error(msg)
            # Short-circuit with a tool-role error message the LLM can see.
            return {"content": msg, "role": "tool"}
        if result.warned:
            logger.warning(
                "SIGIL warning for `%s`: %s (%s)", func_name, result.pattern, result.severity
            )
        return None  # proceed
@@ -0,0 +1,95 @@
1
+ """
2
+ SIGIL adapter for CrewAI.
3
+
4
+ Provides:
5
+ - sigil_gate — class decorator that wraps any CrewAI BaseTool with a gate
6
+ - SigilBaseTool — ready-to-use CrewAI tool for explicit LLM scans
7
+
8
+ Usage:
9
+ from sigil_protocol.crewai import sigil_gate, SigilBaseTool
10
+ from crewai_tools import BaseTool
11
+
12
+ # Option 1: Explicit scan tool
13
+ scan_tool = SigilBaseTool()
14
+ agent = Agent(tools=[scan_tool, my_other_tool])
15
+
16
+ # Option 2: Gate an existing tool
17
+ @sigil_gate
18
+ class MyApiTool(BaseTool):
19
+ name: str = "call_api"
20
+ description: str = "Calls the payment API"
21
+ def _run(self, endpoint: str, payload: str) -> str: ...
22
+ """
23
+
24
+ from __future__ import annotations
25
+
26
+ import json
27
+ from typing import Any, Optional, Type
28
+
29
+ from .scanner import scanner
30
+
31
+ try:
32
+ from crewai.tools import BaseTool
33
+ except ImportError:
34
+ try:
35
+ from crewai_tools import BaseTool # type: ignore[no-redef]
36
+ except ImportError as e:
37
+ raise ImportError(
38
+ "CrewAI adapter requires crewai. "
39
+ "Install with: pip install 'sigil-protocol[crewai]'"
40
+ ) from e
41
+
42
+
43
class SigilBaseTool(BaseTool):
    """
    CrewAI tool that scans a JSON payload for SIGIL security findings.
    Add to any Agent's tool list. The LLM will call it before passing
    sensitive data to any other tool.
    """

    name: str = "sigil_scan"
    description: str = (
        "Scans any JSON payload for leaked secrets (API keys, credentials, PII), "
        "dangerous SQL operations, or prompt injection using SIGIL's 43+ verified patterns. "
        "Call this BEFORE invoking any tool that processes external or user-supplied data. "
        "If severity='Critical' is returned, abort the tool call and inform the user."
    )

    def _run(self, payload: str) -> str:
        # Run the scan and summarize every hit in one human/LLM-readable line.
        outcome = scanner().scan(payload)
        if not outcome.hit:
            return "SIGIL: clean — no findings."
        label = "BLOCKED" if outcome.blocked else "WARNING"
        summaries = [
            f"{hit.get('id') or hit.get('pattern_name')} ({hit.get('severity')})"
            for hit in outcome.all_hits
        ]
        return f"SIGIL {label}: {', '.join(summaries)}"
68
+
69
+
70
def sigil_gate(cls: Type[BaseTool]) -> Type[BaseTool]:
    """
    Class decorator for CrewAI BaseTool. Scans all _run() arguments
    before executing. On Critical-severity findings the wrapped tool is
    not executed; a human-readable refusal string is returned instead so
    the agent loop can surface it to the LLM.

    Example:
        @sigil_gate
        class DatabaseQueryTool(BaseTool):
            name: str = "query_database"
            ...
    """
    original_run = cls._run

    def _guarded_run(self, *args: Any, **kwargs: Any) -> Any:
        # default=str keeps json.dumps from raising TypeError when the tool
        # receives non-JSON-serializable arguments — the gate must never be
        # the thing that crashes the call.
        payload = json.dumps({"args": list(args), "kwargs": kwargs}, default=str)
        result = scanner().scan(payload)
        if result.blocked:
            return (
                f"🔐 SIGIL BLOCKED: This call to `{self.name}` was blocked because "
                f"a leaked secret was detected ({result.pattern}, severity={result.severity}). "
                "Remove the sensitive data and retry."
            )
        return original_run(self, *args, **kwargs)

    cls._run = _guarded_run
    return cls
@@ -0,0 +1,117 @@
1
+ """
2
+ SIGIL adapter for LangChain.
3
+
4
+ Provides:
5
+ - SigilScanTool — a LangChain BaseTool that the LLM can call explicitly
6
+ - sigil_tool — decorator that wraps any BaseTool with a SIGIL pre-scan gate
7
+
8
+ Usage:
9
+ from sigil_protocol.langchain import SigilScanTool, sigil_tool
10
+ from langchain.agents import initialize_agent
11
+
12
+ # Option 1: Give the LLM an explicit scan tool
13
+ agent = initialize_agent(tools=[SigilScanTool(), my_db_tool, ...], ...)
14
+
15
+ # Option 2: Gate an existing tool
16
+ @sigil_tool
17
+ class MySensitiveTool(BaseTool):
18
+ name = "execute_sql"
19
+ ...
20
+ """
21
+
22
+ from __future__ import annotations
23
+
24
+ import json
25
+ from functools import wraps
26
+ from typing import Any, Optional, Type
27
+
28
+ from .scanner import ScanResult, Severity, scanner
29
+
30
+ try:
31
+ from langchain_core.tools import BaseTool
32
+ from pydantic import BaseModel, Field
33
+ except ImportError as e:
34
+ raise ImportError(
35
+ "LangChain adapter requires langchain-core. "
36
+ "Install with: pip install 'sigil-protocol[langchain]'"
37
+ ) from e
38
+
39
+
40
class _ScanInput(BaseModel):
    # Argument schema for SigilScanTool: one free-text/JSON field the LLM fills in.
    payload: str = Field(description="JSON string or plain text to scan for secrets or dangerous content.")
42
+
43
+
44
class SigilScanTool(BaseTool):
    """
    LangChain tool that scans a payload for SIGIL security findings.
    Register alongside your other tools — the LLM will call this before
    sending sensitive data to any backend.
    """

    name: str = "sigil_scan"
    description: str = (
        "Scans a JSON payload or text for leaked secrets (API keys, credentials, "
        "private keys, PII), dangerous SQL (DROP TABLE, DELETE without WHERE), or "
        "prompt injection patterns using the SIGIL registry of 43+ verified patterns. "
        "Call this BEFORE passing any user-supplied or sensitive data to a tool backend. "
        "If the result contains severity='Critical', you MUST NOT proceed and MUST "
        "inform the user immediately."
    )
    args_schema: Type[BaseModel] = _ScanInput

    def _run(self, payload: str) -> str:
        # Report findings as a JSON envelope the LLM can reason about.
        outcome = scanner().scan(payload)
        if not outcome.hit:
            return json.dumps({"status": "clean", "findings": []})
        report = []
        for hit in outcome.all_hits:
            report.append(
                {
                    "pattern": hit.get("id") or hit.get("pattern_name"),
                    "severity": hit.get("severity"),
                    "category": hit.get("category"),
                }
            )
        verdict = "blocked" if outcome.blocked else "warn"
        return json.dumps({"status": verdict, "findings": report})

    async def _arun(self, payload: str) -> str:
        # Scanning is synchronous; delegate to the sync path.
        return self._run(payload)
75
+
76
+
77
def sigil_tool(cls: Type[BaseTool]) -> Type[BaseTool]:
    """
    Class decorator that wraps a LangChain BaseTool's _run/_arun methods
    with a SIGIL pre-scan gate. Critical findings raise ValueError (blocking
    the tool call); findings below the blocking threshold pass through.

    Example:
        @sigil_tool
        class MyDatabaseTool(BaseTool):
            name = "query_db"
            ...
    """
    original_run = cls._run
    original_arun = cls._arun if hasattr(cls, "_arun") else None

    def _scan_or_raise(self, args: tuple, kwargs: dict) -> None:
        # Shared gate for the sync and async paths so both produce the same
        # error message. default=str keeps json.dumps from raising TypeError
        # on non-JSON-serializable argument values.
        payload = json.dumps({"args": list(args), "kwargs": kwargs}, default=str)
        result = scanner().scan(payload)
        if result.blocked:
            raise ValueError(
                f"🔐 SIGIL blocked tool call to `{self.name}`: "
                f"leaked secret detected ({result.pattern}, {result.severity}). "
                "Remove sensitive data from the arguments and retry."
            )

    def _guarded_run(self, *args: Any, **kwargs: Any) -> Any:
        _scan_or_raise(self, args, kwargs)
        return original_run(self, *args, **kwargs)

    async def _guarded_arun(self, *args: Any, **kwargs: Any) -> Any:
        _scan_or_raise(self, args, kwargs)
        if original_arun:
            return await original_arun(self, *args, **kwargs)
        # No async implementation on the original tool: fall back to sync.
        return original_run(self, *args, **kwargs)

    cls._run = _guarded_run
    cls._arun = _guarded_arun
    return cls
@@ -0,0 +1,89 @@
1
+ """
2
+ SIGIL adapter for lastmile-ai/mcp-agent.
3
+
4
+ Provides SigilMiddleware — drop into MCPAgent's middleware list to scan
5
+ every outgoing tool call and tool result for secret leaks before they
6
+ reach any MCP server backend.
7
+
8
+ Usage:
9
+ from mcp_agent.app import MCPApp
10
+ from mcp_agent.agents.agent import Agent
11
+ from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM
12
+ from sigil_protocol.mcp_agent import SigilMiddleware
13
+
14
+ app = MCPApp(name="my_agent")
15
+
16
+ async with app.run() as agent_app:
17
+ agent = Agent(
18
+ name="my_agent",
19
+ instruction="You are a helpful assistant.",
20
+ servers=["filesystem", "github"],
21
+ )
22
+ async with agent.activate() as active_agent:
23
+ llm = await active_agent.attach_llm(OpenAIAugmentedLLM)
24
+ # Middleware scans all tool args before they leave the process
25
+ llm.add_middleware(SigilMiddleware())
26
+ result = await llm.generate_str("List my files")
27
+ """
28
+
29
+ from __future__ import annotations
30
+
31
+ import json
32
+ import logging
33
+ from typing import Any
34
+
35
+ from .scanner import scanner
36
+
37
+ logger = logging.getLogger("sigil_protocol.mcp_agent")
38
+
39
+
40
+ class SigilMiddleware:
41
+ """
42
+ mcp-agent middleware that scans tool call arguments with SIGIL before
43
+ they are sent to any MCP server. Blocks Critical findings.
44
+
45
+ Attach via: llm.add_middleware(SigilMiddleware())
46
+ Or: app = MCPApp(..., middleware=[SigilMiddleware()])
47
+ """
48
+
49
+ async def on_tool_call(
50
+ self,
51
+ tool_name: str,
52
+ tool_args: dict[str, Any],
53
+ next_handler,
54
+ ) -> Any:
55
+ payload = json.dumps({"tool": tool_name, "args": tool_args})
56
+ result = scanner().scan(payload)
57
+
58
+ if result.blocked:
59
+ msg = (
60
+ f"🔐 SIGIL blocked `{tool_name}`: "
61
+ f"leaked secret detected ({result.pattern}, severity={result.severity}). "
62
+ "Remove the sensitive data from the arguments and retry."
63
+ )
64
+ logger.error(msg)
65
+ # Return an error content block compatible with mcp-agent's response format
66
+ return {"content": [{"type": "text", "text": msg}], "isError": True}
67
+
68
+ if result.warned:
69
+ logger.warning(
70
+ "SIGIL warning for `%s`: %s (%s)", tool_name, result.pattern, result.severity
71
+ )
72
+
73
+ return await next_handler(tool_name, tool_args)
74
+
75
+ async def on_tool_result(
76
+ self,
77
+ tool_name: str,
78
+ result_content: Any,
79
+ next_handler,
80
+ ) -> Any:
81
+ """Also scan tool *responses* for accidentally returned credentials."""
82
+ payload = json.dumps(result_content) if not isinstance(result_content, str) else result_content
83
+ scan_result = scanner().scan(payload)
84
+ if scan_result.hit:
85
+ logger.warning(
86
+ "SIGIL: secret in response from `%s`: %s (%s) — logged.",
87
+ tool_name, scan_result.pattern, scan_result.severity,
88
+ )
89
+ return await next_handler(tool_name, result_content)
@@ -0,0 +1,101 @@
1
+ """
2
+ SIGIL adapter for the OpenAI Agents SDK (openai-agents).
3
+
4
+ Provides SigilGuardrail — an InputGuardrail that can be applied to any
5
+ OpenAI Agent to scan tool arguments (and optionally response content)
6
+ for leaked secrets before they are processed.
7
+
8
+ Usage:
9
+ from agents import Agent, Runner
10
+ from sigil_protocol.openai_agents import SigilGuardrail
11
+
12
+ agent = Agent(
13
+ name="my_agent",
14
+ instructions="You are a helpful assistant.",
15
+ input_guardrails=[SigilGuardrail()],
16
+ )
17
+ result = await Runner.run(agent, "Send this API key to the webhook: sk-abc123...")
18
+ # → Guardrail trips, run is exited with a GuardrailTripwireTriggered exception
19
+ """
20
+
21
+ from __future__ import annotations
22
+
23
+ import json
24
+ import logging
25
+ from typing import Any
26
+
27
+ from .scanner import Severity, scanner
28
+
29
+ logger = logging.getLogger("sigil_protocol.openai_agents")
30
+
31
+ try:
32
+ from agents import ( # type: ignore[import]
33
+ Agent,
34
+ GuardrailFunctionOutput,
35
+ InputGuardrail,
36
+ RunContextWrapper,
37
+ TResponseInputItem,
38
+ )
39
+ _HAS_SDK = True
40
+ except ImportError:
41
+ _HAS_SDK = False
42
+
43
+
44
# Define the real guardrail only when the optional openai-agents SDK imported
# successfully; otherwise expose a stub that fails loudly on construction.
if _HAS_SDK:
    from pydantic import BaseModel

    class _SigilOutput(BaseModel):
        # Structured guardrail output: whether the input was blocked and why.
        blocked: bool
        reason: str | None = None

    class SigilGuardrail(InputGuardrail):
        """
        OpenAI Agents SDK InputGuardrail backed by SIGIL's remote scanner.
        Trips on Critical-severity findings and warns on High.
        """

        name: str = "sigil_guardrail"

        async def run(
            self,
            ctx: RunContextWrapper,
            agent: Agent,
            input: str | list[TResponseInputItem],
        ) -> GuardrailFunctionOutput:
            # Normalize structured input items to a single scannable string.
            text = input if isinstance(input, str) else json.dumps(input)
            result = scanner().scan(text)

            if result.blocked:
                reason = (
                    f"🔐 SIGIL: Leaked secret detected ({result.pattern}, "
                    f"severity={result.severity}). Input rejected."
                )
                logger.error(reason)
                # tripwire_triggered=True makes the Runner raise
                # GuardrailTripwireTriggered and abort the agent run.
                return GuardrailFunctionOutput(
                    output_info=_SigilOutput(blocked=True, reason=reason),
                    tripwire_triggered=True,
                )

            if result.warned:
                # Sub-blocking finding: log only, let the run continue.
                logger.warning(
                    "SIGIL warning: %s (%s) in agent input", result.pattern, result.severity
                )

            return GuardrailFunctionOutput(
                output_info=_SigilOutput(blocked=False),
                tripwire_triggered=False,
            )

else:
    # Stub when openai-agents is not installed
    class SigilGuardrail:  # type: ignore[no-redef]
        """
        Stub — install openai-agents to use this adapter:
        pip install 'sigil-protocol[openai]'
        """

        def __init__(self, *args: Any, **kwargs: Any) -> None:
            raise ImportError(
                "OpenAI Agents adapter requires openai-agents. "
                "Install with: pip install 'sigil-protocol[openai]'"
            )
@@ -0,0 +1,162 @@
1
+ """
2
+ Core scanner — fetches the SIGIL pattern bundle from the public registry
3
+ and scans arbitrary text/JSON for security findings.
4
+
5
+ Uses no EUPL code — this file is MIT-licensed and calls the registry HTTP API.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import json
11
+ import os
12
+ import re
13
+ import time
14
+ from dataclasses import dataclass, field
15
+ from enum import Enum
16
+ from typing import Optional
17
+
18
+ import httpx
19
+
20
+ REGISTRY_URL = os.getenv(
21
+ "SIGIL_REGISTRY_URL", "https://registry.sigil-protocol.org"
22
+ )
23
+ BUNDLE_TTL = int(os.getenv("SIGIL_BUNDLE_TTL", "300")) # seconds
24
+ OFFLINE = os.getenv("SIGIL_OFFLINE", "").lower() in ("1", "true", "yes")
25
+ MIN_SEVERITY = os.getenv("SIGIL_MIN_SEVERITY", "High") # Warn|High|Critical
26
+
27
+ # ── Built-in fallback patterns (subset) ──────────────────────────────────────
28
+ _BUILTIN_PATTERNS = [
29
+ {"id": "aws_access_key_id", "severity": "Critical", "regex": r"AKIA[0-9A-Z]{16}"},
30
+ {"id": "openai_api_key", "severity": "Critical", "regex": r"sk-[a-zA-Z0-9]{32,}"},
31
+ {"id": "github_pat", "severity": "Critical", "regex": r"gh[ps]_[a-zA-Z0-9]{36}"},
32
+ {"id": "rsa_private_key", "severity": "Critical", "regex": r"-----BEGIN RSA PRIVATE KEY-----"},
33
+ {"id": "generic_secret", "severity": "High", "regex": r"(?i)(secret|password|passwd|api_key)\s*[:=]\s*['\"]?[A-Za-z0-9+/]{16,}"},
34
+ {"id": "sql_drop_table", "severity": "Critical", "regex": r"(?i)DROP\s+TABLE\s+\w+"},
35
+ {"id": "sql_delete_no_where", "severity": "High", "regex": r"(?i)DELETE\s+FROM\s+\w+\s*(?!WHERE)"},
36
+ {"id": "sql_truncate", "severity": "High", "regex": r"(?i)TRUNCATE\s+(TABLE\s+)?\w+"},
37
+ {"id": "prompt_injection", "severity": "High", "regex": r"(?i)(ignore previous instructions|you are now|act as|jailbreak)"},
38
+ ]
39
+
40
+
41
class Severity(str, Enum):
    """Finding severity, ordered Warn < High < Critical."""

    Warn = "Warn"
    High = "High"
    Critical = "Critical"

    @classmethod
    def _order(cls) -> dict[str, int]:
        # Numeric rank used both for >= comparisons and for sorting hits.
        return {"Warn": 0, "High": 1, "Critical": 2}

    def __ge__(self, other: "Severity") -> bool:
        ranks = self._order()
        return ranks[self.value] >= ranks[other.value]
52
+
53
+
54
@dataclass
class ScanResult:
    """Outcome of a scan: the primary (highest-severity) hit plus all hits."""

    hit: bool
    pattern: Optional[str] = None
    severity: Optional[Severity] = None
    category: Optional[str] = None
    all_hits: list[dict] = field(default_factory=list)

    def __bool__(self) -> bool:
        # Truthiness mirrors whether anything was found at all.
        return self.hit

    @property
    def blocked(self) -> bool:
        # Only Critical findings block the call outright.
        if not self.hit:
            return False
        return self.severity == Severity.Critical

    @property
    def warned(self) -> bool:
        # High/Warn findings are surfaced to the caller but do not block.
        if not self.hit:
            return False
        return self.severity in (Severity.High, Severity.Warn)
72
+
73
+
74
class RemoteScanner:
    """
    Fetches the SIGIL pattern bundle from registry.sigil-protocol.org and
    compiles regexes locally. Patterns are cached for SIGIL_BUNDLE_TTL seconds
    (default 5 minutes). Falls back to built-ins if the registry is unreachable.
    """

    def __init__(self) -> None:
        self._patterns: list[dict] = []  # raw pattern dicts (bundle or built-ins)
        self._compiled: list[tuple[re.Pattern, dict]] = []  # (compiled regex, meta)
        self._fetched_at: float = 0.0  # monotonic timestamp of last (re)load
        # Fail fast on a misconfigured SIGIL_MIN_SEVERITY (raises ValueError).
        self._min_sev = Severity(MIN_SEVERITY)

    def _needs_refresh(self) -> bool:
        """Return True when the bundle is stale (or was never loaded)."""
        return time.monotonic() - self._fetched_at > BUNDLE_TTL

    @staticmethod
    def _severity_of(meta: dict) -> Severity:
        """Map a pattern's declared severity to the enum, defaulting to Warn.

        Remote bundles are untrusted input: an unknown severity string must
        not crash a scan, so it degrades to the lowest severity instead of
        raising ValueError.
        """
        try:
            return Severity(meta.get("severity", "Warn"))
        except ValueError:
            return Severity.Warn

    def _load(self) -> None:
        """Refresh self._patterns / self._compiled from the registry or built-ins."""
        if OFFLINE:
            self._patterns = _BUILTIN_PATTERNS
        else:
            try:
                resp = httpx.get(
                    f"{REGISTRY_URL}/patterns/bundle",
                    timeout=5.0,
                    headers={"Accept": "application/json"},
                )
                resp.raise_for_status()
                data = resp.json()
                self._patterns = data if isinstance(data, list) else data.get("patterns", [])
            except Exception:
                # Registry unreachable / bad response: keep the previously
                # fetched bundle if we have one, else fall back to built-ins.
                if not self._patterns:
                    self._patterns = _BUILTIN_PATTERNS

        self._compiled = []
        for p in self._patterns:
            try:
                self._compiled.append((re.compile(p["regex"]), p))
            except (re.error, KeyError, TypeError):
                # Skip malformed bundle entries (bad regex, missing "regex"
                # key, non-dict item) rather than failing the whole bundle.
                pass
        # Timestamp is advanced even on failure so a dead registry is only
        # retried once per TTL window instead of on every scan.
        self._fetched_at = time.monotonic()

    def scan(self, text: str) -> ScanResult:
        """Scan text for security findings. Returns the highest-severity hit."""
        if self._needs_refresh():
            self._load()

        # Defensive coercion: callers sometimes pass decoded JSON directly.
        if isinstance(text, (dict, list)):
            text = json.dumps(text)

        hits = []
        for pattern, meta in self._compiled:
            if pattern.search(text):
                sev = self._severity_of(meta)
                # Findings below the configured floor are dropped entirely.
                if sev >= self._min_sev:
                    hits.append({**meta, "severity_enum": sev})

        if not hits:
            return ScanResult(hit=False)

        # Return the highest-severity hit as the primary; all_hits keeps the
        # full list sorted most-severe-first (stable for equal severities).
        hits.sort(key=lambda h: Severity._order()[h["severity_enum"].value], reverse=True)
        top = hits[0]
        return ScanResult(
            hit=True,
            pattern=top.get("id") or top.get("pattern_name"),
            severity=top["severity_enum"],
            category=top.get("category"),
            all_hits=hits,
        )

    def scan_json(self, obj) -> ScanResult:
        """Serialize *obj* to JSON and scan the resulting text."""
        return self.scan(json.dumps(obj))
146
+
147
+
148
# Module-level default scanner instance (lazy-loaded)
_default_scanner: Optional[RemoteScanner] = None


def scanner() -> RemoteScanner:
    """Return the module-level default scanner, creating it on first use."""
    global _default_scanner
    if _default_scanner is not None:
        return _default_scanner
    _default_scanner = RemoteScanner()
    return _default_scanner
158
+
159
+
160
def scan(text: str) -> ScanResult:
    """Scan *text* with the shared module-level scanner (see scanner())."""
    default = scanner()
    return default.scan(text)
@@ -0,0 +1,114 @@
1
+ """Tests for sigil-protocol core scanner."""
2
+
3
+ import json
4
+ import pytest
5
+ from unittest.mock import patch, MagicMock
6
+
7
+ from sigil_protocol.scanner import RemoteScanner, ScanResult, Severity, scan
8
+
9
+
10
+ # ── Fixtures ──────────────────────────────────────────────────────────────────
11
+
12
@pytest.fixture
def offline_scanner(monkeypatch):
    """Scanner forced to use built-in patterns (no HTTP).

    SIGIL_OFFLINE is only read at module import time, so setting the env var
    after the module is imported has no effect — patch the already-computed
    module-level flag as well.
    """
    import sigil_protocol.scanner as scanner_mod

    monkeypatch.setenv("SIGIL_OFFLINE", "true")  # for any re-import
    monkeypatch.setattr(scanner_mod, "OFFLINE", True)  # the flag actually used
    s = RemoteScanner()
    s._needs_refresh = lambda: True  # force reload on every scan
    return s
19
+
20
+
21
+ # ── Core scanner ──────────────────────────────────────────────────────────────
22
+
23
def test_clean_payload(offline_scanner):
    # A benign SQL query must produce no finding at all.
    res = offline_scanner.scan('{"query": "SELECT name FROM users WHERE id=1"}')
    assert res.severity is None
    assert not res.hit
    assert not res.blocked
28
+
29
+
30
def test_aws_key_detected(offline_scanner):
    # The canonical AWS example key must trip the Critical built-in pattern.
    res = offline_scanner.scan("AKIAIOSFODNN7EXAMPLE")
    assert res
    assert res.pattern == "aws_access_key_id"
    assert res.severity == Severity.Critical
    assert res.blocked
36
+
37
+
38
def test_openai_key_detected(offline_scanner):
    header = "Authorization: Bearer sk-abc123def456ghi789jkl012mno345pqr678"
    res = offline_scanner.scan(header)
    assert res.hit
    assert res.severity == Severity.Critical
42
+
43
+
44
def test_sql_drop_detected(offline_scanner):
    # Destructive DDL is a Critical, blocking finding.
    res = offline_scanner.scan("DROP TABLE users")
    assert res.hit and res.blocked
48
+
49
+
50
def test_sql_delete_no_where(offline_scanner):
    # An unqualified DELETE (no WHERE clause) must be flagged.
    res = offline_scanner.scan("DELETE FROM accounts")
    assert res.hit
53
+
54
+
55
def test_json_input(offline_scanner):
    # Dangerous SQL embedded in a JSON document is still found.
    doc = json.dumps({"db": "prod", "query": "DROP TABLE payments"})
    res = offline_scanner.scan(doc)
    assert res.hit
59
+
60
+
61
def test_dict_input_coerced(offline_scanner):
    """scan() should accept dict and coerce to JSON string."""
    # Pass the dict itself (not json.dumps(...)) so the coercion branch in
    # RemoteScanner.scan is actually exercised; the original test serialized
    # the payload first and never hit that path.
    result = offline_scanner.scan({"key": "AKIAIOSFODNN7EXAMPLE"})
    assert result.hit
    assert result.pattern == "aws_access_key_id"
65
+
66
+
67
def test_multiple_hits_highest_severity_returned(offline_scanner):
    """When multiple patterns hit, the most severe should be primary."""
    payload = json.dumps(
        {"key": "AKIAIOSFODNN7EXAMPLE", "q": "DROP TABLE payments"}
    )
    res = offline_scanner.scan(payload)
    assert len(res.all_hits) >= 2
    assert res.severity == Severity.Critical
76
+
77
+
78
def test_scan_result_bool_false():
    # A miss is falsy and neither blocks nor warns.
    empty = ScanResult(hit=False)
    assert not empty
    assert not (empty.blocked or empty.warned)
83
+
84
+
85
def test_scan_result_bool_true():
    # A Critical hit is truthy, blocks, and is not merely a warning.
    res = ScanResult(hit=True, severity=Severity.Critical, pattern="aws_access_key_id")
    assert bool(res)
    assert res.blocked
    assert not res.warned
90
+
91
+
92
def test_scan_result_high_severity():
    # High severity warns without blocking.
    res = ScanResult(hit=True, severity=Severity.High, pattern="generic_secret")
    assert res.warned and not res.blocked
96
+
97
+
98
+ # ── Module-level convenience function ─────────────────────────────────────────
99
+
100
def test_module_scan_clean(monkeypatch):
    """The module-level scan() convenience function passes clean input through.

    The original test imported `scan as module_scan` but never called it,
    exercising a fresh RemoteScanner instead; it also set SIGIL_OFFLINE via
    the environment, which has no effect after import. Patch the module flag
    and actually call the module-level function.
    """
    import sigil_protocol.scanner as scanner_mod

    monkeypatch.setattr(scanner_mod, "OFFLINE", True)
    result = scanner_mod.scan("hello world — nothing sensitive here at all")
    assert not result.hit
    assert result.severity is None
107
+
108
+
109
+ # ── Severity ordering ─────────────────────────────────────────────────────────
110
+
111
def test_severity_ordering():
    # Warn < High < Critical, and the ordering is not symmetric.
    assert Severity.Critical >= Severity.High >= Severity.Warn
    assert not (Severity.Warn >= Severity.Critical)