open-edison 0.1.10__py3-none-any.whl → 0.1.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1 @@
+ *,:before,:after{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: ;--tw-contain-size: ;--tw-contain-layout: ;--tw-contain-paint: ;--tw-contain-style: }::backdrop{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: ;--tw-contain-size: ;--tw-contain-layout: ;--tw-contain-paint: ;--tw-contain-style: }*,:before,:after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}:before,:after{--tw-content: ""}html,:host{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;-o-tab-size:4;tab-size:4;font-family:ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji",Segoe UI Symbol,"Noto Color Emoji";font-feature-settings:normal;font-variation-settings:normal;-webkit-tap-highlight-color:transparent}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier 
New,monospace;font-feature-settings:normal;font-variation-settings:normal;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-feature-settings:inherit;font-variation-settings:inherit;font-size:100%;font-weight:inherit;line-height:inherit;letter-spacing:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}button,input:where([type=button]),input:where([type=reset]),input:where([type=submit]){-webkit-appearance:button;background-color:transparent;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dl,dd,h1,h2,h3,h4,h5,h6,hr,figure,p,pre{margin:0}fieldset{margin:0;padding:0}legend{padding:0}ol,ul,menu{list-style:none;margin:0;padding:0}dialog{padding:0}textarea{resize:vertical}input::-moz-placeholder,textarea::-moz-placeholder{opacity:1;color:#9ca3af}input::placeholder,textarea::placeholder{opacity:1;color:#9ca3af}button,[role=button]{cursor:pointer}:disabled{cursor:default}img,svg,video,canvas,audio,iframe,embed,object{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}[hidden]:where(:not([hidden=until-found])){display:none}.\!container{width:100%!important}.container{width:100%}@media (min-width: 640px){.\!container{max-width:640px!important}.container{max-width:640px}}@media (min-width: 768px){.\!container{max-width:768px!important}.container{max-width:768px}}@media (min-width: 1024px){.\!container{max-width:1024px!important}.container{max-width:1024px}}@media (min-width: 1280px){.\!container{max-width:1280px!important}.container{max-width:1280px}}@media (min-width: 1536px){.\!container{max-width:1536px!important}.container{max-width:1536px}}.relative{position:relative}.m-0{margin:0}.mx-auto{margin-left:auto;margin-right:auto}.mb-1{margin-bottom:.25rem}.mb-2{margin-bottom:.5rem}.mb-3{margin-bottom:.75rem}.ml-2{margin-left:.5rem}.mr-2{margin-right:.5rem}.mt-0\.5{margin-top:.125rem}.mt-1{margin-top:.25rem}.mt-2{margin-top:.5rem}.mt-3{margin-top:.75rem}.mt-4{margin-top:1rem}.inline-block{display:inline-block}.flex{display:flex}.inline-flex{display:inline-flex}.table{display:table}.grid{display:grid}.hidden{display:none}.h-2{height:.5rem}.h-5{height:1.25rem}.h-6{height:1.5rem}.h-\[580px\]{height:580px}.w-10{width:2.5rem}.w-2{width:.5rem}.w-4{width:1rem}.w-5{width:1.25rem}.w-full{width:100%}.min-w-\[240px\]{min-width:240px}.max-w-\[1400px\]{max-width:1400px}.max-w-\[260px\]{max-width:260px}.border-collapse{border-collapse:collapse}.translate-x-1{--tw-translate-x: .25rem;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skew(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.translate-x-5{--tw-translate-x: 1.25rem;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skew(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.transform{transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) 
skew(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.cursor-pointer{cursor:pointer}.select-none{-webkit-user-select:none;-moz-user-select:none;user-select:none}.grid-cols-1{grid-template-columns:repeat(1,minmax(0,1fr))}.flex-col{flex-direction:column}.flex-wrap{flex-wrap:wrap}.items-start{align-items:flex-start}.items-end{align-items:flex-end}.items-center{align-items:center}.justify-between{justify-content:space-between}.gap-1{gap:.25rem}.gap-2{gap:.5rem}.gap-3{gap:.75rem}.space-y-1>:not([hidden])~:not([hidden]){--tw-space-y-reverse: 0;margin-top:calc(.25rem * calc(1 - var(--tw-space-y-reverse)));margin-bottom:calc(.25rem * var(--tw-space-y-reverse))}.space-y-2>:not([hidden])~:not([hidden]){--tw-space-y-reverse: 0;margin-top:calc(.5rem * calc(1 - var(--tw-space-y-reverse)));margin-bottom:calc(.5rem * var(--tw-space-y-reverse))}.space-y-3>:not([hidden])~:not([hidden]){--tw-space-y-reverse: 0;margin-top:calc(.75rem * calc(1 - var(--tw-space-y-reverse)));margin-bottom:calc(.75rem * var(--tw-space-y-reverse))}.space-y-4>:not([hidden])~:not([hidden]){--tw-space-y-reverse: 0;margin-top:calc(1rem * calc(1 - var(--tw-space-y-reverse)));margin-bottom:calc(1rem * var(--tw-space-y-reverse))}.overflow-hidden{overflow:hidden}.overflow-y-auto{overflow-y:auto}.truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.whitespace-nowrap{white-space:nowrap}.whitespace-pre-wrap{white-space:pre-wrap}.rounded{border-radius:.25rem}.rounded-full{border-radius:9999px}.rounded-t{border-top-left-radius:.25rem;border-top-right-radius:.25rem}.border{border-width:1px}.border-b{border-bottom-width:1px}.border-r{border-right-width:1px}.border-amber-400\/30{border-color:#fbbf244d}.border-app-accent{border-color:var(--accent)}.border-app-border{border-color:var(--border)}.border-blue-400\/30{border-color:#60a5fa4d}.border-rose-400\/30{border-color:#fb71854d}.bg-amber-400{--tw-bg-opacity: 1;background-color:rgb(251 191 36 / var(--tw-bg-opacity, 1))}.bg-app-accent{background-color:var(--accent)}.bg-app-bg{background-color:var(--bg)}.bg-app-border{background-color:var(--border)}.bg-blue-400{--tw-bg-opacity: 1;background-color:rgb(96 165 250 / var(--tw-bg-opacity, 1))}.bg-blue-500{--tw-bg-opacity: 1;background-color:rgb(59 130 246 / var(--tw-bg-opacity, 1))}.bg-rose-400{--tw-bg-opacity: 1;background-color:rgb(251 113 133 / var(--tw-bg-opacity, 1))}.bg-white{--tw-bg-opacity: 1;background-color:rgb(255 255 255 / var(--tw-bg-opacity, 1))}.p-2{padding:.5rem}.p-3{padding:.75rem}.p-6{padding:1.5rem}.\!px-3{padding-left:.75rem!important;padding-right:.75rem!important}.\!py-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.px-2{padding-left:.5rem;padding-right:.5rem}.px-3{padding-left:.75rem;padding-right:.75rem}.py-0\.5{padding-top:.125rem;padding-bottom:.125rem}.py-1{padding-top:.25rem;padding-bottom:.25rem}.py-2{padding-top:.5rem;padding-bottom:.5rem}.py-3{padding-top:.75rem;padding-bottom:.75rem}.pb-2{padding-bottom:.5rem}.text-left{text-align:left}.text-center{text-align:center}.align-bottom{vertical-align:bottom}.font-mono{font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace}.text-2xl{font-size:1.5rem;line-height:2rem}.text-\[10px\]{font-size:10px}.text-sm{font-size:.875rem;line-height:1.25rem}.text-xl{font-size:1.25rem;line-height:1.75rem}.text-xs{font-size:.75rem;line-height:1rem}.font-bold{font-weight:700}.font-medium{font-weight:500}.font-semibold{font-weight:600}.text-amber-400{--tw-text-opacity: 
1;color:rgb(251 191 36 / var(--tw-text-opacity, 1))}.text-app-accent{color:var(--accent)}.text-app-muted{color:var(--muted)}.text-app-text{color:var(--text)}.text-blue-400{--tw-text-opacity: 1;color:rgb(96 165 250 / var(--tw-text-opacity, 1))}.text-green-400{--tw-text-opacity: 1;color:rgb(74 222 128 / var(--tw-text-opacity, 1))}.text-rose-400{--tw-text-opacity: 1;color:rgb(251 113 133 / var(--tw-text-opacity, 1))}.accent-blue-500{accent-color:#3b82f6}.shadow{--tw-shadow: 0 1px 3px 0 rgb(0 0 0 / .1), 0 1px 2px -1px rgb(0 0 0 / .1);--tw-shadow-colored: 0 1px 3px 0 var(--tw-shadow-color), 0 1px 2px -1px var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow, 0 0 #0000),var(--tw-ring-shadow, 0 0 #0000),var(--tw-shadow)}.filter{filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow)}.transition-colors{transition-property:color,background-color,border-color,text-decoration-color,fill,stroke;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.15s}.transition-transform{transition-property:transform;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.15s}:root{--bg: #0b0c10;--card: #111318;--border: #1f2430;--text: #e6e6e6;--muted: #a0a7b4;--accent: #7c3aed;--success: #10b981;--warning: #f59e0b;--danger: #ef4444}[data-theme=dark]{--bg: #0b0c10;--card: #111318;--border: #1f2430;--text: #e6e6e6;--muted: #a0a7b4}[data-theme=light]{--bg: #f8fafc;--card: #ffffff;--border: #e5e7eb;--text: #0f172a;--muted: #475569}@media (prefers-color-scheme: light){:root{--bg: #f8fafc;--card: #ffffff;--border: #e5e7eb;--text: #0f172a;--muted: #475569}}html,body,#root{height:100%}body{margin:0;background:var(--bg);color:var(--text)}.container{margin:0 auto;padding:24px;max-width:1100px}.grid{display:grid;grid-template-columns:repeat(auto-fit,minmax(280px,1fr));gap:16px}.card{background:var(--card);border:1px solid var(--border);border-radius:12px;padding:16px;box-shadow:0 1px 2px #0000000a,0 2px 12px #00000014}.stat{display:flex;align-items:center;gap:12px}.badge{display:inline-block;font-size:12px;padding:2px 8px;border-radius:999px;border:1px solid var(--border);background:#7c3aed14;color:var(--text)}.table{width:100%;border-collapse:collapse}.table th,.table td{border-bottom:1px solid var(--border);padding:8px 4px;text-align:left}.muted{color:var(--muted)}.accent{color:var(--accent)}.success{color:var(--success)}.warning{color:var(--warning)}.danger{color:var(--danger)}.toolbar{display:flex;align-items:center;justify-content:space-between;gap:12px}.button{border:1px solid var(--border);background:var(--card);color:var(--text);padding:6px 10px;border-radius:8px;cursor:pointer}.button:hover{filter:brightness(1.05)}.focus\:outline-none:focus{outline:2px solid transparent;outline-offset:2px}.focus-visible\:ring-2:focus-visible{--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow, 0 0 #0000)}.focus-visible\:ring-blue-400:focus-visible{--tw-ring-opacity: 1;--tw-ring-color: rgb(96 165 250 / var(--tw-ring-opacity, 1))}@media (min-width: 640px){.sm\:flex{display:flex}.sm\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.sm\:flex-row{flex-direction:row}.sm\:items-end{align-items:flex-end}}@media (min-width: 
1024px){.lg\:grid-cols-3{grid-template-columns:repeat(3,minmax(0,1fr))}.lg\:grid-cols-\[220px_1fr\]{grid-template-columns:220px 1fr}}
@@ -10,8 +10,8 @@
  const prefersLight = window.matchMedia && window.matchMedia('(prefers-color-scheme: light)').matches;
  document.documentElement.setAttribute('data-theme', prefersLight ? 'light' : 'dark');
  </script>
- <script type="module" crossorigin src="/assets/index-CKkid2y-.js"></script>
- <link rel="stylesheet" crossorigin href="/assets/index-CRxojymD.css">
+ <script type="module" crossorigin src="/assets/index-_NTxjOfh.js"></script>
+ <link rel="stylesheet" crossorigin href="/assets/index-h6k8aL6h.css">
  </head>

  <body>
@@ -14,23 +14,86 @@ names (with server-name/path prefixes) to their security classifications:

  import json
  from dataclasses import dataclass
- from functools import lru_cache
+ from functools import cache
  from pathlib import Path
  from typing import Any

  from loguru import logger as log

  from src.config import ConfigError
+ from src.telemetry import (
+     record_private_data_access,
+     record_prompt_access_blocked,
+     record_resource_access_blocked,
+     record_tool_call_blocked,
+     record_untrusted_public_data,
+     record_write_operation,
+ )
+
+ ACL_RANK: dict[str, int] = {"PUBLIC": 0, "PRIVATE": 1, "SECRET": 2}
+
+ # Default flat permissions applied when fields are missing in config
+ DEFAULT_PERMISSIONS: dict[str, Any] = {
+     "enabled": False,
+     "write_operation": False,
+     "read_private_data": False,
+     "read_untrusted_public_data": False,
+     "acl": "PUBLIC",
+ }
+
+
+ def _normalize_acl(value: Any, *, default: str = "PUBLIC") -> str:
+     """Normalize ACL string, defaulting and uppercasing; validate against known values."""
+     try:
+         if value is None:
+             return default
+         acl = str(value).upper().strip()
+         if acl not in ACL_RANK:
+             # Fallback to default if invalid
+             return default
+         return acl
+     except Exception:
+         return default
+
+
+ def _apply_permission_defaults(config_perms: dict[str, Any]) -> dict[str, Any]:
+     """Merge provided config flags with DEFAULT_PERMISSIONS, including ACL derivation."""
+     # Start from defaults
+     merged: dict[str, Any] = dict(DEFAULT_PERMISSIONS)
+     # Booleans
+     enabled = bool(config_perms.get("enabled", merged["enabled"]))
+     write_operation = bool(config_perms.get("write_operation", merged["write_operation"]))
+     read_private_data = bool(config_perms.get("read_private_data", merged["read_private_data"]))
+     read_untrusted_public_data = bool(
+         config_perms.get("read_untrusted_public_data", merged["read_untrusted_public_data"])  # type: ignore[reportUnknownArgumentType]
+     )

+     # ACL: explicit value wins; otherwise default PRIVATE if read_private_data True, else default
+     if "acl" in config_perms and config_perms.get("acl") is not None:
+         acl = _normalize_acl(config_perms.get("acl"), default=str(merged["acl"]))
+     else:
+         acl = _normalize_acl("PRIVATE" if read_private_data else str(merged["acl"]))
+
+     merged.update(
+         {
+             "enabled": enabled,
+             "write_operation": write_operation,
+             "read_private_data": read_private_data,
+             "read_untrusted_public_data": read_untrusted_public_data,
+             "acl": acl,
+         }
+     )
+     return merged

- def _flat_permissions_loader(config_path: Path) -> dict[str, dict[str, bool]]:
+
+ def _flat_permissions_loader(config_path: Path) -> dict[str, dict[str, Any]]:
      if config_path.exists():
          with open(config_path) as f:
              data: dict[str, Any] = json.load(f)

          # Handle new format: server -> {tool -> permissions}
          # Convert to flat tool -> permissions format
-         flat_permissions: dict[str, dict[str, bool]] = {}
+         flat_permissions: dict[str, dict[str, Any]] = {}
          tool_to_server: dict[str, str] = {}
          server_tools: dict[str, set[str]] = {}

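The new _apply_permission_defaults above fills any missing flags from DEFAULT_PERMISSIONS and, when no explicit "acl" key is present, derives the ACL from read_private_data. A minimal worked example of that behaviour (the input dict is illustrative, not taken from the package's shipped config, and assumes the functions above are in scope):

    # Hypothetical config entry that only sets two of the five fields
    config_perms = {"enabled": True, "read_private_data": True}

    merged = _apply_permission_defaults(config_perms)
    # merged == {
    #     "enabled": True,
    #     "write_operation": False,             # filled in from DEFAULT_PERMISSIONS
    #     "read_private_data": True,
    #     "read_untrusted_public_data": False,  # filled in from DEFAULT_PERMISSIONS
    #     "acl": "PRIVATE",                     # derived: no explicit "acl" and read_private_data is True
    # }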
@@ -77,14 +140,7 @@ def _flat_permissions_loader(config_path: Path) -> dict[str, dict[str, bool]]:

                  # Convert to flat format with explicit type casting
                  tool_perms_dict: dict[str, Any] = tool_permissions  # type: ignore
-                 flat_permissions[tool_name] = {
-                     "enabled": bool(tool_perms_dict.get("enabled", True)),
-                     "write_operation": bool(tool_perms_dict.get("write_operation", False)),
-                     "read_private_data": bool(tool_perms_dict.get("read_private_data", False)),
-                     "read_untrusted_public_data": bool(
-                         tool_perms_dict.get("read_untrusted_public_data", False)
-                     ),
-                 }
+                 flat_permissions[tool_name] = _apply_permission_defaults(tool_perms_dict)

          log.debug(
              f"Loaded {len(flat_permissions)} tool permissions from {len(server_tools)} servers in {config_path}"
@@ -100,8 +156,8 @@ def _flat_permissions_loader(config_path: Path) -> dict[str, dict[str, bool]]:
          return {}


- @lru_cache(maxsize=1)
- def _load_tool_permissions_cached() -> dict[str, dict[str, bool]]:
+ @cache
+ def _load_tool_permissions_cached() -> dict[str, dict[str, Any]]:
      """Load tool permissions from JSON configuration file with LRU caching."""
      config_path = Path(__file__).parent.parent.parent / "tool_permissions.json"

@@ -115,8 +171,8 @@ def _load_tool_permissions_cached() -> dict[str, dict[str, bool]]:
          return {}


- @lru_cache(maxsize=1)
- def _load_resource_permissions_cached() -> dict[str, dict[str, bool]]:
+ @cache
+ def _load_resource_permissions_cached() -> dict[str, dict[str, Any]]:
      """Load resource permissions from JSON configuration file with LRU caching."""
      config_path = Path(__file__).parent.parent.parent / "resource_permissions.json"

@@ -130,8 +186,8 @@ def _load_resource_permissions_cached() -> dict[str, dict[str, bool]]:
          return {}


- @lru_cache(maxsize=1)
- def _load_prompt_permissions_cached() -> dict[str, dict[str, bool]]:
+ @cache
+ def _load_prompt_permissions_cached() -> dict[str, dict[str, Any]]:
      """Load prompt permissions from JSON configuration file with LRU caching."""
      config_path = Path(__file__).parent.parent.parent / "prompt_permissions.json"

@@ -145,55 +201,55 @@ def _load_prompt_permissions_cached() -> dict[str, dict[str, bool]]:
          return {}


- @lru_cache(maxsize=128)
- def _classify_tool_permissions_cached(tool_name: str) -> dict[str, bool]:
+ @cache
+ def _classify_tool_permissions_cached(tool_name: str) -> dict[str, Any]:
      """Classify tool permissions with LRU caching."""
      return _classify_permissions_cached(tool_name, _load_tool_permissions_cached(), "tool")


- @lru_cache(maxsize=128)
- def _classify_resource_permissions_cached(resource_name: str) -> dict[str, bool]:
+ @cache
+ def _classify_resource_permissions_cached(resource_name: str) -> dict[str, Any]:
      """Classify resource permissions with LRU caching."""
      return _classify_permissions_cached(
          resource_name, _load_resource_permissions_cached(), "resource"
      )


- @lru_cache(maxsize=128)
- def _classify_prompt_permissions_cached(prompt_name: str) -> dict[str, bool]:
+ @cache
+ def _classify_prompt_permissions_cached(prompt_name: str) -> dict[str, Any]:
      """Classify prompt permissions with LRU caching."""
      return _classify_permissions_cached(prompt_name, _load_prompt_permissions_cached(), "prompt")


- def _get_builtin_tool_permissions(name: str) -> dict[str, bool] | None:
+ def _get_builtin_tool_permissions(name: str) -> dict[str, Any] | None:
      """Get permissions for built-in safe tools."""
      builtin_safe_tools = ["echo", "get_server_info", "get_security_status"]
      if name in builtin_safe_tools:
-         permissions = {
-             "enabled": True,
-             "write_operation": False,
-             "read_private_data": False,
-             "read_untrusted_public_data": False,
-         }
+         permissions = _apply_permission_defaults({"enabled": True})
          log.debug(f"Built-in safe tool {name}: {permissions}")
          return permissions
      return None


  def _get_exact_match_permissions(
-     name: str, permissions_config: dict[str, dict[str, bool]], type_name: str
- ) -> dict[str, bool] | None:
+     name: str, permissions_config: dict[str, dict[str, Any]], type_name: str
+ ) -> dict[str, Any] | None:
      """Check for exact match permissions."""
      if name in permissions_config and not name.startswith("_"):
          config_perms = permissions_config[name]
-         permissions = {
-             "enabled": config_perms.get("enabled", False),
-             "write_operation": config_perms.get("write_operation", False),
-             "read_private_data": config_perms.get("read_private_data", False),
-             "read_untrusted_public_data": config_perms.get("read_untrusted_public_data", False),
-         }
+         permissions = _apply_permission_defaults(config_perms)
          log.debug(f"Found exact match for {type_name} {name}: {permissions}")
          return permissions
+     # Fallback: support names like "server_tool" by checking the part after first underscore
+     if "_" in name:
+         suffix = name.split("_", 1)[1]
+         if suffix in permissions_config and not suffix.startswith("_"):
+             config_perms = permissions_config[suffix]
+             permissions = _apply_permission_defaults(config_perms)
+             log.debug(
+                 f"Found fallback match for {type_name} {name} using suffix {suffix}: {permissions}"
+             )
+             return permissions
      return None


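The fallback branch added to _get_exact_match_permissions above lets a prefixed name such as "server_tool" match a config entry keyed by the bare tool name: only the text up to the first underscore is stripped. A small sketch of that behaviour (the names and config dict are hypothetical):

    permissions_config = {"read_file": {"enabled": True, "read_private_data": True}}

    # No exact entry for "filesystem_read_file", so the lookup retries with the
    # part after the first underscore ("read_file") and returns its defaulted entry.
    perms = _get_exact_match_permissions("filesystem_read_file", permissions_config, "tool")

    # Note that only the first underscore is stripped: "my_server_read_file" falls
    # back to "server_read_file", which would not match this config.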
@@ -224,8 +280,8 @@ def _get_wildcard_patterns(name: str, type_name: str) -> list[str]:


  def _classify_permissions_cached(
-     name: str, permissions_config: dict[str, dict[str, bool]], type_name: str
- ) -> dict[str, bool]:
+     name: str, permissions_config: dict[str, dict[str, Any]], type_name: str
+ ) -> dict[str, Any]:
      """Generic permission classification with pattern matching support."""
      # Built-in safe tools that don't need external config (only for tools)
      if type_name == "tool":
@@ -243,12 +299,7 @@ def _classify_permissions_cached(
      for pattern in wildcard_patterns:
          if pattern in permissions_config:
              config_perms = permissions_config[pattern]
-             permissions = {
-                 "enabled": config_perms.get("enabled", False),
-                 "write_operation": config_perms.get("write_operation", False),
-                 "read_private_data": config_perms.get("read_private_data", False),
-                 "read_untrusted_public_data": config_perms.get("read_untrusted_public_data", False),
-             }
+             permissions = _apply_permission_defaults(config_perms)
              log.debug(f"Found wildcard match for {type_name} {name} using {pattern}: {permissions}")
              return permissions

@@ -277,6 +328,8 @@ class DataAccessTracker:
      has_private_data_access: bool = False
      has_untrusted_content_exposure: bool = False
      has_external_communication: bool = False
+     # ACL tracking: the most restrictive ACL encountered during this session via reads
+     highest_acl_level: str = "PUBLIC"

      def is_trifecta_achieved(self) -> bool:
          """Check if the lethal trifecta has been achieved."""
@@ -286,31 +339,31 @@ class DataAccessTracker:
              and self.has_external_communication
          )

-     def _load_tool_permissions(self) -> dict[str, dict[str, bool]]:
+     def _load_tool_permissions(self) -> dict[str, dict[str, Any]]:
          """Load tool permissions from JSON configuration file with caching."""
          return _load_tool_permissions_cached()

-     def _load_resource_permissions(self) -> dict[str, dict[str, bool]]:
+     def _load_resource_permissions(self) -> dict[str, dict[str, Any]]:
          """Load resource permissions from JSON configuration file with caching."""
          return _load_resource_permissions_cached()

-     def _load_prompt_permissions(self) -> dict[str, dict[str, bool]]:
+     def _load_prompt_permissions(self) -> dict[str, dict[str, Any]]:
          """Load prompt permissions from JSON configuration file with caching."""
          return _load_prompt_permissions_cached()

-     def _classify_by_tool_name(self, tool_name: str) -> dict[str, bool]:
+     def _classify_by_tool_name(self, tool_name: str) -> dict[str, Any]:
          """Classify permissions based on external JSON configuration only."""
          return _classify_tool_permissions_cached(tool_name)

-     def _classify_by_resource_name(self, resource_name: str) -> dict[str, bool]:
+     def _classify_by_resource_name(self, resource_name: str) -> dict[str, Any]:
          """Classify resource permissions based on external JSON configuration only."""
          return _classify_resource_permissions_cached(resource_name)

-     def _classify_by_prompt_name(self, prompt_name: str) -> dict[str, bool]:
+     def _classify_by_prompt_name(self, prompt_name: str) -> dict[str, Any]:
          """Classify prompt permissions based on external JSON configuration only."""
          return _classify_prompt_permissions_cached(prompt_name)

-     def _classify_tool_permissions(self, tool_name: str) -> dict[str, bool]:
+     def _classify_tool_permissions(self, tool_name: str) -> dict[str, Any]:
          """
          Classify tool permissions based on tool name.

@@ -323,7 +376,7 @@ class DataAccessTracker:
          log.debug(f"Classified tool {tool_name}: {permissions}")
          return permissions

-     def _classify_resource_permissions(self, resource_name: str) -> dict[str, bool]:
+     def _classify_resource_permissions(self, resource_name: str) -> dict[str, Any]:
          """
          Classify resource permissions based on resource name.

@@ -336,7 +389,7 @@ class DataAccessTracker:
          log.debug(f"Classified resource {resource_name}: {permissions}")
          return permissions

-     def _classify_prompt_permissions(self, prompt_name: str) -> dict[str, bool]:
+     def _classify_prompt_permissions(self, prompt_name: str) -> dict[str, Any]:
          """
          Classify prompt permissions based on prompt name.

@@ -349,35 +402,90 @@ class DataAccessTracker:
          log.debug(f"Classified prompt {prompt_name}: {permissions}")
          return permissions

-     def get_tool_permissions(self, tool_name: str) -> dict[str, bool]:
+     def get_tool_permissions(self, tool_name: str) -> dict[str, Any]:
          """Get tool permissions based on tool name."""
          return self._classify_tool_permissions(tool_name)

-     def get_resource_permissions(self, resource_name: str) -> dict[str, bool]:
+     def get_resource_permissions(self, resource_name: str) -> dict[str, Any]:
          """Get resource permissions based on resource name."""
          return self._classify_resource_permissions(resource_name)

-     def get_prompt_permissions(self, prompt_name: str) -> dict[str, bool]:
+     def get_prompt_permissions(self, prompt_name: str) -> dict[str, Any]:
          """Get prompt permissions based on prompt name."""
          return self._classify_prompt_permissions(prompt_name)

-     def add_tool_call(self, tool_name: str) -> str:
+     def _would_call_complete_trifecta(self, permissions: dict[str, Any]) -> bool:
+         """Return True if applying these permissions would complete the trifecta."""
+         would_private = self.has_private_data_access or bool(permissions.get("read_private_data"))
+         would_untrusted = self.has_untrusted_content_exposure or bool(
+             permissions.get("read_untrusted_public_data")
+         )
+         would_write = self.has_external_communication or bool(permissions.get("write_operation"))
+         return bool(would_private and would_untrusted and would_write)
+
+     def _enforce_tool_enabled(self, permissions: dict[str, Any], tool_name: str) -> None:
+         if permissions["enabled"] is False:
+             log.warning(f"🚫 BLOCKING tool call {tool_name} - tool is disabled")
+             record_tool_call_blocked(tool_name, "disabled")
+             raise SecurityError(f"'{tool_name}' / Tool disabled")
+
+     def _enforce_acl_downgrade_block(
+         self, tool_acl: str, permissions: dict[str, Any], tool_name: str
+     ) -> None:
+         if permissions["write_operation"]:
+             current_rank = ACL_RANK.get(self.highest_acl_level, 0)
+             write_rank = ACL_RANK.get(tool_acl, 0)
+             if write_rank < current_rank:
+                 log.error(
+                     f"🚫 BLOCKING tool call {tool_name} - write to lower ACL ({tool_acl}) while session has higher ACL {self.highest_acl_level}"
+                 )
+                 record_tool_call_blocked(tool_name, "acl_downgrade")
+                 raise SecurityError(f"'{tool_name}' / ACL (level={self.highest_acl_level})")
+
+     def _apply_permissions_effects(
+         self,
+         permissions: dict[str, Any],
+         *,
+         source_type: str,
+         name: str,
+     ) -> None:
+         """Apply side effects (flags, ACL, telemetry) for any source type."""
+         acl_value: str = _normalize_acl(permissions.get("acl"), default="PUBLIC")
+         if permissions["read_private_data"]:
+             self.has_private_data_access = True
+             log.info(f"🔒 Private data access detected via {source_type}: {name}")
+             record_private_data_access(source_type, name)
+             # Update highest ACL based on ACL when reading private data
+             current_rank = ACL_RANK.get(self.highest_acl_level, 0)
+             new_rank = ACL_RANK.get(acl_value, 0)
+             if new_rank > current_rank:
+                 self.highest_acl_level = acl_value
+
+         if permissions["read_untrusted_public_data"]:
+             self.has_untrusted_content_exposure = True
+             log.info(f"🌐 Untrusted content exposure detected via {source_type}: {name}")
+             record_untrusted_public_data(source_type, name)
+
+         if permissions["write_operation"]:
+             self.has_external_communication = True
+             log.info(f"✍️ Write operation detected via {source_type}: {name}")
+             record_write_operation(source_type, name)
+
+     def add_tool_call(self, tool_name: str):
          """
          Add a tool call and update trifecta flags based on tool classification.

          Args:
              tool_name: Name of the tool being called

-         Returns:
-             Placeholder ID for compatibility
-
          Raises:
              SecurityError: If the lethal trifecta is already achieved and this call would be blocked
          """
          # Check if trifecta is already achieved before processing this call
          if self.is_trifecta_achieved():
-             log.error(f"🚫 BLOCKING tool call {tool_name} - lethal trifecta already achieved")
-             raise SecurityError(f"Tool call '{tool_name}' blocked: lethal trifecta achieved")
+             log.error(f"🚫 BLOCKING tool call {tool_name} - lethal trifecta achieved")
+             record_tool_call_blocked(tool_name, "trifecta")
+             raise SecurityError(f"'{tool_name}' / Lethal trifecta")

          # Get tool permissions and update trifecta flags
          permissions = self._classify_tool_permissions(tool_name)
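The helper methods added above run before any session flags are flipped: _enforce_tool_enabled rejects disabled tools, _enforce_acl_downgrade_block compares a write-capable tool's ACL against the session's highest observed ACL via ACL_RANK, and _would_call_complete_trifecta blocks a call that would complete the trifecta. A rough sketch of the ACL comparison (the tool name and the direct call to the private helper are for illustration only):

    tracker = DataAccessTracker()
    tracker.highest_acl_level = "SECRET"  # e.g. after a read classified as SECRET

    # A write-capable tool whose ACL is PUBLIC ranks below SECRET (0 < 2), so the
    # downgrade check raises SecurityError with the reason "'send_email' / ACL (level=SECRET)".
    try:
        tracker._enforce_acl_downgrade_block("PUBLIC", {"write_operation": True}, "send_email")
    except SecurityError:
        pass  # blocked: writing to a lower ACL than data already read this session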
@@ -385,38 +493,30 @@ class DataAccessTracker:
          log.debug(f"add_tool_call: Tool permissions: {permissions}")

          # Check if tool is enabled
-         if permissions["enabled"] is False:
-             log.warning(f"🚫 BLOCKING tool call {tool_name} - tool is disabled")
-             raise SecurityError(f"Tool call '{tool_name}' blocked: tool is disabled")
+         self._enforce_tool_enabled(permissions, tool_name)

-         if permissions["read_private_data"]:
-             self.has_private_data_access = True
-             log.info(f"🔒 Private data access detected: {tool_name}")
+         # ACL-based write downgrade prevention
+         tool_acl: str = _normalize_acl(permissions.get("acl"), default="PUBLIC")
+         self._enforce_acl_downgrade_block(tool_acl, permissions, tool_name)

-         if permissions["read_untrusted_public_data"]:
-             self.has_untrusted_content_exposure = True
-             log.info(f"🌐 Untrusted content exposure detected: {tool_name}")
+         # Pre-check: would this call achieve the lethal trifecta? If so, block immediately
+         if self._would_call_complete_trifecta(permissions):
+             log.error(f"🚫 BLOCKING tool call {tool_name} - would achieve lethal trifecta")
+             record_tool_call_blocked(tool_name, "trifecta_prevent")
+             raise SecurityError(f"'{tool_name}' / Lethal trifecta")

-         if permissions["write_operation"]:
-             self.has_external_communication = True
-             log.info(f"✍️ Write operation detected: {tool_name}")
-
-         # Log if trifecta is achieved after this call
-         if self.is_trifecta_achieved():
-             log.warning(f"⚠️ LETHAL TRIFECTA ACHIEVED after tool call: {tool_name}")
+         self._apply_permissions_effects(permissions, source_type="tool", name=tool_name)

-         return "placeholder_id"
+         # We proactively prevent trifecta; by design we should never reach a state where
+         # a completed call newly achieves trifecta.

-     def add_resource_access(self, resource_name: str) -> str:
+     def add_resource_access(self, resource_name: str):
          """
          Add a resource access and update trifecta flags based on resource classification.

          Args:
              resource_name: Name/URI of the resource being accessed

-         Returns:
-             Placeholder ID for compatibility
-
          Raises:
              SecurityError: If the lethal trifecta is already achieved and this access would be blocked
          """
@@ -425,69 +525,52 @@ class DataAccessTracker:
              log.error(
                  f"🚫 BLOCKING resource access {resource_name} - lethal trifecta already achieved"
              )
-             raise SecurityError(
-                 f"Resource access '{resource_name}' blocked: lethal trifecta achieved"
-             )
+             raise SecurityError(f"'{resource_name}' / Lethal trifecta")

          # Get resource permissions and update trifecta flags
          permissions = self._classify_resource_permissions(resource_name)

-         if permissions["read_private_data"]:
-             self.has_private_data_access = True
-             log.info(f"🔒 Private data access detected via resource: {resource_name}")
-
-         if permissions["read_untrusted_public_data"]:
-             self.has_untrusted_content_exposure = True
-             log.info(f"🌐 Untrusted content exposure detected via resource: {resource_name}")
-
-         if permissions["write_operation"]:
-             self.has_external_communication = True
-             log.info(f"✍️ Write operation detected via resource: {resource_name}")
+         # Pre-check: would this access achieve the lethal trifecta? If so, block immediately
+         if self._would_call_complete_trifecta(permissions):
+             log.error(
+                 f"🚫 BLOCKING resource access {resource_name} - would achieve lethal trifecta"
+             )
+             record_resource_access_blocked(resource_name, "trifecta_prevent")
+             raise SecurityError(f"'{resource_name}' / Lethal trifecta")

-         # Log if trifecta is achieved after this access
-         if self.is_trifecta_achieved():
-             log.warning(f"⚠️ LETHAL TRIFECTA ACHIEVED after resource access: {resource_name}")
+         self._apply_permissions_effects(permissions, source_type="resource", name=resource_name)

-         return "placeholder_id"
+         # We proactively prevent trifecta; by design we should never reach a state where
+         # a completed access newly achieves trifecta.

-     def add_prompt_access(self, prompt_name: str) -> str:
+     def add_prompt_access(self, prompt_name: str):
          """
          Add a prompt access and update trifecta flags based on prompt classification.

          Args:
              prompt_name: Name/type of the prompt being accessed

-         Returns:
-             Placeholder ID for compatibility
-
          Raises:
              SecurityError: If the lethal trifecta is already achieved and this access would be blocked
          """
          # Check if trifecta is already achieved before processing this access
          if self.is_trifecta_achieved():
              log.error(f"🚫 BLOCKING prompt access {prompt_name} - lethal trifecta already achieved")
-             raise SecurityError(f"Prompt access '{prompt_name}' blocked: lethal trifecta achieved")
+             raise SecurityError(f"'{prompt_name}' / Lethal trifecta")

          # Get prompt permissions and update trifecta flags
          permissions = self._classify_prompt_permissions(prompt_name)

-         if permissions["read_private_data"]:
-             self.has_private_data_access = True
-             log.info(f"🔒 Private data access detected via prompt: {prompt_name}")
-
-         if permissions["read_untrusted_public_data"]:
-             self.has_untrusted_content_exposure = True
-             log.info(f"🌐 Untrusted content exposure detected via prompt: {prompt_name}")
-
-         if permissions["write_operation"]:
-             self.has_external_communication = True
-             log.info(f"✍️ Write operation detected via prompt: {prompt_name}")
+         # Pre-check: would this access achieve the lethal trifecta? If so, block immediately
+         if self._would_call_complete_trifecta(permissions):
+             log.error(f"🚫 BLOCKING prompt access {prompt_name} - would achieve lethal trifecta")
+             record_prompt_access_blocked(prompt_name, "trifecta_prevent")
+             raise SecurityError(f"'{prompt_name}' / Lethal trifecta")

-         # Log if trifecta is achieved after this access
-         if self.is_trifecta_achieved():
-             log.warning(f"⚠️ LETHAL TRIFECTA ACHIEVED after prompt access: {prompt_name}")
+         self._apply_permissions_effects(permissions, source_type="prompt", name=prompt_name)

-         return "placeholder_id"
+         # We proactively prevent trifecta; by design we should never reach a state where
+         # a completed access newly achieves trifecta.

      def to_dict(self) -> dict[str, Any]:
          """
@@ -503,8 +586,26 @@ class DataAccessTracker:
                  "has_external_communication": self.has_external_communication,
                  "trifecta_achieved": self.is_trifecta_achieved(),
              },
+             "acl": {
+                 "highest_acl_level": self.highest_acl_level,
+             },
          }


  class SecurityError(Exception):
      """Raised when a security policy violation occurs."""
+
+     def __init__(self, message: str):
+         """We format with a brick ascii wall"""
+         message = f"""
+ ████ ████ ████ ████ ████ ████
+ ██ ████ ████ ████ ████ ████ █
+ ████ ████ ████ ████ ████ ████
+ BLOCKED BY EDISON
+ {message:^30}
+ ████ ████ ████ ████ ████ ████
+ ██ ████ ████ ████ ████ ████ █
+ ████ ████ ████ ████ ████ ████
+ {message}
+ """
+         super().__init__(message)
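Two smaller effects of this final hunk: to_dict() now reports the session's highest ACL under a separate "acl" key, and SecurityError wraps its short reason in the "BLOCKED BY EDISON" brick wall, so str(exc) is multi-line. A short sketch of the serialized shape (only the keys visible in the hunk above are assumed):

    tracker = DataAccessTracker()
    tracker.highest_acl_level = "PRIVATE"

    state = tracker.to_dict()
    # state["lethal_trifecta"]["trifecta_achieved"] is False for a fresh tracker
    # state["acl"]["highest_acl_level"] == "PRIVATE"

    wall = str(SecurityError("'send_email' / Lethal trifecta"))
    # wall contains "BLOCKED BY EDISON" and the reason, framed by the brick rows shown above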