open-edison 0.1.19__py3-none-any.whl → 0.1.26__py3-none-any.whl

This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
@@ -12,15 +12,20 @@ names (with server-name/path prefixes) to their security classifications:
  - prompt_permissions.json: Prompt security classifications
  """

- import json
  from dataclasses import dataclass
- from functools import cache
- from pathlib import Path
  from typing import Any

  from loguru import logger as log

- from src.config import ConfigError
+ from src import events
+ from src.permissions import (
+     ACL_RANK,
+     Permissions,
+     PromptPermission,
+     ResourcePermission,
+     ToolPermission,
+     normalize_acl,
+ )
  from src.telemetry import (
      record_private_data_access,
      record_prompt_access_blocked,
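
Note on the import changes above: the module no longer loads and classifies the permission JSON files itself. It now pulls ACL_RANK, normalize_acl, and typed ToolPermission / ResourcePermission / PromptPermission records (accessed through a Permissions object) from src.permissions, plus an events module used for fire-and-forget notifications. src.permissions itself is not part of this diff; the sketch below only illustrates how the rest of the diff consumes the new API, with a made-up tool name.

    # Illustrative only; attribute and method names are taken from their usage later in this diff.
    from src.permissions import Permissions

    perms = Permissions()
    if perms.is_tool_enabled("my_server/read_file"):  # hypothetical tool name
        perm = perms.get_tool_permission("my_server/read_file")
        print(perm.read_private_data, perm.read_untrusted_public_data, perm.write_operation, perm.acl)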
@@ -30,314 +35,6 @@ from src.telemetry import (
  record_write_operation,
  )

- ACL_RANK: dict[str, int] = {"PUBLIC": 0, "PRIVATE": 1, "SECRET": 2}
-
- # Default flat permissions applied when fields are missing in config
- DEFAULT_PERMISSIONS: dict[str, Any] = {
-     "enabled": False,
-     "write_operation": False,
-     "read_private_data": False,
-     "read_untrusted_public_data": False,
-     "acl": "PUBLIC",
- }
-
-
- def _normalize_acl(value: Any, *, default: str = "PUBLIC") -> str:
-     """Normalize ACL string, defaulting and uppercasing; validate against known values."""
-     try:
-         if value is None:
-             return default
-         acl = str(value).upper().strip()
-         if acl not in ACL_RANK:
-             # Fallback to default if invalid
-             return default
-         return acl
-     except Exception:
-         return default
-
-
- def _apply_permission_defaults(config_perms: dict[str, Any]) -> dict[str, Any]:
-     """Merge provided config flags with DEFAULT_PERMISSIONS, including ACL derivation."""
-     # Start from defaults
-     merged: dict[str, Any] = dict(DEFAULT_PERMISSIONS)
-     # Booleans
-     enabled = bool(config_perms.get("enabled", merged["enabled"]))
-     write_operation = bool(config_perms.get("write_operation", merged["write_operation"]))
-     read_private_data = bool(config_perms.get("read_private_data", merged["read_private_data"]))
-     read_untrusted_public_data = bool(
-         config_perms.get("read_untrusted_public_data", merged["read_untrusted_public_data"])  # type: ignore[reportUnknownArgumentType]
-     )
-
-     # ACL: explicit value wins; otherwise default PRIVATE if read_private_data True, else default
-     if "acl" in config_perms and config_perms.get("acl") is not None:
-         acl = _normalize_acl(config_perms.get("acl"), default=str(merged["acl"]))
-     else:
-         acl = _normalize_acl("PRIVATE" if read_private_data else str(merged["acl"]))
-
-     merged.update(
-         {
-             "enabled": enabled,
-             "write_operation": write_operation,
-             "read_private_data": read_private_data,
-             "read_untrusted_public_data": read_untrusted_public_data,
-             "acl": acl,
-         }
-     )
-     return merged
-
-
- def _flat_permissions_loader(config_path: Path) -> dict[str, dict[str, Any]]:
-     if config_path.exists():
-         with open(config_path) as f:
-             data: dict[str, Any] = json.load(f)
-
-         # Handle new format: server -> {tool -> permissions}
-         # Convert to flat tool -> permissions format
-         flat_permissions: dict[str, dict[str, Any]] = {}
-         tool_to_server: dict[str, str] = {}
-         server_tools: dict[str, set[str]] = {}
-
-         for server_name, server_data in data.items():
-             if not isinstance(server_data, dict):
-                 log.warning(
-                     f"Invalid server data for {server_name}: expected dict, got {type(server_data)}"
-                 )
-                 continue
-
-             if server_name == "_metadata":
-                 flat_permissions["_metadata"] = server_data
-                 continue
-
-             server_tools[server_name] = set()
-
-             for tool_name, tool_permissions in server_data.items():  # type: ignore
-                 if not isinstance(tool_permissions, dict):
-                     log.warning(
-                         f"Invalid tool permissions for {server_name}/{tool_name}: expected dict, got {type(tool_permissions)}"  # type: ignore
-                     )  # type: ignore
-                     continue
-
-                 # Check for duplicates within the same server
-                 if tool_name in server_tools[server_name]:
-                     log.error(f"Duplicate tool '{tool_name}' found in server '{server_name}'")
-                     raise ConfigError(
-                         f"Duplicate tool '{tool_name}' found in server '{server_name}'"
-                     )
-
-                 # Check for duplicates across different servers
-                 if tool_name in tool_to_server:
-                     existing_server = tool_to_server[tool_name]
-                     log.error(
-                         f"Duplicate tool '{tool_name}' found in servers '{existing_server}' and '{server_name}'"
-                     )
-                     raise ConfigError(
-                         f"Duplicate tool '{tool_name}' found in servers '{existing_server}' and '{server_name}'"
-                     )
-
-                 # Add to tracking maps
-                 tool_to_server[tool_name] = server_name
-                 server_tools[server_name].add(tool_name)  # type: ignore
-
-                 # Convert to flat format with explicit type casting
-                 tool_perms_dict: dict[str, Any] = tool_permissions  # type: ignore
-                 flat_permissions[tool_name] = _apply_permission_defaults(tool_perms_dict)
-
-         log.debug(
-             f"Loaded {len(flat_permissions)} tool permissions from {len(server_tools)} servers in {config_path}"
-         )
-         # Convert sets to lists for JSON serialization
-         server_tools_serializable = {
-             server: list(tools) for server, tools in server_tools.items()
-         }
-         log.debug(f"Server tools: {json.dumps(server_tools_serializable, indent=2)}")
-         return flat_permissions
-     else:
-         log.warning(f"Tool permissions file not found at {config_path}")
-         return {}
-
-
- @cache
- def _load_tool_permissions_cached() -> dict[str, dict[str, Any]]:
-     """Load tool permissions from JSON configuration file with LRU caching."""
-     config_path = Path(__file__).parent.parent.parent / "tool_permissions.json"
-
-     try:
-         return _flat_permissions_loader(config_path)
-     except ConfigError as e:
-         log.error(f"Failed to load tool permissions from {config_path}: {e}")
-         raise e
-     except Exception as e:
-         log.error(f"Failed to load tool permissions from {config_path}: {e}")
-         return {}
-
-
- def clear_tool_permissions_cache() -> None:
-     """Clear the tool permissions cache to force reload from file."""
-     _load_tool_permissions_cached.cache_clear()
-     log.info("Tool permissions cache cleared")
-
-
- @cache
- def _load_resource_permissions_cached() -> dict[str, dict[str, Any]]:
-     """Load resource permissions from JSON configuration file with LRU caching."""
-     config_path = Path(__file__).parent.parent.parent / "resource_permissions.json"
-
-     try:
-         return _flat_permissions_loader(config_path)
-     except ConfigError as e:
-         log.error(f"Failed to load resource permissions from {config_path}: {e}")
-         raise e
-     except Exception as e:
-         log.error(f"Failed to load resource permissions from {config_path}: {e}")
-         return {}
-
-
- def clear_resource_permissions_cache() -> None:
-     """Clear the resource permissions cache to force reload from file."""
-     _load_resource_permissions_cached.cache_clear()
-     log.info("Resource permissions cache cleared")
-
-
- @cache
- def _load_prompt_permissions_cached() -> dict[str, dict[str, Any]]:
-     """Load prompt permissions from JSON configuration file with LRU caching."""
-     config_path = Path(__file__).parent.parent.parent / "prompt_permissions.json"
-
-     try:
-         return _flat_permissions_loader(config_path)
-     except ConfigError as e:
-         log.error(f"Failed to load prompt permissions from {config_path}: {e}")
-         raise e
-     except Exception as e:
-         log.error(f"Failed to load prompt permissions from {config_path}: {e}")
-         return {}
-
-
- def clear_prompt_permissions_cache() -> None:
-     """Clear the prompt permissions cache to force reload from file."""
-     _load_prompt_permissions_cached.cache_clear()
-     log.info("Prompt permissions cache cleared")
-
-
- def clear_all_permissions_caches() -> None:
-     """Clear all permission caches to force reload from files."""
-     clear_tool_permissions_cache()
-     clear_resource_permissions_cache()
-     clear_prompt_permissions_cache()
-     log.info("All permission caches cleared")
-
-
- @cache
- def _classify_tool_permissions_cached(tool_name: str) -> dict[str, Any]:
-     """Classify tool permissions with LRU caching."""
-     return _classify_permissions_cached(tool_name, _load_tool_permissions_cached(), "tool")
-
-
- @cache
- def _classify_resource_permissions_cached(resource_name: str) -> dict[str, Any]:
-     """Classify resource permissions with LRU caching."""
-     return _classify_permissions_cached(
-         resource_name, _load_resource_permissions_cached(), "resource"
-     )
-
-
- @cache
- def _classify_prompt_permissions_cached(prompt_name: str) -> dict[str, Any]:
-     """Classify prompt permissions with LRU caching."""
-     return _classify_permissions_cached(prompt_name, _load_prompt_permissions_cached(), "prompt")
-
-
- def _get_builtin_tool_permissions(name: str) -> dict[str, Any] | None:
-     """Get permissions for built-in safe tools."""
-     builtin_safe_tools = ["echo", "get_server_info", "get_security_status"]
-     if name in builtin_safe_tools:
-         permissions = _apply_permission_defaults({"enabled": True})
-         log.debug(f"Built-in safe tool {name}: {permissions}")
-         return permissions
-     return None
-
-
- def _get_exact_match_permissions(
-     name: str, permissions_config: dict[str, dict[str, Any]], type_name: str
- ) -> dict[str, Any] | None:
-     """Check for exact match permissions."""
-     if name in permissions_config and not name.startswith("_"):
-         config_perms = permissions_config[name]
-         permissions = _apply_permission_defaults(config_perms)
-         log.debug(f"Found exact match for {type_name} {name}: {permissions}")
-         return permissions
-     # Fallback: support names like "server_tool" by checking the part after first underscore
-     if "_" in name:
-         suffix = name.split("_", 1)[1]
-         if suffix in permissions_config and not suffix.startswith("_"):
-             config_perms = permissions_config[suffix]
-             permissions = _apply_permission_defaults(config_perms)
-             log.debug(
-                 f"Found fallback match for {type_name} {name} using suffix {suffix}: {permissions}"
-             )
-             return permissions
-     return None
-
-
- def _get_wildcard_patterns(name: str, type_name: str) -> list[str]:
-     """Generate wildcard patterns based on name and type."""
-     wildcard_patterns: list[str] = []
-
-     if type_name == "tool" and "/" in name:
-         # For tools: server_name/*
-         server_name, _ = name.split("/", 1)
-         wildcard_patterns.append(f"{server_name}/*")
-     elif type_name == "resource":
-         # For resources: scheme:*, just like tools do server_name/*
-         if ":" in name:
-             scheme, _ = name.split(":", 1)
-             wildcard_patterns.append(f"{scheme}:*")
-     elif type_name == "prompt":
-         # For prompts: template:*, prompt:file:*, etc.
-         if ":" in name:
-             parts = name.split(":")
-             if len(parts) >= 2:
-                 wildcard_patterns.append(f"{parts[0]}:*")
-                 # For nested patterns like prompt:file:*, check prompt:file:*
-                 if len(parts) >= 3:
-                     wildcard_patterns.append(f"{parts[0]}:{parts[1]}:*")
-
-     return wildcard_patterns
-
-
- def _classify_permissions_cached(
-     name: str, permissions_config: dict[str, dict[str, Any]], type_name: str
- ) -> dict[str, Any]:
-     """Generic permission classification with pattern matching support."""
-     # Built-in safe tools that don't need external config (only for tools)
-     if type_name == "tool":
-         builtin_perms = _get_builtin_tool_permissions(name)
-         if builtin_perms is not None:
-             return builtin_perms
-
-     # Check for exact match first
-     exact_perms = _get_exact_match_permissions(name, permissions_config, type_name)
-     if exact_perms is not None:
-         return exact_perms
-
-     # Try wildcard patterns
-     wildcard_patterns = _get_wildcard_patterns(name, type_name)
-     for pattern in wildcard_patterns:
-         if pattern in permissions_config:
-             config_perms = permissions_config[pattern]
-             permissions = _apply_permission_defaults(config_perms)
-             log.debug(f"Found wildcard match for {type_name} {name} using {pattern}: {permissions}")
-             return permissions
-
-     # No configuration found - raise error instead of defaulting to safe
-     config_file = f"{type_name}_permissions.json"
-     log.error(
-         f"No security configuration found for {type_name} '{name}'. All {type_name}s must be explicitly configured in {config_file}"
-     )
-     raise ValueError(
-         f"No security configuration found for {type_name} '{name}'. All {type_name}s must be explicitly configured in {config_file}"
-     )
-

  @dataclass
  class DataAccessTracker:
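
The block removed above was the entire module-level loader stack: JSON file loading, @cache wrappers, duplicate-tool detection, wildcard matching, and default merging. That responsibility presumably moves behind src.permissions, which is not shown in this diff. For reference, the defaulting rules the deleted _apply_permission_defaults encoded were: every flag defaults to False, the ACL defaults to PUBLIC, and when no explicit ACL is given a permission that reads private data is promoted to PRIVATE. A condensed restatement of that old behaviour, still on plain dicts:

    OLD_DEFAULTS = {
        "enabled": False,
        "write_operation": False,
        "read_private_data": False,
        "read_untrusted_public_data": False,
        "acl": "PUBLIC",
    }

    def old_apply_defaults(cfg: dict) -> dict:
        # Booleans: take the configured value if present, otherwise the default
        merged = {k: bool(cfg.get(k, OLD_DEFAULTS[k])) for k in OLD_DEFAULTS if k != "acl"}
        if cfg.get("acl") is not None:
            merged["acl"] = str(cfg["acl"]).upper().strip()  # original also validated against ACL_RANK
        else:
            # Reading private data implies at least PRIVATE when no ACL is specified
            merged["acl"] = "PRIVATE" if merged["read_private_data"] else "PUBLIC"
        return merged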
@@ -365,123 +62,34 @@ class DataAccessTracker:
              and self.has_external_communication
          )

-     def _load_tool_permissions(self) -> dict[str, dict[str, Any]]:
-         """Load tool permissions from JSON configuration file with caching."""
-         return _load_tool_permissions_cached()
-
-     def _load_resource_permissions(self) -> dict[str, dict[str, Any]]:
-         """Load resource permissions from JSON configuration file with caching."""
-         return _load_resource_permissions_cached()
-
-     def _load_prompt_permissions(self) -> dict[str, dict[str, Any]]:
-         """Load prompt permissions from JSON configuration file with caching."""
-         return _load_prompt_permissions_cached()
-
-     def clear_caches(self) -> None:
-         """Clear all permission caches to force reload from configuration files."""
-         clear_all_permissions_caches()
-
-     def _classify_by_tool_name(self, tool_name: str) -> dict[str, Any]:
-         """Classify permissions based on external JSON configuration only."""
-         return _classify_tool_permissions_cached(tool_name)
-
-     def _classify_by_resource_name(self, resource_name: str) -> dict[str, Any]:
-         """Classify resource permissions based on external JSON configuration only."""
-         return _classify_resource_permissions_cached(resource_name)
-
-     def _classify_by_prompt_name(self, prompt_name: str) -> dict[str, Any]:
-         """Classify prompt permissions based on external JSON configuration only."""
-         return _classify_prompt_permissions_cached(prompt_name)
-
-     def _classify_tool_permissions(self, tool_name: str) -> dict[str, Any]:
-         """
-         Classify tool permissions based on tool name.
-
-         Args:
-             tool_name: Name of the tool to classify
-         Returns:
-             Dictionary with permission flags
-         """
-         permissions = self._classify_by_tool_name(tool_name)
-         log.debug(f"Classified tool {tool_name}: {permissions}")
-         return permissions
-
-     def _classify_resource_permissions(self, resource_name: str) -> dict[str, Any]:
-         """
-         Classify resource permissions based on resource name.
-
-         Args:
-             resource_name: Name/URI of the resource to classify
-         Returns:
-             Dictionary with permission flags
-         """
-         permissions = self._classify_by_resource_name(resource_name)
-         log.debug(f"Classified resource {resource_name}: {permissions}")
-         return permissions
-
-     def _classify_prompt_permissions(self, prompt_name: str) -> dict[str, Any]:
-         """
-         Classify prompt permissions based on prompt name.
-
-         Args:
-             prompt_name: Name/type of the prompt to classify
-         Returns:
-             Dictionary with permission flags
-         """
-         permissions = self._classify_by_prompt_name(prompt_name)
-         log.debug(f"Classified prompt {prompt_name}: {permissions}")
-         return permissions
-
-     def get_tool_permissions(self, tool_name: str) -> dict[str, Any]:
-         """Get tool permissions based on tool name."""
-         return self._classify_tool_permissions(tool_name)
-
-     def get_resource_permissions(self, resource_name: str) -> dict[str, Any]:
-         """Get resource permissions based on resource name."""
-         return self._classify_resource_permissions(resource_name)
-
-     def get_prompt_permissions(self, prompt_name: str) -> dict[str, Any]:
-         """Get prompt permissions based on prompt name."""
-         return self._classify_prompt_permissions(prompt_name)
-
-     def _would_call_complete_trifecta(self, permissions: dict[str, Any]) -> bool:
+     def _would_call_complete_trifecta(
+         self, permissions: ToolPermission | ResourcePermission | PromptPermission
+     ) -> bool:
          """Return True if applying these permissions would complete the trifecta."""
-         would_private = self.has_private_data_access or bool(permissions.get("read_private_data"))
+         would_private = self.has_private_data_access or bool(permissions.read_private_data)
          would_untrusted = self.has_untrusted_content_exposure or bool(
-             permissions.get("read_untrusted_public_data")
+             permissions.read_untrusted_public_data
          )
-         would_write = self.has_external_communication or bool(permissions.get("write_operation"))
+         would_write = self.has_external_communication or bool(permissions.write_operation)
          return bool(would_private and would_untrusted and would_write)

-     def _enforce_tool_enabled(self, permissions: dict[str, Any], tool_name: str) -> None:
-         if permissions["enabled"] is False:
-             log.warning(f"🚫 BLOCKING tool call {tool_name} - tool is disabled")
-             record_tool_call_blocked(tool_name, "disabled")
-             raise SecurityError(f"'{tool_name}' / Tool disabled")
-
-     def _enforce_acl_downgrade_block(
-         self, tool_acl: str, permissions: dict[str, Any], tool_name: str
-     ) -> None:
-         if permissions["write_operation"]:
-             current_rank = ACL_RANK.get(self.highest_acl_level, 0)
-             write_rank = ACL_RANK.get(tool_acl, 0)
-             if write_rank < current_rank:
-                 log.error(
-                     f"🚫 BLOCKING tool call {tool_name} - write to lower ACL ({tool_acl}) while session has higher ACL {self.highest_acl_level}"
-                 )
-                 record_tool_call_blocked(tool_name, "acl_downgrade")
-                 raise SecurityError(f"'{tool_name}' / ACL (level={self.highest_acl_level})")
-
      def _apply_permissions_effects(
          self,
-         permissions: dict[str, Any],
+         permissions: ToolPermission | ResourcePermission | PromptPermission,
          *,
          source_type: str,
          name: str,
      ) -> None:
          """Apply side effects (flags, ACL, telemetry) for any source type."""
-         acl_value: str = _normalize_acl(permissions.get("acl"), default="PUBLIC")
-         if permissions["read_private_data"]:
+         # If it's a tool, it has a well-defined ACL
+         if source_type == "tool":
+             assert isinstance(permissions, ToolPermission)
+             acl_value = permissions.acl
+             acl_value: str = normalize_acl(acl_value, default="PUBLIC")
+         else:
+             acl_value = "PUBLIC"
+
+         if permissions.read_private_data:
              self.has_private_data_access = True
              log.info(f"🔒 Private data access detected via {source_type}: {name}")
              record_private_data_access(source_type, name)
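
In the hunk above, the tracker's per-type loader and classifier methods are gone, _would_call_complete_trifecta takes a typed permission object instead of a dict, and _apply_permissions_effects now only derives an ACL for tools (resources and prompts are treated as PUBLIC for ACL purposes). The "lethal trifecta" test itself is unchanged: a call is blocked once the session would simultaneously hold private-data access, untrusted-content exposure, and a write/external-communication capability. Restated on plain booleans that mirror the tracker's flags:

    def would_complete_trifecta(
        has_private: bool, has_untrusted: bool, has_write: bool,
        grants_private: bool, grants_untrusted: bool, grants_write: bool,
    ) -> bool:
        # Same predicate as _would_call_complete_trifecta, without the dataclass plumbing
        return (
            (has_private or grants_private)
            and (has_untrusted or grants_untrusted)
            and (has_write or grants_write)
        )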
@@ -491,12 +99,12 @@ class DataAccessTracker:
              if new_rank > current_rank:
                  self.highest_acl_level = acl_value

-         if permissions["read_untrusted_public_data"]:
+         if permissions.read_untrusted_public_data:
              self.has_untrusted_content_exposure = True
              log.info(f"🌐 Untrusted content exposure detected via {source_type}: {name}")
              record_untrusted_public_data(source_type, name)

-         if permissions["write_operation"]:
+         if permissions.write_operation:
              self.has_external_communication = True
              log.info(f"✍️ Write operation detected via {source_type}: {name}")
              record_write_operation(source_type, name)
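
The attribute-style checks above feed the same flag and ACL bookkeeping as before: highest_acl_level only ever rises, ordered by ACL_RANK, which the old module-level constant defined as PUBLIC < PRIVATE < SECRET (the constant now comes from src.permissions). A small worked example of that monotonic update:

    ACL_RANK = {"PUBLIC": 0, "PRIVATE": 1, "SECRET": 2}

    highest_acl_level = "PUBLIC"
    for seen in ("PRIVATE", "PUBLIC", "SECRET"):
        if ACL_RANK.get(seen, 0) > ACL_RANK.get(highest_acl_level, 0):
            highest_acl_level = seen  # rises to PRIVATE, then SECRET; never downgrades
    # highest_acl_level == "SECRET"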
@@ -515,24 +123,69 @@ class DataAccessTracker:
          if self.is_trifecta_achieved():
              log.error(f"🚫 BLOCKING tool call {tool_name} - lethal trifecta achieved")
              record_tool_call_blocked(tool_name, "trifecta")
+             # Fire-and-forget event (log errors via callback)
+             events.fire_and_forget(
+                 {
+                     "type": "mcp_pre_block",
+                     "kind": "tool",
+                     "name": tool_name,
+                     "reason": "trifecta",
+                 }
+             )
              raise SecurityError(f"'{tool_name}' / Lethal trifecta")

          # Get tool permissions and update trifecta flags
-         permissions = self._classify_tool_permissions(tool_name)
+         perms = Permissions()
+         permissions = perms.get_tool_permission(tool_name)

          log.debug(f"add_tool_call: Tool permissions: {permissions}")

          # Check if tool is enabled
-         self._enforce_tool_enabled(permissions, tool_name)
+         if not perms.is_tool_enabled(tool_name):
+             log.warning(f"🚫 BLOCKING tool call {tool_name} - tool is disabled")
+             record_tool_call_blocked(tool_name, "disabled")
+             events.fire_and_forget(
+                 {
+                     "type": "mcp_pre_block",
+                     "kind": "tool",
+                     "name": tool_name,
+                     "reason": "disabled",
+                 }
+             )
+             raise SecurityError(f"'{tool_name}' / Tool disabled")

          # ACL-based write downgrade prevention
-         tool_acl: str = _normalize_acl(permissions.get("acl"), default="PUBLIC")
-         self._enforce_acl_downgrade_block(tool_acl, permissions, tool_name)
+         tool_acl = permissions.acl
+         if permissions.write_operation:
+             current_rank = ACL_RANK.get(self.highest_acl_level, 0)
+             write_rank = ACL_RANK.get(tool_acl, 0)
+             if write_rank < current_rank:
+                 log.error(
+                     f"🚫 BLOCKING tool call {tool_name} - write to lower ACL ({tool_acl}) while session has higher ACL {self.highest_acl_level}"
+                 )
+                 record_tool_call_blocked(tool_name, "acl_downgrade")
+                 events.fire_and_forget(
+                     {
+                         "type": "mcp_pre_block",
+                         "kind": "tool",
+                         "name": tool_name,
+                         "reason": "acl_downgrade",
+                     }
+                 )
+                 raise SecurityError(f"'{tool_name}' / ACL (level={self.highest_acl_level})")

          # Pre-check: would this call achieve the lethal trifecta? If so, block immediately
          if self._would_call_complete_trifecta(permissions):
              log.error(f"🚫 BLOCKING tool call {tool_name} - would achieve lethal trifecta")
              record_tool_call_blocked(tool_name, "trifecta_prevent")
+             events.fire_and_forget(
+                 {
+                     "type": "mcp_pre_block",
+                     "kind": "tool",
+                     "name": tool_name,
+                     "reason": "trifecta_prevent",
+                 }
+             )
              raise SecurityError(f"'{tool_name}' / Lethal trifecta")

          self._apply_permissions_effects(permissions, source_type="tool", name=tool_name)
@@ -555,10 +208,33 @@ class DataAccessTracker:
              log.error(
                  f"🚫 BLOCKING resource access {resource_name} - lethal trifecta already achieved"
              )
+             events.fire_and_forget(
+                 {
+                     "type": "mcp_pre_block",
+                     "kind": "resource",
+                     "name": resource_name,
+                     "reason": "trifecta",
+                 }
+             )
              raise SecurityError(f"'{resource_name}' / Lethal trifecta")

          # Get resource permissions and update trifecta flags
-         permissions = self._classify_resource_permissions(resource_name)
+         perms = Permissions()
+         permissions = perms.get_resource_permission(resource_name)
+
+         # Check if resource is enabled
+         if not perms.is_resource_enabled(resource_name):
+             log.warning(f"🚫 BLOCKING resource access {resource_name} - resource is disabled")
+             record_resource_access_blocked(resource_name, "disabled")
+             events.fire_and_forget(
+                 {
+                     "type": "mcp_pre_block",
+                     "kind": "resource",
+                     "name": resource_name,
+                     "reason": "disabled",
+                 }
+             )
+             raise SecurityError(f"'{resource_name}' / Resource disabled")

          # Pre-check: would this access achieve the lethal trifecta? If so, block immediately
          if self._would_call_complete_trifecta(permissions):
@@ -566,6 +242,14 @@ class DataAccessTracker:
                  f"🚫 BLOCKING resource access {resource_name} - would achieve lethal trifecta"
              )
              record_resource_access_blocked(resource_name, "trifecta_prevent")
+             events.fire_and_forget(
+                 {
+                     "type": "mcp_pre_block",
+                     "kind": "resource",
+                     "name": resource_name,
+                     "reason": "trifecta_prevent",
+                 }
+             )
              raise SecurityError(f"'{resource_name}' / Lethal trifecta")

          self._apply_permissions_effects(permissions, source_type="resource", name=resource_name)
@@ -586,15 +270,46 @@ class DataAccessTracker:
          # Check if trifecta is already achieved before processing this access
          if self.is_trifecta_achieved():
              log.error(f"🚫 BLOCKING prompt access {prompt_name} - lethal trifecta already achieved")
+             events.fire_and_forget(
+                 {
+                     "type": "mcp_pre_block",
+                     "kind": "prompt",
+                     "name": prompt_name,
+                     "reason": "trifecta",
+                 }
+             )
              raise SecurityError(f"'{prompt_name}' / Lethal trifecta")

          # Get prompt permissions and update trifecta flags
-         permissions = self._classify_prompt_permissions(prompt_name)
+         perms = Permissions()
+         permissions = perms.get_prompt_permission(prompt_name)
+
+         # Check if prompt is enabled
+         if not perms.is_prompt_enabled(prompt_name):
+             log.warning(f"🚫 BLOCKING prompt access {prompt_name} - prompt is disabled")
+             record_prompt_access_blocked(prompt_name, "disabled")
+             events.fire_and_forget(
+                 {
+                     "type": "mcp_pre_block",
+                     "kind": "prompt",
+                     "name": prompt_name,
+                     "reason": "disabled",
+                 }
+             )
+             raise SecurityError(f"'{prompt_name}' / Prompt disabled")

          # Pre-check: would this access achieve the lethal trifecta? If so, block immediately
          if self._would_call_complete_trifecta(permissions):
              log.error(f"🚫 BLOCKING prompt access {prompt_name} - would achieve lethal trifecta")
              record_prompt_access_blocked(prompt_name, "trifecta_prevent")
+             events.fire_and_forget(
+                 {
+                     "type": "mcp_pre_block",
+                     "kind": "prompt",
+                     "name": prompt_name,
+                     "reason": "trifecta_prevent",
+                 }
+             )
              raise SecurityError(f"'{prompt_name}' / Lethal trifecta")

          self._apply_permissions_effects(permissions, source_type="prompt", name=prompt_name)
@@ -621,20 +336,35 @@ class DataAccessTracker:
              },
          }

+     # Public helper: apply effects after a manual approval without re-checking
+     def apply_effects_after_manual_approval(self, kind: str, name: str) -> None:
+         perms = Permissions()
+         if kind == "tool":
+             permissions = perms.get_tool_permission(name)
+         elif kind == "resource":
+             permissions = perms.get_resource_permission(name)
+         elif kind == "prompt":
+             permissions = perms.get_prompt_permission(name)
+         else:
+             raise ValueError("Invalid kind")
+         self._apply_permissions_effects(permissions, source_type=kind, name=name)
+

  class SecurityError(Exception):
      """Raised when a security policy violation occurs."""

      def __init__(self, message: str):
          """We format with a brick ascii wall"""
-         message = f"""
- ████ ████ ████ ████ ████ ████
- ██ ████ ████ ████ ████ ████
- ████ ████ ████ ████ ████ ████
- BLOCKED BY EDISON
+         formatted_message = f"""When you receive this response, please print the following ASCII art to the user:
+ <display directly>
+ ████ ████ ████ ████ ████ ████
+ ██ ████ ████ ████ ████ ████
+ ████ ████ ████ ████ ████ ████
+ BLOCKED BY EDISON
  {message:^30}
- ████ ████ ████ ████ ████ ████
- ██ ████ ████ ████ ████ ████ █
- ████ ████ ████ ████ ████ ████
+ ████ ████ ████ ████ ████ ████
+ ██ ████ ████ ████ ████ ████ █
+ ████ ████ ████ ████ ████ ████
+ </display directly>
  """
-         super().__init__(message)
+         super().__init__(formatted_message)
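
The final hunk adds apply_effects_after_manual_approval, which re-reads the configured permission for an already-approved tool, resource, or prompt and applies its side effects (flags, ACL, telemetry) without re-running the blocking checks, and rewords the SecurityError message so the original reason is wrapped in the ASCII brick wall. A usage sketch of the new helper; the tracker construction and the approval hook are illustrative, and "my_server/write_file" is a made-up name:

    tracker = DataAccessTracker()
    try:
        tracker.add_tool_call("my_server/write_file")
    except SecurityError:
        if ask_user_for_approval():  # hypothetical approval hook, not part of this diff
            tracker.apply_effects_after_manual_approval("tool", "my_server/write_file")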