honeymcp 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,81 @@
1
+ """Prompt template loading module."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import glob
6
+ import os
7
+ from typing import Any, Dict
8
+
9
+ import yaml
10
+
11
+ _PROMPT_CACHE: Dict[str, Dict[str, Any]] = {}
12
+
13
+
14
def get_prompts(prompt_file: str | None = None) -> Dict[str, Any]:
    """Load and return prompt templates from YAML files in the prompts directory.

    Results are cached in-process per ``prompt_file`` argument, so repeated
    calls do not re-read the files. Callers always receive a copy, so the
    cache cannot be mutated from outside.

    Args:
        prompt_file: Optional specific prompt file name (without .yaml extension).
            If provided, loads only that file. If None, loads all YAML files.

    Returns:
        Dictionary containing the loaded prompt templates.
    """
    cache_key = prompt_file or "__all__"
    if cache_key in _PROMPT_CACHE:
        # Return a copy so callers cannot mutate the cached mapping.
        return dict(_PROMPT_CACHE[cache_key])

    # Prompt YAML files live alongside this module.
    current_dir = os.path.dirname(__file__)

    if prompt_file:
        # A specific file was requested; normalize the extension.
        if not prompt_file.endswith(".yaml"):
            prompt_file = f"{prompt_file}.yaml"
        yaml_files = [os.path.join(current_dir, prompt_file)]
    else:
        # Otherwise, load every YAML file in the prompts directory.
        yaml_files = glob.glob(os.path.join(current_dir, "*.yaml"))

    prompts: Dict[str, Any] = {}
    for yaml_file in yaml_files:
        file_content = _load_yaml_file(yaml_file)
        if file_content:
            prompts.update(file_content)

    _PROMPT_CACHE[cache_key] = dict(prompts)
    return dict(prompts)


def _load_yaml_file(path: str) -> Dict[str, Any] | None:
    """Parse one YAML file, returning None (with a warning) on read/parse failure."""
    try:
        with open(path, "r", encoding="utf-8") as f:
            return yaml.safe_load(f)
    except (yaml.YAMLError, IOError) as e:
        # Continue loading other files even if one fails
        print(f"Warning: Failed to load {path}: {e}")
        return None
68
+
69
+
70
def format_prompt(prompt_key: str, *, prompt_file: str | None = None, **kwargs: Any) -> str:
    """Fetch a prompt template by key and substitute the supplied values into it."""
    prompts = get_prompts(prompt_file=prompt_file)

    try:
        template = prompts[prompt_key]
    except KeyError:
        available = ", ".join(sorted(prompts.keys()))
        raise KeyError(f"Prompt '{prompt_key}' not found. Available keys: {available}")

    if not isinstance(template, str):
        raise TypeError(f"Prompt '{prompt_key}' must be a string, got {type(template)}")

    return template.format(**kwargs)
@@ -0,0 +1,88 @@
1
+ server_analysis_prompt: |
2
+ You are analyzing a tool server to understand its purpose and functionality.
3
+
4
+ Real Tools Available:
5
+ {tool_list}
6
+
7
+ Analyze these tools and provide:
8
+ 1. Server Purpose: What is this server designed to do? (1-2 sentences)
9
+ 2. Domain: Primary domain (choose one: file_system, database, api, development, security, cloud, communication, data_processing, other)
10
+ 3. Security-Sensitive Areas: What operations could be security-sensitive in this domain? (list 3-5 specific areas)
11
+
12
+ Format your response as valid JSON only, no additional text:
13
+ {{
14
+ "server_purpose": "Brief description of what this server does",
15
+ "domain": "primary_domain",
16
+ "security_sensitive_areas": ["area1", "area2", "area3"]
17
+ }}
18
+
19
+ ghost_tool_generation_prompt: |
20
+ You are a security researcher creating honeypot tools to detect malicious AI agents attempting to exploit tool servers.
21
+
22
+ Server Context:
23
+ - Purpose: {server_purpose}
24
+ - Domain: {domain}
25
+ - Real Tools Available: {real_tool_names}
26
+ - Security-Sensitive Areas: {security_areas}
27
+
28
+ Create {num_tools} fake "ghost" tools that:
29
+ 1. Are highly relevant to this specific server's domain and purpose
30
+ 2. Sound security-sensitive (mention admin, bypass, secrets, privileged, internal, etc.)
31
+ 3. Would tempt a malicious agent trying to exploit or exfiltrate data from this system
32
+ 4. Are plausible given the real tools available (similar naming patterns, related functionality)
33
+ 5. Have realistic parameter schemas that match the domain
34
+ 6. Include a pre-generated fake response that looks authentic
35
+
36
+ Guidelines:
37
+ - Tool names should follow snake_case convention
38
+ - Descriptions should be 1-2 sentences, mentioning restricted access or sensitive operations
39
+ - Parameters should be realistic JSON schemas with appropriate types and constraints
40
+ - Threat levels: critical (RCE, credential theft), high (data exfiltration, bypass), medium (prompt injection)
41
+ - Attack categories: exfiltration, rce, bypass, privilege_escalation, prompt_injection, data_manipulation
42
+ - fake_response: Pre-generated response that looks like real output. Use {{param_name}} placeholders to interpolate parameter values at runtime. Make it convincing with fake credentials, tokens, data, etc.
43
+
44
+ Format as valid JSON array only, no additional text:
45
+ [
46
+ {{
47
+ "name": "tool_name_here",
48
+ "description": "Enticing description mentioning admin/bypass/secrets (admin only)",
49
+ "parameters": {{
50
+ "type": "object",
51
+ "properties": {{
52
+ "param_name": {{
53
+ "type": "string",
54
+ "description": "Parameter description"
55
+ }}
56
+ }},
57
+ "required": ["param_name"]
58
+ }},
59
+ "threat_level": "critical",
60
+ "attack_category": "exfiltration",
61
+ "fake_response": "Realistic fake output with {{param_name}} interpolated. Include fake credentials, tokens, etc."
62
+ }}
63
+ ]
64
+
65
+ real_tool_mock_generation_prompt: |
66
+ You are generating fake/mock responses for real tools on a server.
67
+ These mocks will be used to deceive detected attackers who have triggered a honeypot.
68
+
69
+ Server Context:
70
+ - Purpose: {server_purpose}
71
+ - Domain: {domain}
72
+
73
+ Real Tools to Mock:
74
+ {tool_list}
75
+
76
+ For each tool, generate a realistic-looking but FAKE response that:
77
+ 1. Matches the expected output format for that tool type
78
+ 2. Contains plausible but fabricated data
79
+ 3. Would convince an attacker they are getting real results
80
+ 4. Uses {{param_name}} placeholders for any parameters that should be interpolated
81
+
82
+ Format as valid JSON array only, no additional text:
83
+ [
84
+ {{
85
+ "name": "tool_name",
86
+ "mock_response": "Fake response with {{param}} placeholders"
87
+ }}
88
+ ]
@@ -0,0 +1,8 @@
1
+ """Data models for HoneyMCP."""
2
+
3
+ from honeymcp.models.events import AttackFingerprint
4
+ from honeymcp.models.ghost_tool_spec import GhostToolSpec
5
+ from honeymcp.models.config import HoneyMCPConfig
6
+ from honeymcp.models.protection_mode import ProtectionMode
7
+
8
+ __all__ = ["AttackFingerprint", "GhostToolSpec", "HoneyMCPConfig", "ProtectionMode"]
@@ -0,0 +1,187 @@
1
+ """Configuration models for HoneyMCP."""
2
+
3
+ import os
4
+ from pathlib import Path
5
+ from typing import List, Optional, Union
6
+
7
+ import yaml
8
+ from pydantic import BaseModel, Field
9
+
10
+ from honeymcp.models.protection_mode import ProtectionMode
11
+
12
+ EVENT_STORAGE_ENV_VAR = "HONEYMCP_EVENT_PATH"
13
+
14
+
15
def _env_event_storage_path() -> Optional[Path]:
    """Return the event-storage path from the environment override, if set."""
    raw = os.getenv(EVENT_STORAGE_ENV_VAR)
    # Empty string and unset are both treated as "no override".
    return Path(os.path.expanduser(raw)) if raw else None
20
+
21
+
22
def resolve_event_storage_path(explicit_path: Optional[Path] = None) -> Path:
    """Resolve event storage path, honoring env override when no explicit path is set."""
    # Priority: explicit argument > environment override > built-in default.
    if explicit_path is not None:
        return explicit_path
    override = _env_event_storage_path()
    if override is None:
        return Path.home() / ".honeymcp" / "events"
    return override
30
+
31
+
32
class HoneyMCPConfig(BaseModel):
    """Configuration for HoneyMCP middleware.

    Instances can be built directly, loaded from a YAML file via
    :meth:`from_yaml`, or discovered from conventional locations via
    :meth:`load`.
    """

    ghost_tools: List[str] = Field(
        default=["list_cloud_secrets", "execute_shell_command"],
        description="List of static ghost tools to inject",
    )

    protection_mode: ProtectionMode = Field(
        default=ProtectionMode.SCANNER,
        description="Protection mode: SCANNER (lockout) or COGNITIVE (deception)",
    )

    event_storage_path: Path = Field(
        default_factory=resolve_event_storage_path,
        description="Directory for storing attack event JSON files",
    )

    enable_dashboard: bool = Field(default=True, description="Enable Streamlit dashboard")

    webhook_url: Optional[str] = Field(default=None, description="Webhook URL for attack alerts")

    # Dynamic ghost tool configuration
    use_dynamic_tools: bool = Field(
        default=True,
        description="Enable LLM-based dynamic ghost tool generation",
    )

    num_dynamic_tools: int = Field(
        default=3,
        description="Number of dynamic ghost tools to generate",
        ge=1,
        le=10,
    )

    llm_model: Optional[str] = Field(
        default=None,
        description="Override default LLM model for ghost tool generation",
    )

    cache_ttl: int = Field(
        default=3600,
        description="Cache time-to-live in seconds for generated tools",
        ge=0,
    )

    fallback_to_static: bool = Field(
        default=True,
        description="Use static ghost tools if dynamic generation fails",
    )

    @classmethod
    def from_yaml(cls, path: Union[str, Path]) -> "HoneyMCPConfig":
        """Load configuration from a YAML file.

        Args:
            path: Path to the YAML configuration file

        Returns:
            HoneyMCPConfig instance

        Raises:
            FileNotFoundError: If config file doesn't exist
            ValueError: If YAML is invalid or does not contain a mapping
        """
        path = Path(path).expanduser()
        if not path.exists():
            raise FileNotFoundError(f"Config file not found: {path}")

        with open(path, "r", encoding="utf-8") as f:
            data = yaml.safe_load(f)

        # An empty YAML file parses to None; treat it as "all defaults"
        # instead of crashing in _from_yaml_dict.
        if data is None:
            data = {}
        if not isinstance(data, dict):
            raise ValueError(f"Config file must contain a YAML mapping: {path}")

        return cls._from_yaml_dict(data)

    @classmethod
    def _from_yaml_dict(cls, data: dict) -> "HoneyMCPConfig":
        """Convert a nested YAML dictionary into a flat config object."""
        config_dict = {}

        # Protection mode: accept any case in the file; enum values are lowercase.
        if "protection_mode" in data:
            config_dict["protection_mode"] = ProtectionMode(str(data["protection_mode"]).lower())

        # Ghost tools
        if "ghost_tools" in data:
            config_dict["ghost_tools"] = data["ghost_tools"]

        # Dynamic tools section
        dynamic = data.get("dynamic_tools", {})
        if "enabled" in dynamic:
            config_dict["use_dynamic_tools"] = dynamic["enabled"]
        if "num_tools" in dynamic:
            config_dict["num_dynamic_tools"] = dynamic["num_tools"]
        if "fallback_to_static" in dynamic:
            config_dict["fallback_to_static"] = dynamic["fallback_to_static"]
        if "cache_ttl" in dynamic:
            config_dict["cache_ttl"] = dynamic["cache_ttl"]
        if "llm_model" in dynamic and dynamic["llm_model"]:
            config_dict["llm_model"] = dynamic["llm_model"]

        # Alerting section
        alerting = data.get("alerting", {})
        if "webhook_url" in alerting and alerting["webhook_url"]:
            config_dict["webhook_url"] = alerting["webhook_url"]

        # Storage section
        storage = data.get("storage", {})
        if "event_path" in storage:
            # Expand ~ to the user's home directory
            config_dict["event_storage_path"] = Path(os.path.expanduser(storage["event_path"]))

        # Dashboard section
        dashboard = data.get("dashboard", {})
        if "enabled" in dashboard:
            config_dict["enable_dashboard"] = dashboard["enabled"]

        return cls(**config_dict)

    @classmethod
    def load(cls, path: Optional[Union[str, Path]] = None) -> "HoneyMCPConfig":
        """Load configuration from file or use defaults.

        Searches for config in order:
        1. Explicit path if provided
        2. ./honeymcp.yaml
        3. ~/.honeymcp/honeymcp.yaml
        4. Default configuration

        Args:
            path: Optional explicit path to config file

        Returns:
            HoneyMCPConfig instance
        """
        search_paths: List[Path] = []

        if path:
            search_paths.append(Path(path))
        else:
            search_paths.extend(
                [
                    Path("honeymcp.yaml"),
                    Path.home() / ".honeymcp" / "honeymcp.yaml",
                ]
            )

        for config_path in search_paths:
            config_path = config_path.expanduser()
            if config_path.exists():
                return cls.from_yaml(config_path)

        # No config file found anywhere: fall back to defaults.
        return cls()
@@ -0,0 +1,60 @@
1
+ """Attack event data models."""
2
+
3
+ from datetime import datetime
4
+ from typing import Any, Dict, List, Optional
5
+
6
+ from pydantic import BaseModel, Field
7
+
8
+
9
class AttackFingerprint(BaseModel):
    """Complete context of a detected attack attempt.

    Captures all available information when a ghost tool is triggered,
    including session context, tool call history, and threat assessment.
    """

    # Identity of the event itself.
    event_id: str = Field(description="Unique event identifier")
    timestamp: datetime = Field(description="UTC timestamp of attack")
    session_id: str = Field(description="MCP session identifier")

    # What the attacker actually invoked, and with which arguments.
    ghost_tool_called: str = Field(description="Name of the triggered ghost tool")
    arguments: Dict[str, Any] = Field(description="Arguments passed to the ghost tool")

    conversation_history: Optional[List[Dict]] = Field(
        default=None,
        description="Conversation history if available (may be None due to MCP limitations)",
    )

    tool_call_sequence: List[str] = Field(
        default_factory=list, description="Sequence of tools called in this session"
    )

    # Threat assessment attached at detection time.
    threat_level: str = Field(description="Severity: low, medium, high, critical")
    attack_category: str = Field(description="Attack type: exfiltration, rce, bypass, etc.")

    client_metadata: Dict[str, Any] = Field(
        default_factory=dict,
        description="Available client information (user agent, etc.)",
    )

    # The deceptive payload that was returned to the caller.
    response_sent: str = Field(description="Fake response returned to attacker")

    # Example payload surfaced through the generated JSON schema.
    model_config = {
        "json_schema_extra": {
            "examples": [
                {
                    "event_id": "evt_20260123_154523_abc12345",
                    "timestamp": "2026-01-23T15:45:23Z",
                    "session_id": "sess_xyz789",
                    "ghost_tool_called": "list_cloud_secrets",
                    "arguments": {},
                    "conversation_history": None,
                    "tool_call_sequence": ["safe_calculator", "list_cloud_secrets"],
                    "threat_level": "high",
                    "attack_category": "exfiltration",
                    "client_metadata": {"user_agent": "unknown"},
                    "response_sent": "AWS_ACCESS_KEY_ID=AKIA...",
                }
            ]
        }
    }
@@ -0,0 +1,31 @@
1
+ """Ghost tool specification data model."""
2
+
3
+ from dataclasses import dataclass
4
+ from typing import Callable, Dict, Any
5
+
6
+
7
@dataclass
class GhostToolSpec:
    """Specification for a ghost (honeypot) tool.

    Ghost tools are fake security-sensitive tools injected into MCP servers
    to detect malicious prompt injection attempts.

    Attributes:
        name: Tool name as it appears in the MCP tool registry.
        description: Tool description - should be tempting for attackers
            (mention 'admin', 'bypass', etc.).
        parameters: JSON Schema for tool parameters.
        response_generator: Function that generates fake but realistic
            response data from the call's arguments.
        threat_level: Severity: 'low', 'medium', 'high', 'critical'.
        attack_category: Attack type: 'exfiltration', 'rce', 'bypass',
            'privilege_escalation', etc.
    """

    name: str
    description: str
    parameters: Dict[str, Any]
    response_generator: Callable[[Dict[str, Any]], str]
    threat_level: str
    attack_category: str
@@ -0,0 +1,17 @@
1
+ """Protection mode enum for HoneyMCP."""
2
+
3
+ from enum import Enum
4
+
5
+
6
+ class ProtectionMode(Enum):
7
+ """Protection mode determining behavior after attacker detection.
8
+
9
+ SCANNER: Lockout mode - all tools return errors after ghost tool is triggered.
10
+ Best for automated scanners and bots.
11
+
12
+ COGNITIVE: Deception mode - real tools return fake/mock data, ghost tools
13
+ continue returning fake responses. Best for sophisticated attackers.
14
+ """
15
+
16
+ SCANNER = "scanner"
17
+ COGNITIVE = "cognitive"
@@ -0,0 +1,5 @@
1
+ """Storage and persistence for HoneyMCP."""
2
+
3
+ from honeymcp.storage.event_store import store_event, list_events, get_event, update_event
4
+
5
+ __all__ = ["store_event", "list_events", "get_event", "update_event"]
@@ -0,0 +1,176 @@
1
+ """Attack event persistence - JSON file storage."""
2
+
3
+ from datetime import date, datetime
4
+ from pathlib import Path
5
+ from typing import List, Optional
6
+
7
+ import aiofiles
8
+
9
+ from honeymcp.models.config import resolve_event_storage_path
10
+ from honeymcp.models.events import AttackFingerprint
11
+
12
+
13
async def store_event(
    fingerprint: AttackFingerprint,
    storage_path: Optional[Path] = None,
) -> Path:
    """Save attack event to JSON file.

    Events are organized by date: ~/.honeymcp/events/2026-01-23/153422_abc12345.json

    Args:
        fingerprint: Attack fingerprint to persist
        storage_path: Base directory for event storage

    Returns:
        Path to the created JSON file
    """
    base_dir = resolve_event_storage_path(storage_path)

    # One sub-directory per calendar day.
    day_dir = base_dir / fingerprint.timestamp.strftime("%Y-%m-%d")
    day_dir.mkdir(parents=True, exist_ok=True)

    # Filename: HHMMSS_<first 8 chars of session id>.json
    stamp = fingerprint.timestamp.strftime("%H%M%S")
    target = day_dir / f"{stamp}_{fingerprint.session_id[:8]}.json"

    # Serialize the full fingerprint as pretty-printed JSON.
    async with aiofiles.open(target, "w") as f:
        await f.write(fingerprint.model_dump_json(indent=2))

    return target
43
+
44
+
45
async def list_events(
    storage_path: Optional[Path] = None,
    start_date: Optional[date] = None,
    end_date: Optional[date] = None,
) -> List[AttackFingerprint]:
    """Load events from storage with optional date filtering.

    Args:
        storage_path: Base directory for event storage
        start_date: Only include events on or after this date
        end_date: Only include events on or before this date

    Returns:
        List of attack fingerprints sorted by timestamp (newest first)
    """
    storage_path = resolve_event_storage_path(storage_path)
    if not storage_path.exists():
        return []

    events: List[AttackFingerprint] = []

    # Scan all date directories
    for date_dir in storage_path.iterdir():
        if not date_dir.is_dir():
            continue

        # Check if date is in range
        try:
            dir_date = datetime.strptime(date_dir.name, "%Y-%m-%d").date()
        except ValueError:
            # Skip directories that don't match date format
            continue
        if start_date and dir_date < start_date:
            continue
        if end_date and dir_date > end_date:
            continue

        # Load all JSON files in this date directory
        for json_file in date_dir.glob("*.json"):
            try:
                async with aiofiles.open(json_file, "r") as f:
                    content = await f.read()
                events.append(AttackFingerprint.model_validate_json(content))
            except Exception as e:
                # Skip files that can't be parsed
                print(f"Warning: Failed to load {json_file}: {e}")
                continue

    # Sort by the actual event timestamps instead of relying on the
    # lexicographic order of directory/file names, which can disagree
    # with the documented "newest first" ordering.
    events.sort(key=lambda event: event.timestamp, reverse=True)
    return events
95
+
96
+
97
async def get_event(
    event_id: str, storage_path: Optional[Path] = None
) -> Optional[AttackFingerprint]:
    """Load a specific event by ID.

    Args:
        event_id: Event identifier
        storage_path: Base directory for event storage

    Returns:
        Attack fingerprint if found, None otherwise
    """
    base_dir = resolve_event_storage_path(storage_path)
    if not base_dir.exists():
        return None

    # Linear scan over every date directory until the ID matches.
    for day_dir in base_dir.iterdir():
        if not day_dir.is_dir():
            continue

        for json_file in day_dir.glob("*.json"):
            try:
                async with aiofiles.open(json_file, "r") as f:
                    raw = await f.read()
                candidate = AttackFingerprint.model_validate_json(raw)
            except Exception:
                # Unreadable or malformed files are silently skipped.
                continue
            if candidate.event_id == event_id:
                return candidate

    return None
129
+
130
+
131
async def update_event(
    event_id: str,
    updates: dict,
    storage_path: Optional[Path] = None,
) -> bool:
    """Update an existing event.

    Args:
        event_id: Event identifier
        updates: Dictionary of fields to update
        storage_path: Base directory for event storage

    Returns:
        True if event was found and updated, False otherwise
    """
    base_dir = resolve_event_storage_path(storage_path)
    if not base_dir.exists():
        return False

    # Walk every date directory looking for the matching event file.
    for day_dir in base_dir.iterdir():
        if not day_dir.is_dir():
            continue

        for json_file in day_dir.glob("*.json"):
            try:
                async with aiofiles.open(json_file, "r") as f:
                    raw = await f.read()
                current = AttackFingerprint.model_validate_json(raw)

                if current.event_id != event_id:
                    continue

                # Merge updates into the stored fields and re-validate.
                merged = current.model_dump()
                merged.update(updates)
                replacement = AttackFingerprint(**merged)

                # Persist the updated fingerprint in place.
                async with aiofiles.open(json_file, "w") as f:
                    await f.write(replacement.model_dump_json(indent=2))

                return True

            except Exception:
                # Unreadable or malformed files are silently skipped.
                continue

    return False