entari-plugin-hyw 4.0.0rc17__py3-none-any.whl → 4.0.0rc19__py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release.


This version of entari-plugin-hyw might be problematic.

Files changed (55)
  1. entari_plugin_hyw-4.0.0rc19.dist-info/METADATA +26 -0
  2. entari_plugin_hyw-4.0.0rc19.dist-info/RECORD +4 -0
  3. entari_plugin_hyw-4.0.0rc19.dist-info/top_level.txt +1 -0
  4. entari_plugin_hyw/__init__.py +0 -914
  5. entari_plugin_hyw/filters.py +0 -83
  6. entari_plugin_hyw/history.py +0 -251
  7. entari_plugin_hyw/misc.py +0 -214
  8. entari_plugin_hyw/search_cache.py +0 -253
  9. entari_plugin_hyw-4.0.0rc17.dist-info/METADATA +0 -119
  10. entari_plugin_hyw-4.0.0rc17.dist-info/RECORD +0 -52
  11. entari_plugin_hyw-4.0.0rc17.dist-info/top_level.txt +0 -2
  12. hyw_core/__init__.py +0 -94
  13. hyw_core/agent.py +0 -876
  14. hyw_core/browser_control/__init__.py +0 -63
  15. hyw_core/browser_control/assets/card-dist/index.html +0 -429
  16. hyw_core/browser_control/assets/card-dist/logos/anthropic.svg +0 -1
  17. hyw_core/browser_control/assets/card-dist/logos/cerebras.svg +0 -9
  18. hyw_core/browser_control/assets/card-dist/logos/deepseek.png +0 -0
  19. hyw_core/browser_control/assets/card-dist/logos/gemini.svg +0 -1
  20. hyw_core/browser_control/assets/card-dist/logos/google.svg +0 -1
  21. hyw_core/browser_control/assets/card-dist/logos/grok.png +0 -0
  22. hyw_core/browser_control/assets/card-dist/logos/huggingface.png +0 -0
  23. hyw_core/browser_control/assets/card-dist/logos/microsoft.svg +0 -15
  24. hyw_core/browser_control/assets/card-dist/logos/minimax.png +0 -0
  25. hyw_core/browser_control/assets/card-dist/logos/mistral.png +0 -0
  26. hyw_core/browser_control/assets/card-dist/logos/nvida.png +0 -0
  27. hyw_core/browser_control/assets/card-dist/logos/openai.svg +0 -1
  28. hyw_core/browser_control/assets/card-dist/logos/openrouter.png +0 -0
  29. hyw_core/browser_control/assets/card-dist/logos/perplexity.svg +0 -24
  30. hyw_core/browser_control/assets/card-dist/logos/qwen.png +0 -0
  31. hyw_core/browser_control/assets/card-dist/logos/xai.png +0 -0
  32. hyw_core/browser_control/assets/card-dist/logos/xiaomi.png +0 -0
  33. hyw_core/browser_control/assets/card-dist/logos/zai.png +0 -0
  34. hyw_core/browser_control/assets/card-dist/vite.svg +0 -1
  35. hyw_core/browser_control/engines/__init__.py +0 -15
  36. hyw_core/browser_control/engines/base.py +0 -13
  37. hyw_core/browser_control/engines/default.py +0 -166
  38. hyw_core/browser_control/engines/duckduckgo.py +0 -171
  39. hyw_core/browser_control/landing.html +0 -172
  40. hyw_core/browser_control/manager.py +0 -173
  41. hyw_core/browser_control/renderer.py +0 -446
  42. hyw_core/browser_control/service.py +0 -1002
  43. hyw_core/config.py +0 -154
  44. hyw_core/core.py +0 -454
  45. hyw_core/crawling/__init__.py +0 -18
  46. hyw_core/crawling/completeness.py +0 -437
  47. hyw_core/crawling/models.py +0 -88
  48. hyw_core/definitions.py +0 -166
  49. hyw_core/image_cache.py +0 -274
  50. hyw_core/pipeline.py +0 -502
  51. hyw_core/search.py +0 -169
  52. hyw_core/stages/__init__.py +0 -21
  53. hyw_core/stages/base.py +0 -95
  54. hyw_core/stages/summary.py +0 -218
  55. {entari_plugin_hyw-4.0.0rc17.dist-info → entari_plugin_hyw-4.0.0rc19.dist-info}/WHEEL +0 -0
entari_plugin_hyw/filters.py DELETED
@@ -1,83 +0,0 @@
- """
- Filter syntax parsing utilities.
- """
-
- import re
- from typing import List, Tuple, Optional
-
-
- def parse_filter_syntax(query: str, max_count: int = 3):
-     """
-     Parse enhanced filter syntax supporting:
-     - Chinese/English colons (： :) and commas (， ,)
-     - Multiple filters: "mcmod=2, github=1 : xxx"
-     - Index lists: "1, 2, 3 : xxx"
-     - Max total selections
-
-     Returns:
-         (filters, search_query, error_msg)
-         filters: list of (filter_type, filter_value, count) tuples
-             filter_type: 'index' or 'link'
-             count: how many to get (default 1)
-         search_query: the actual search query
-         error_msg: error message if exceeded max
-     """
-     if not query:
-         return [], query, None
-
-     # Skip filter parsing if query contains URL (has :// pattern)
-     if re.search(r'https?://', query):
-         return [], query.strip(), None
-
-     # Normalize Chinese punctuation to English
-     normalized = query.replace('：', ':').replace('，', ',').replace('、', ',')
-
-     # Handle escaped colons: \: or /: -> placeholder
-     normalized = re.sub(r'[/\\]:', '\x00COLON\x00', normalized)
-
-     # Split by colon - last part is the search query
-     parts = normalized.split(':')
-     if len(parts) < 2:
-         # No colon found, restore escaped colons and return as-is
-         return [], query.replace('\\:', ':').replace('/:', ':'), None
-
-     # Everything after the last colon is the search query
-     search_query = parts[-1].strip().replace('\x00COLON\x00', ':')
-
-     # Everything before is the filter specification
-     filter_spec = ':'.join(parts[:-1]).strip().replace('\x00COLON\x00', ':')
-
-     if not filter_spec or not search_query:
-         return [], query.replace('\\:', ':').replace('/:', ':'), None
-
-     # Parse filter specifications (comma-separated)
-     filter_items = [f.strip() for f in filter_spec.split(',') if f.strip()]
-
-     filters = []
-     for item in filter_items:
-         # Check for "filter=count" pattern (e.g., "mcmod=2")
-         eq_match = re.match(r'^(\w+)\s*=\s*(\d+)$', item)
-         if eq_match:
-             filter_name = eq_match.group(1).lower()
-             count = int(eq_match.group(2))
-             filters.append(('link', filter_name, count))
-         elif item.isdigit():
-             # Pure index
-             filters.append(('index', int(item), 1))
-         else:
-             # Filter name without count (default count=1)
-             filters.append(('link', item.lower(), 1))
-
-     # Calculate total count
-     total = sum(f[2] for f in filters)
-     if total > max_count:
-         return [], search_query, f"最多选择{max_count}个结果 (当前选择了{total}个)"
-
-     # Append filter names to search query
-     # Extract filter names (only 'link' type, skip 'index' type)
-     filter_names = [f[1] for f in filters if f[0] == 'link']
-     if filter_names:
-         # Append filter names to search query: "search_query filter1 filter2"
-         search_query = f"{search_query} {' '.join(filter_names)}"
-
-     return filters, search_query, None
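
For context on what was removed: parse_filter_syntax turns a prefixed query into a list of (filter_type, value, count) tuples plus a rewritten search string. A minimal sketch of the expected behaviour, assuming the rc17 wheel (which still ships entari_plugin_hyw/filters.py) is installed; the query strings below are illustrative:

```python
# Sketch only: parse_filter_syntax ships in the rc17 wheel's entari_plugin_hyw/filters.py
# and is removed in rc19; the query strings are illustrative.
from entari_plugin_hyw.filters import parse_filter_syntax

# "mcmod=2" -> take 2 results via the mcmod filter; "1" -> the result at index 1.
filters, search_query, error = parse_filter_syntax("mcmod=2, 1 : forge mods")
print(filters)       # [('link', 'mcmod', 2), ('index', 1, 1)]
print(search_query)  # 'forge mods mcmod' (link filter names get appended to the query)
print(error)         # None

# Requesting more than max_count results yields an error message instead of filters.
_, _, err = parse_filter_syntax("mcmod=3, github=2 : fabric", max_count=3)
print(err is not None)  # True (5 selections exceed max_count=3)
```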
entari_plugin_hyw/history.py DELETED
@@ -1,251 +0,0 @@
- import random
- import string
- from typing import Dict, List, Any, Optional
-
- class HistoryManager:
-     def __init__(self):
-         self._history: Dict[str, List[Dict[str, Any]]] = {}
-         self._metadata: Dict[str, Dict[str, Any]] = {}
-         self._mapping: Dict[str, str] = {}
-         self._context_latest: Dict[str, str] = {}
-
-         # New: Short code management
-         self._short_codes: Dict[str, str] = {} # code -> key
-         self._key_to_code: Dict[str, str] = {} # key -> code
-         self._context_history: Dict[str, List[str]] = {} # context_id -> list of keys
-
-     def is_bot_message(self, message_id: str) -> bool:
-         """Check if the message ID belongs to a bot message"""
-         return message_id in self._history
-
-     def generate_short_code(self) -> str:
-         """Generate a unique 4-digit hex code"""
-         while True:
-             code = ''.join(random.choices(string.hexdigits.lower(), k=4))
-             if code not in self._short_codes:
-                 return code
-
-     def get_conversation_id(self, message_id: str) -> Optional[str]:
-         return self._mapping.get(message_id)
-
-     def get_key_by_code(self, code: str) -> Optional[str]:
-         return self._short_codes.get(code.lower())
-
-     def get_code_by_key(self, key: str) -> Optional[str]:
-         return self._key_to_code.get(key)
-
-     def get_history(self, key: str) -> List[Dict[str, Any]]:
-         return self._history.get(key, [])
-
-     def get_metadata(self, key: str) -> Dict[str, Any]:
-         return self._metadata.get(key, {})
-
-     def get_latest_from_context(self, context_id: str) -> Optional[str]:
-         return self._context_latest.get(context_id)
-
-     def list_by_context(self, context_id: str, limit: int = 10) -> List[str]:
-         """Return list of keys for a context, most recent first"""
-         keys = self._context_history.get(context_id, [])
-         return keys[-limit:][::-1]
-
-     def remember(self, message_id: Optional[str], history: List[Dict[str, Any]], related_ids: List[str], metadata: Optional[Dict[str, Any]] = None, context_id: Optional[str] = None, code: Optional[str] = None):
-         if not message_id:
-             return
-
-         key = message_id
-         self._history[key] = history
-         if metadata:
-             self._metadata[key] = metadata
-
-         self._mapping[key] = key
-         for rid in related_ids:
-             if rid:
-                 self._mapping[rid] = key
-
-         # Generate or use provided short code
-         if key not in self._key_to_code:
-             if not code:
-                 code = self.generate_short_code()
-             self._short_codes[code] = key
-             self._key_to_code[key] = code
-
-         if context_id:
-             self._context_latest[context_id] = key
-             if context_id not in self._context_history:
-                 self._context_history[context_id] = []
-             self._context_history[context_id].append(key)
-
-     def save_to_disk(self, key: str, save_root: str = "data/conversations", image_path: Optional[str] = None, web_results: Optional[List[Dict]] = None, vision_trace: Optional[Dict] = None, instruct_traces: Optional[List[Dict]] = None):
-         """Save conversation history to specific folder structure"""
-         import os
-         import time
-         import re
-         import shutil
-         import json
-
-         if key not in self._history and not web_results:
-             return
-
-         try:
-             # Extract user's first message (question) for folder name
-             user_question = "unknown_query"
-             if key in self._history:
-                 for msg in self._history[key]:
-                     if msg.get("role") == "user":
-                         content = msg.get("content", "")
-                         if isinstance(content, list):
-                             for item in content:
-                                 if isinstance(item, dict) and item.get("type") == "text":
-                                     user_question = item.get("text", "")
-                                     break
-                         else:
-                             user_question = str(content)
-                         break
-
-             # Use raw query from first web result if available and no history (for pure search debug)
-             if user_question == "unknown_query" and web_results and len(web_results) > 0:
-                 q = web_results[0].get("query", "")
-                 if q: user_question = q
-
-             # Clean and truncate question
-             question_part = re.sub(r'[\\/:*?"<>|\n\r\t]', '', user_question)[:20].strip()
-             if not question_part:
-                 question_part = "conversation"
-
-             # Create folder: YYYYMMDD_HHMMSS_question
-             time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime())
-             folder_name = f"{time_str}_{question_part}"
-
-             # Auto-resolve relative paths to absolute if needed
-             if not os.path.isabs(save_root):
-                 # Try to save next to the project root (assuming we are in src/...)
-                 # But safer to just use CWD
-                 save_root = os.path.abspath(save_root)
-
-             folder_path = os.path.join(save_root, folder_name)
-
-             os.makedirs(folder_path, exist_ok=True)
-
-             meta = self._metadata.get(key, {})
-
-             # 1. Save Context/Trace
-             trace_md = meta.get("trace_markdown")
-             if trace_md:
-                 with open(os.path.join(folder_path, "context_trace.md"), "w", encoding="utf-8") as f:
-                     f.write(trace_md)
-
-             # 2. Save Web Results (Search & Pages)
-             if web_results:
-                 pages_dir = os.path.join(folder_path, "pages")
-                 os.makedirs(pages_dir, exist_ok=True)
-
-                 search_buffer = [] # Buffer for unfetched search results
-
-                 for i, item in enumerate(web_results):
-                     item_type = item.get("_type", "unknown")
-                     title = item.get("title", "Untitled")
-                     url = item.get("url", "")
-                     content = item.get("content", "")
-                     item_id = item.get("_id", i + 1)
-
-                     if not content:
-                         continue
-
-                     if item_type == "search":
-                         # Collect search snippets for consolidated file
-                         search_buffer.append(f"## [{item_id}] {title}\n- **URL**: {url}\n\n{content}\n")
-
-                     elif item_type in ["page", "search_raw_page"]:
-                         # Save fetched pages/raw search pages individually
-                         clean_title = re.sub(r'[\\/:*?"<>|\n\r\t]', '', title)[:30].strip() or "page"
-                         filename = f"{item_id:02d}_{item_type}_{clean_title}.md"
-
-                         # Save screenshot if available
-                         screenshot_b64 = item.get("screenshot_b64")
-                         image_ref = ""
-                         if screenshot_b64:
-                             try:
-                                 import base64
-                                 img_filename = f"{item_id:02d}_{item_type}_{clean_title}.jpg"
-                                 img_path = os.path.join(pages_dir, img_filename)
-                                 with open(img_path, "wb") as f:
-                                     f.write(base64.b64decode(screenshot_b64))
-                                 image_ref = f"\n### Screenshot\n![Screenshot]({img_filename})\n"
-                             except Exception as e:
-                                 print(f"Failed to save screenshot for {title}: {e}")
-
-                         page_md = f"# [{item_id}] {title}\n\n"
-                         page_md += f"- **Type**: {item_type}\n"
-                         page_md += f"- **URL**: {url}\n\n"
-                         if image_ref:
-                             page_md += f"{image_ref}\n"
-                         page_md += f"---\n\n{content}\n"
-
-                         with open(os.path.join(pages_dir, filename), "w", encoding="utf-8") as f:
-                             f.write(page_md)
-
-                 # Save consolidated search results
-                 if search_buffer:
-                     with open(os.path.join(folder_path, "search_results.md"), "w", encoding="utf-8") as f:
-                         f.write(f"# Search Results\n\nGenerated at {time.strftime('%Y-%m-%d %H:%M:%S')}\n\n" + "\n---\n\n".join(search_buffer))
-
-             # 3. Save Final Response (MD)
-             final_content = ""
-             # Find last assistant message
-             for msg in reversed(self._history[key]):
-                 if msg.get("role") == "assistant":
-                     content = msg.get("content", "")
-                     if isinstance(content, str):
-                         final_content = content
-                     break
-
-             if final_content:
-                 with open(os.path.join(folder_path, "final_response.md"), "w", encoding="utf-8") as f:
-                     f.write(final_content)
-
-             # Save Output Image (Final Card)
-             if image_path and os.path.exists(image_path):
-                 try:
-                     dest_img_path = os.path.join(folder_path, "output_card.jpg")
-                     shutil.copy2(image_path, dest_img_path)
-                 except Exception as e:
-                     print(f"Failed to copy output image: {e}")
-
-             # 4. Save Vision Log (if vision stage was used)
-             if vision_trace and not vision_trace.get("skipped"):
-                 vision_md = "# Vision Stage Log\n\n"
-                 vision_md += f"- **Model**: {vision_trace.get('model', 'unknown')}\n"
-                 vision_md += f"- **Time**: {vision_trace.get('time', 0):.2f}s\n"
-                 vision_md += f"- **Images Count**: {vision_trace.get('images_count', 0)}\n"
-                 vision_md += f"- **Input Tokens**: {vision_trace.get('usage', {}).get('input_tokens', 0)}\n"
-                 vision_md += f"- **Output Tokens**: {vision_trace.get('usage', {}).get('output_tokens', 0)}\n\n"
-                 vision_md += "## Vision Description Output\n\n"
-                 vision_md += f"```\n{vision_trace.get('output', '')}\n```\n"
-
-                 with open(os.path.join(folder_path, "vision_log.md"), "w", encoding="utf-8") as f:
-                     f.write(vision_md)
-
-             # 5. Save Instruct Log (all instruct rounds)
-             if instruct_traces:
-                 instruct_md = "# Instruct Stage Log\n\n"
-                 for i, trace in enumerate(instruct_traces):
-                     stage_name = trace.get("stage_name", f"Round {i+1}")
-                     instruct_md += f"## {stage_name}\n\n"
-                     instruct_md += f"- **Model**: {trace.get('model', 'unknown')}\n"
-                     instruct_md += f"- **Time**: {trace.get('time', 0):.2f}s\n"
-                     instruct_md += f"- **Tool Calls**: {trace.get('tool_calls', 0)}\n"
-                     instruct_md += f"- **Input Tokens**: {trace.get('usage', {}).get('input_tokens', 0)}\n"
-                     instruct_md += f"- **Output Tokens**: {trace.get('usage', {}).get('output_tokens', 0)}\n\n"
-
-                     output = trace.get("output", "")
-                     if output:
-                         instruct_md += "### Reasoning Output\n\n"
-                         instruct_md += f"```\n{output}\n```\n\n"
-
-                     instruct_md += "---\n\n"
-
-                 with open(os.path.join(folder_path, "instruct_log.md"), "w", encoding="utf-8") as f:
-                     f.write(instruct_md)
-
-         except Exception as e:
-             print(f"Failed to save conversation: {e}")
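
The removed HistoryManager keeps an in-memory map from message IDs to conversation history, plus auto-generated 4-character hex short codes and per-context recency tracking. A minimal sketch of that flow, assuming the rc17 wheel (which still ships entari_plugin_hyw/history.py) is installed; the message IDs and contents below are illustrative:

```python
# Sketch only: HistoryManager ships in the rc17 wheel's entari_plugin_hyw/history.py
# and is removed in rc19; message IDs and contents here are illustrative.
from entari_plugin_hyw.history import HistoryManager

hm = HistoryManager()

# Store a finished exchange under the bot's outgoing message id; related_ids lets
# replies to the triggering user message resolve to the same conversation key.
hm.remember(
    message_id="bot-msg-42",
    history=[
        {"role": "user", "content": "hello"},
        {"role": "assistant", "content": "hi there"},
    ],
    related_ids=["user-msg-41"],
    metadata={"trace_markdown": "# trace"},
    context_id="group-123",
)

code = hm.get_code_by_key("bot-msg-42")              # auto-generated 4-char hex code
assert hm.get_key_by_code(code) == "bot-msg-42"
assert hm.get_conversation_id("user-msg-41") == "bot-msg-42"
assert hm.get_latest_from_context("group-123") == "bot-msg-42"
print(hm.list_by_context("group-123"))               # ['bot-msg-42'], most recent first
```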
entari_plugin_hyw/misc.py DELETED
@@ -1,214 +0,0 @@
- import json
- import base64
- import httpx
- import re
- import time
- from typing import Dict, Any, List, Optional
- from loguru import logger
- from arclet.entari import MessageChain, Image
- from typing import Tuple
- import asyncio
- from satori.exception import ActionFailed
-
- def process_onebot_json(data: Dict[str, Any]) -> str:
-     """Process OneBot JSON elements"""
-     try:
-         if "data" in data:
-             json_str = data["data"]
-             if isinstance(json_str, str):
-                 json_str = json_str.replace("&quot;", '"').replace("&#44;", ",")
-             content = json.loads(json_str)
-             if "meta" in content and "detail_1" in content["meta"]:
-                 detail = content["meta"]["detail_1"]
-                 if "desc" in detail and "qqdocurl" in detail:
-                     return f"[Shared Document] {detail['desc']}: {detail['qqdocurl']}"
-     except Exception as e:
-         logger.warning(f"Failed to process JSON element: {e}")
-     return ""
-
-
- async def download_image(url: str) -> bytes:
-     """下载图片"""
-     try:
-         async with httpx.AsyncClient(timeout=30.0) as client:
-             resp = await client.get(url)
-             if resp.status_code == 200:
-                 return resp.content
-             else:
-                 raise ActionFailed(f"下载图片失败,状态码: {resp.status_code}")
-     except Exception as e:
-         raise ActionFailed(f"下载图片失败: {url}, 错误: {str(e)}")
-
- async def process_images(mc: MessageChain, vision_model: Optional[str] = None) -> Tuple[List[str], Optional[str]]:
-     # If vision model is explicitly set to "off", skip image processing
-     if vision_model == "off":
-         return [], None
-
-     has_images = bool(mc.get(Image))
-     images = []
-     if has_images:
-         urls = mc[Image].map(lambda x: x.src)
-         tasks = [download_image(url) for url in urls]
-         raw_images = await asyncio.gather(*tasks)
-         import base64
-         images = [base64.b64encode(img).decode('utf-8') for img in raw_images]
-
-     return images, None
-
-
- def resolve_model_name(name: str, models_config: List[Dict[str, Any]]) -> Tuple[Optional[str], Optional[str]]:
-     """
-     Resolve a user input model name to the full API model name from config.
-     Supports partial matching if unique.
-     """
-     if not name:
-         return None, "No model name provided"
-
-     name = name.lower()
-
-     # 1. Exact match (name or id or shortname)
-     for m in models_config:
-         if m.get("name") == name or m.get("id") == name:
-             return m.get("name"), None
-
-     # 2. Key/Shortcut match
-     # Assuming the config might have keys like 'gpt4' mapping to full name
-     # But usually models list is [{'name': '...', 'provider': '...'}, ...]
-
-     # Check if 'name' matches any model 'name' partially?
-     # Or just return the name itself if it looks like a valid model ID (contains / or -)
-     if "/" in name or "-" in name or "." in name:
-         return name, None
-
-     # If not found in config specific list, and doesn't look like an ID, maybe return error
-     # But let's look for partial match in config names
-     matches = [m["name"] for m in models_config if name in m.get("name", "").lower()]
-     if len(matches) == 1:
-         return matches[0], None
-     elif len(matches) > 1:
-         return None, f"Model name '{name}' is ambiguous. Matches: {', '.join(matches[:3])}..."
-
-     # Default: assume it's a valid ID passed directly
-     return name, None
-
-
- # Hardcoded markdown for refuse answer
- REFUSE_ANSWER_MARKDOWN = """
- <summary>
- Instruct 专家分配此任务流程失败,请尝试提出其他问题~
- </summary>
- """
-
-
- async def render_refuse_answer(
-     renderer,
-     output_path: str,
-     reason: str = "Instruct 专家分配此任务流程失败,请尝试提出其他问题~",
-     theme_color: str = "#ef4444",
-     tab_id: str = None,
- ) -> bool:
-     """
-     Render a refuse-to-answer image using the provided reason.
-
-     Args:
-         renderer: ContentRenderer instance
-         output_path: Path to save the output image
-         reason: The refusal reason to display
-         theme_color: Theme color for the card
-         tab_id: Optional tab ID for reusing a prepared browser tab
-
-     Returns:
-         True if render succeeded, False otherwise
-     """
-     markdown = f"""
- # 任务中止
-
- > {reason}
- """
-     return await renderer.render(
-         markdown_content=markdown,
-         output_path=output_path,
-         stats={},
-         references=[],
-         page_references=[],
-         image_references=[],
-         stages_used=[],
-         image_timeout=1000,
-         theme_color=theme_color,
-         tab_id=tab_id,
-     )
-
-
- IMAGE_UNSUPPORTED_MARKDOWN = """
- <summary>
- 当前模型不支持图片输入,请使用支持视觉能力的模型或仅发送文本。
- </summary>
- """
-
- async def render_image_unsupported(
-     renderer,
-     output_path: str,
-     theme_color: str = "#ef4444",
-     tab_id: str = None
- ) -> bool:
-     """
-     Render a card indicating that the model does not support image input.
-     """
-     markdown = f"""
- # 图片输入不支持
-
- > 当前选择的模型不支持图片输入。
- > 请切换到支持视觉的模型,或仅发送文本内容。
- """
-     return await renderer.render(
-         markdown_content=markdown,
-         output_path=output_path,
-         stats={},
-         references=[],
-         page_references=[],
-         image_references=[],
-         stages_used=[],
-         image_timeout=1000,
-         theme_color=theme_color,
-         tab_id=tab_id
-     )
-
-
- def parse_color(color: str) -> str:
-     """Parse color string to hex format."""
-     if not color:
-         return "#ef4444"
-     color = str(color).strip()
-     if color.startswith('#') and len(color) in [4, 7]:
-         return color
-     if re.match(r'^[0-9a-fA-F]{6}$', color):
-         return f'#{color}'
-     rgb_match = re.match(r'^\(?(\d+)[,\s]+(\d+)[,\s]+(\d+)\)?$', color)
-     if rgb_match:
-         r, g, b = (max(0, min(255, int(x))) for x in rgb_match.groups())
-         return f'#{r:02x}{g:02x}{b:02x}'
-     return "#ef4444"
-
-
- class RecentEventDeduper:
-     """Deduplicates recent events based on a key with TTL."""
-
-     def __init__(self, ttl_seconds: float = 30.0, max_size: int = 2048):
-         self.ttl_seconds = ttl_seconds
-         self.max_size = max_size
-         self._seen: Dict[str, float] = {}
-
-     def seen_recently(self, key: str) -> bool:
-         now = time.time()
-         if len(self._seen) > self.max_size:
-             self._prune(now)
-         ts = self._seen.get(key)
-         if ts is None or now - ts > self.ttl_seconds:
-             self._seen[key] = now
-             return False
-         return True
-
-     def _prune(self, now: float):
-         expired = [k for k, ts in self._seen.items() if now - ts > self.ttl_seconds]
-         for k in expired:
-             self._seen.pop(k, None)
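
The removed misc.py helpers are small and self-contained. A minimal sketch of how parse_color, resolve_model_name, and RecentEventDeduper behave, assuming the rc17 wheel (which still ships entari_plugin_hyw/misc.py) is installed; the model list and keys below are illustrative:

```python
# Sketch only: these helpers exist in the rc17 wheel's entari_plugin_hyw/misc.py
# and are removed in rc19; the example values are illustrative.
from entari_plugin_hyw.misc import parse_color, resolve_model_name, RecentEventDeduper

# parse_color accepts "#rgb"/"#rrggbb", bare 6-digit hex, or "(r, g, b)",
# clamps RGB components to 0-255, and falls back to "#ef4444".
print(parse_color("1a2b3c"))         # '#1a2b3c'
print(parse_color("(255, 0, 300)"))  # '#ff00ff' (300 is clamped to 255)
print(parse_color("not a color"))    # '#ef4444'

# resolve_model_name: exact match against config "name"/"id" first, then pass
# through anything that looks like a model ID, then unique-substring match.
models = [{"name": "gpt-4o-mini"}, {"name": "deepseek-chat"}]  # illustrative config
print(resolve_model_name("deepseek-chat", models))  # ('deepseek-chat', None)
print(resolve_model_name("deep", models))           # ('deepseek-chat', None), unique partial match

# RecentEventDeduper: the first sighting of a key returns False and records it;
# repeats within ttl_seconds return True.
deduper = RecentEventDeduper(ttl_seconds=30.0)
print(deduper.seen_recently("group-123:msg-42"))  # False
print(deduper.seen_recently("group-123:msg-42"))  # True
```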