pomera-ai-commander 0.1.0 → 1.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -21
- package/README.md +105 -680
- package/bin/pomera-ai-commander.js +62 -62
- package/core/__init__.py +65 -65
- package/core/app_context.py +482 -482
- package/core/async_text_processor.py +421 -421
- package/core/backup_manager.py +655 -655
- package/core/backup_recovery_manager.py +1033 -1033
- package/core/content_hash_cache.py +508 -508
- package/core/context_menu.py +313 -313
- package/core/data_validator.py +1066 -1066
- package/core/database_connection_manager.py +744 -744
- package/core/database_curl_settings_manager.py +608 -608
- package/core/database_promera_ai_settings_manager.py +446 -446
- package/core/database_schema.py +411 -411
- package/core/database_schema_manager.py +395 -395
- package/core/database_settings_manager.py +1507 -1507
- package/core/database_settings_manager_interface.py +456 -456
- package/core/dialog_manager.py +734 -734
- package/core/efficient_line_numbers.py +510 -510
- package/core/error_handler.py +746 -746
- package/core/error_service.py +431 -431
- package/core/event_consolidator.py +511 -511
- package/core/mcp/__init__.py +43 -43
- package/core/mcp/protocol.py +288 -288
- package/core/mcp/schema.py +251 -251
- package/core/mcp/server_stdio.py +299 -299
- package/core/mcp/tool_registry.py +2372 -2345
- package/core/memory_efficient_text_widget.py +711 -711
- package/core/migration_manager.py +914 -914
- package/core/migration_test_suite.py +1085 -1085
- package/core/migration_validator.py +1143 -1143
- package/core/optimized_find_replace.py +714 -714
- package/core/optimized_pattern_engine.py +424 -424
- package/core/optimized_search_highlighter.py +552 -552
- package/core/performance_monitor.py +674 -674
- package/core/persistence_manager.py +712 -712
- package/core/progressive_stats_calculator.py +632 -632
- package/core/regex_pattern_cache.py +529 -529
- package/core/regex_pattern_library.py +350 -350
- package/core/search_operation_manager.py +434 -434
- package/core/settings_defaults_registry.py +1087 -1087
- package/core/settings_integrity_validator.py +1111 -1111
- package/core/settings_serializer.py +557 -557
- package/core/settings_validator.py +1823 -1823
- package/core/smart_stats_calculator.py +709 -709
- package/core/statistics_update_manager.py +619 -619
- package/core/stats_config_manager.py +858 -858
- package/core/streaming_text_handler.py +723 -723
- package/core/task_scheduler.py +596 -596
- package/core/update_pattern_library.py +168 -168
- package/core/visibility_monitor.py +596 -596
- package/core/widget_cache.py +498 -498
- package/mcp.json +51 -61
- package/package.json +61 -57
- package/pomera.py +7482 -7482
- package/pomera_mcp_server.py +183 -144
- package/requirements.txt +32 -0
- package/tools/__init__.py +4 -4
- package/tools/ai_tools.py +2891 -2891
- package/tools/ascii_art_generator.py +352 -352
- package/tools/base64_tools.py +183 -183
- package/tools/base_tool.py +511 -511
- package/tools/case_tool.py +308 -308
- package/tools/column_tools.py +395 -395
- package/tools/cron_tool.py +884 -884
- package/tools/curl_history.py +600 -600
- package/tools/curl_processor.py +1207 -1207
- package/tools/curl_settings.py +502 -502
- package/tools/curl_tool.py +5467 -5467
- package/tools/diff_viewer.py +1071 -1071
- package/tools/email_extraction_tool.py +248 -248
- package/tools/email_header_analyzer.py +425 -425
- package/tools/extraction_tools.py +250 -250
- package/tools/find_replace.py +1750 -1750
- package/tools/folder_file_reporter.py +1463 -1463
- package/tools/folder_file_reporter_adapter.py +480 -480
- package/tools/generator_tools.py +1216 -1216
- package/tools/hash_generator.py +255 -255
- package/tools/html_tool.py +656 -656
- package/tools/jsonxml_tool.py +729 -729
- package/tools/line_tools.py +419 -419
- package/tools/markdown_tools.py +561 -561
- package/tools/mcp_widget.py +1417 -1417
- package/tools/notes_widget.py +973 -973
- package/tools/number_base_converter.py +372 -372
- package/tools/regex_extractor.py +571 -571
- package/tools/slug_generator.py +310 -310
- package/tools/sorter_tools.py +458 -458
- package/tools/string_escape_tool.py +392 -392
- package/tools/text_statistics_tool.py +365 -365
- package/tools/text_wrapper.py +430 -430
- package/tools/timestamp_converter.py +421 -421
- package/tools/tool_loader.py +710 -710
- package/tools/translator_tools.py +522 -522
- package/tools/url_link_extractor.py +261 -261
- package/tools/url_parser.py +204 -204
- package/tools/whitespace_tools.py +355 -355
- package/tools/word_frequency_counter.py +146 -146
- package/core/__pycache__/__init__.cpython-313.pyc +0 -0
- package/core/__pycache__/app_context.cpython-313.pyc +0 -0
- package/core/__pycache__/async_text_processor.cpython-313.pyc +0 -0
- package/core/__pycache__/backup_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/backup_recovery_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/content_hash_cache.cpython-313.pyc +0 -0
- package/core/__pycache__/context_menu.cpython-313.pyc +0 -0
- package/core/__pycache__/data_validator.cpython-313.pyc +0 -0
- package/core/__pycache__/database_connection_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_curl_settings_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_promera_ai_settings_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_schema.cpython-313.pyc +0 -0
- package/core/__pycache__/database_schema_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_settings_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_settings_manager_interface.cpython-313.pyc +0 -0
- package/core/__pycache__/dialog_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/efficient_line_numbers.cpython-313.pyc +0 -0
- package/core/__pycache__/error_handler.cpython-313.pyc +0 -0
- package/core/__pycache__/error_service.cpython-313.pyc +0 -0
- package/core/__pycache__/event_consolidator.cpython-313.pyc +0 -0
- package/core/__pycache__/memory_efficient_text_widget.cpython-313.pyc +0 -0
- package/core/__pycache__/migration_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/migration_test_suite.cpython-313.pyc +0 -0
- package/core/__pycache__/migration_validator.cpython-313.pyc +0 -0
- package/core/__pycache__/optimized_find_replace.cpython-313.pyc +0 -0
- package/core/__pycache__/optimized_pattern_engine.cpython-313.pyc +0 -0
- package/core/__pycache__/optimized_search_highlighter.cpython-313.pyc +0 -0
- package/core/__pycache__/performance_monitor.cpython-313.pyc +0 -0
- package/core/__pycache__/persistence_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/progressive_stats_calculator.cpython-313.pyc +0 -0
- package/core/__pycache__/regex_pattern_cache.cpython-313.pyc +0 -0
- package/core/__pycache__/regex_pattern_library.cpython-313.pyc +0 -0
- package/core/__pycache__/search_operation_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/settings_defaults_registry.cpython-313.pyc +0 -0
- package/core/__pycache__/settings_integrity_validator.cpython-313.pyc +0 -0
- package/core/__pycache__/settings_serializer.cpython-313.pyc +0 -0
- package/core/__pycache__/settings_validator.cpython-313.pyc +0 -0
- package/core/__pycache__/smart_stats_calculator.cpython-313.pyc +0 -0
- package/core/__pycache__/statistics_update_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/stats_config_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/streaming_text_handler.cpython-313.pyc +0 -0
- package/core/__pycache__/task_scheduler.cpython-313.pyc +0 -0
- package/core/__pycache__/visibility_monitor.cpython-313.pyc +0 -0
- package/core/__pycache__/widget_cache.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/__init__.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/protocol.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/schema.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/server_stdio.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/tool_registry.cpython-313.pyc +0 -0
- package/tools/__pycache__/__init__.cpython-313.pyc +0 -0
- package/tools/__pycache__/ai_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/ascii_art_generator.cpython-313.pyc +0 -0
- package/tools/__pycache__/base64_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/base_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/case_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/column_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/cron_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/curl_history.cpython-313.pyc +0 -0
- package/tools/__pycache__/curl_processor.cpython-313.pyc +0 -0
- package/tools/__pycache__/curl_settings.cpython-313.pyc +0 -0
- package/tools/__pycache__/curl_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/diff_viewer.cpython-313.pyc +0 -0
- package/tools/__pycache__/email_extraction_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/email_header_analyzer.cpython-313.pyc +0 -0
- package/tools/__pycache__/extraction_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/find_replace.cpython-313.pyc +0 -0
- package/tools/__pycache__/folder_file_reporter.cpython-313.pyc +0 -0
- package/tools/__pycache__/folder_file_reporter_adapter.cpython-313.pyc +0 -0
- package/tools/__pycache__/generator_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/hash_generator.cpython-313.pyc +0 -0
- package/tools/__pycache__/html_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/huggingface_helper.cpython-313.pyc +0 -0
- package/tools/__pycache__/jsonxml_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/line_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/list_comparator.cpython-313.pyc +0 -0
- package/tools/__pycache__/markdown_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/mcp_widget.cpython-313.pyc +0 -0
- package/tools/__pycache__/notes_widget.cpython-313.pyc +0 -0
- package/tools/__pycache__/number_base_converter.cpython-313.pyc +0 -0
- package/tools/__pycache__/regex_extractor.cpython-313.pyc +0 -0
- package/tools/__pycache__/slug_generator.cpython-313.pyc +0 -0
- package/tools/__pycache__/sorter_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/string_escape_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/text_statistics_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/text_wrapper.cpython-313.pyc +0 -0
- package/tools/__pycache__/timestamp_converter.cpython-313.pyc +0 -0
- package/tools/__pycache__/tool_loader.cpython-313.pyc +0 -0
- package/tools/__pycache__/translator_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/url_link_extractor.cpython-313.pyc +0 -0
- package/tools/__pycache__/url_parser.cpython-313.pyc +0 -0
- package/tools/__pycache__/whitespace_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/word_frequency_counter.cpython-313.pyc +0 -0
|
@@ -1,2345 +1,2372 @@
|
|
|
1
|
-
"""
|
|
2
|
-
MCP Tool Registry - Maps Pomera tools to MCP tool definitions
|
|
3
|
-
|
|
4
|
-
This module provides:
|
|
5
|
-
- MCPToolAdapter: Wrapper for Pomera tools to expose them via MCP
|
|
6
|
-
- ToolRegistry: Central registry for all MCP-exposed tools
|
|
7
|
-
|
|
8
|
-
Tools are registered with their input schemas and handlers,
|
|
9
|
-
allowing external MCP clients to discover and execute them.
|
|
10
|
-
"""
|
|
11
|
-
|
|
12
|
-
import logging
|
|
13
|
-
from typing import Dict, Any, List, Callable, Optional
|
|
14
|
-
from dataclasses import dataclass
|
|
15
|
-
|
|
16
|
-
from .schema import MCPTool, MCPToolResult
|
|
17
|
-
|
|
18
|
-
logger = logging.getLogger(__name__)
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
@dataclass
|
|
22
|
-
class MCPToolAdapter:
|
|
23
|
-
"""
|
|
24
|
-
Adapter that wraps a Pomera tool for MCP exposure.
|
|
25
|
-
|
|
26
|
-
Attributes:
|
|
27
|
-
name: MCP tool name (e.g., 'pomera_case_transform')
|
|
28
|
-
description: Human-readable description
|
|
29
|
-
input_schema: JSON Schema for input validation
|
|
30
|
-
handler: Function that executes the tool
|
|
31
|
-
"""
|
|
32
|
-
name: str
|
|
33
|
-
description: str
|
|
34
|
-
input_schema: Dict[str, Any]
|
|
35
|
-
handler: Callable[[Dict[str, Any]], str]
|
|
36
|
-
|
|
37
|
-
def to_mcp_tool(self) -> MCPTool:
|
|
38
|
-
"""Convert to MCPTool definition."""
|
|
39
|
-
return MCPTool(
|
|
40
|
-
name=self.name,
|
|
41
|
-
description=self.description,
|
|
42
|
-
inputSchema=self.input_schema
|
|
43
|
-
)
|
|
44
|
-
|
|
45
|
-
def execute(self, arguments: Dict[str, Any]) -> MCPToolResult:
|
|
46
|
-
"""
|
|
47
|
-
Execute the tool with given arguments.
|
|
48
|
-
|
|
49
|
-
Args:
|
|
50
|
-
arguments: Tool arguments matching input_schema
|
|
51
|
-
|
|
52
|
-
Returns:
|
|
53
|
-
MCPToolResult with execution output
|
|
54
|
-
"""
|
|
55
|
-
try:
|
|
56
|
-
result = self.handler(arguments)
|
|
57
|
-
return MCPToolResult.text(result)
|
|
58
|
-
except Exception as e:
|
|
59
|
-
logger.exception(f"Tool execution failed: {self.name}")
|
|
60
|
-
return MCPToolResult.error(f"Tool execution failed: {str(e)}")
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
class ToolRegistry:
|
|
64
|
-
"""
|
|
65
|
-
Central registry for MCP-exposed tools.
|
|
66
|
-
|
|
67
|
-
Manages tool registration, discovery, and execution.
|
|
68
|
-
Automatically registers built-in Pomera tools on initialization.
|
|
69
|
-
"""
|
|
70
|
-
|
|
71
|
-
def __init__(self, register_builtins: bool = True):
|
|
72
|
-
"""
|
|
73
|
-
Initialize the tool registry.
|
|
74
|
-
|
|
75
|
-
Args:
|
|
76
|
-
register_builtins: Whether to register built-in tools
|
|
77
|
-
"""
|
|
78
|
-
self._tools: Dict[str, MCPToolAdapter] = {}
|
|
79
|
-
self._logger = logging.getLogger(__name__)
|
|
80
|
-
|
|
81
|
-
if register_builtins:
|
|
82
|
-
self._register_builtin_tools()
|
|
83
|
-
|
|
84
|
-
def register(self, adapter: MCPToolAdapter) -> None:
|
|
85
|
-
"""
|
|
86
|
-
Register a tool adapter.
|
|
87
|
-
|
|
88
|
-
Args:
|
|
89
|
-
adapter: MCPToolAdapter to register
|
|
90
|
-
"""
|
|
91
|
-
self._tools[adapter.name] = adapter
|
|
92
|
-
self._logger.info(f"Registered MCP tool: {adapter.name}")
|
|
93
|
-
|
|
94
|
-
def unregister(self, name: str) -> bool:
|
|
95
|
-
"""
|
|
96
|
-
Unregister a tool by name.
|
|
97
|
-
|
|
98
|
-
Args:
|
|
99
|
-
name: Tool name to unregister
|
|
100
|
-
|
|
101
|
-
Returns:
|
|
102
|
-
True if tool was removed, False if not found
|
|
103
|
-
"""
|
|
104
|
-
if name in self._tools:
|
|
105
|
-
del self._tools[name]
|
|
106
|
-
self._logger.info(f"Unregistered MCP tool: {name}")
|
|
107
|
-
return True
|
|
108
|
-
return False
|
|
109
|
-
|
|
110
|
-
def get_tool(self, name: str) -> Optional[MCPToolAdapter]:
|
|
111
|
-
"""
|
|
112
|
-
Get a tool adapter by name.
|
|
113
|
-
|
|
114
|
-
Args:
|
|
115
|
-
name: Tool name
|
|
116
|
-
|
|
117
|
-
Returns:
|
|
118
|
-
MCPToolAdapter or None if not found
|
|
119
|
-
"""
|
|
120
|
-
return self._tools.get(name)
|
|
121
|
-
|
|
122
|
-
def list_tools(self) -> List[MCPTool]:
|
|
123
|
-
"""
|
|
124
|
-
Get list of all registered tools as MCPTool definitions.
|
|
125
|
-
|
|
126
|
-
Returns:
|
|
127
|
-
List of MCPTool objects
|
|
128
|
-
"""
|
|
129
|
-
return [adapter.to_mcp_tool() for adapter in self._tools.values()]
|
|
130
|
-
|
|
131
|
-
def execute(self, name: str, arguments: Dict[str, Any]) -> MCPToolResult:
|
|
132
|
-
"""
|
|
133
|
-
Execute a tool by name.
|
|
134
|
-
|
|
135
|
-
Args:
|
|
136
|
-
name: Tool name
|
|
137
|
-
arguments: Tool arguments
|
|
138
|
-
|
|
139
|
-
Returns:
|
|
140
|
-
MCPToolResult with execution output
|
|
141
|
-
|
|
142
|
-
Raises:
|
|
143
|
-
KeyError: If tool not found
|
|
144
|
-
"""
|
|
145
|
-
adapter = self._tools.get(name)
|
|
146
|
-
if adapter is None:
|
|
147
|
-
return MCPToolResult.error(f"Tool not found: {name}")
|
|
148
|
-
|
|
149
|
-
return adapter.execute(arguments)
|
|
150
|
-
|
|
151
|
-
def get_tool_names(self) -> List[str]:
|
|
152
|
-
"""Get list of all registered tool names."""
|
|
153
|
-
return list(self._tools.keys())
|
|
154
|
-
|
|
155
|
-
def __len__(self) -> int:
|
|
156
|
-
"""Return number of registered tools."""
|
|
157
|
-
return len(self._tools)
|
|
158
|
-
|
|
159
|
-
def __contains__(self, name: str) -> bool:
|
|
160
|
-
"""Check if tool is registered."""
|
|
161
|
-
return name in self._tools
|
|
162
|
-
|
|
163
|
-
# =========================================================================
|
|
164
|
-
# Built-in Tool Registration
|
|
165
|
-
# =========================================================================
|
|
166
|
-
|
|
167
|
-
def _register_builtin_tools(self) -> None:
|
|
168
|
-
"""Register all built-in Pomera tools."""
|
|
169
|
-
# Core text transformation tools
|
|
170
|
-
self._register_case_tool()
|
|
171
|
-
self.
|
|
172
|
-
self.
|
|
173
|
-
self.
|
|
174
|
-
self.
|
|
175
|
-
self.
|
|
176
|
-
self.
|
|
177
|
-
self.
|
|
178
|
-
self.
|
|
179
|
-
self.
|
|
180
|
-
self.
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
self.
|
|
186
|
-
self.
|
|
187
|
-
self.
|
|
188
|
-
self.
|
|
189
|
-
self.
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
self.
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
self.
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
"
|
|
218
|
-
"description": "
|
|
219
|
-
},
|
|
220
|
-
"
|
|
221
|
-
"type": "string",
|
|
222
|
-
"
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
"text": {
|
|
267
|
-
"type": "string",
|
|
268
|
-
"description": "
|
|
269
|
-
},
|
|
270
|
-
"
|
|
271
|
-
"type": "string",
|
|
272
|
-
"
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
"
|
|
301
|
-
"
|
|
302
|
-
}
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
}
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
return
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
|
|
405
|
-
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
"
|
|
411
|
-
"
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
"
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
"
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
"
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
|
|
442
|
-
|
|
443
|
-
operation
|
|
444
|
-
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
return
|
|
448
|
-
elif operation == "
|
|
449
|
-
return
|
|
450
|
-
elif operation == "
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
|
|
459
|
-
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
464
|
-
|
|
465
|
-
|
|
466
|
-
|
|
467
|
-
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
|
|
471
|
-
|
|
472
|
-
"description": "
|
|
473
|
-
},
|
|
474
|
-
"
|
|
475
|
-
"type": "string",
|
|
476
|
-
"enum": ["
|
|
477
|
-
|
|
478
|
-
"
|
|
479
|
-
}
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
|
|
504
|
-
if operation
|
|
505
|
-
|
|
506
|
-
|
|
507
|
-
|
|
508
|
-
|
|
509
|
-
|
|
510
|
-
|
|
511
|
-
|
|
512
|
-
|
|
513
|
-
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
"
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
"
|
|
538
|
-
|
|
539
|
-
|
|
540
|
-
|
|
541
|
-
|
|
542
|
-
|
|
543
|
-
|
|
544
|
-
|
|
545
|
-
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
|
|
549
|
-
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
|
|
556
|
-
|
|
557
|
-
|
|
558
|
-
|
|
559
|
-
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
|
|
563
|
-
|
|
564
|
-
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
|
|
575
|
-
"
|
|
576
|
-
"type": "
|
|
577
|
-
"description": "
|
|
578
|
-
|
|
579
|
-
|
|
580
|
-
|
|
581
|
-
|
|
582
|
-
|
|
583
|
-
|
|
584
|
-
|
|
585
|
-
|
|
586
|
-
|
|
587
|
-
|
|
588
|
-
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
|
|
595
|
-
|
|
596
|
-
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
|
|
602
|
-
|
|
603
|
-
|
|
604
|
-
|
|
605
|
-
|
|
606
|
-
|
|
607
|
-
|
|
608
|
-
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
|
|
612
|
-
|
|
613
|
-
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
|
|
618
|
-
|
|
619
|
-
|
|
620
|
-
|
|
621
|
-
|
|
622
|
-
|
|
623
|
-
|
|
624
|
-
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
"description": "
|
|
633
|
-
},
|
|
634
|
-
"
|
|
635
|
-
"type": "integer",
|
|
636
|
-
"description": "
|
|
637
|
-
"default":
|
|
638
|
-
}
|
|
639
|
-
},
|
|
640
|
-
"required": ["text"
|
|
641
|
-
},
|
|
642
|
-
handler=self.
|
|
643
|
-
))
|
|
644
|
-
|
|
645
|
-
def
|
|
646
|
-
"""Handle
|
|
647
|
-
import
|
|
648
|
-
import
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
658
|
-
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
|
|
662
|
-
|
|
663
|
-
|
|
664
|
-
|
|
665
|
-
|
|
666
|
-
|
|
667
|
-
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
|
|
671
|
-
|
|
672
|
-
|
|
673
|
-
|
|
674
|
-
|
|
675
|
-
|
|
676
|
-
|
|
677
|
-
|
|
678
|
-
|
|
679
|
-
|
|
680
|
-
|
|
681
|
-
|
|
682
|
-
|
|
683
|
-
|
|
684
|
-
|
|
685
|
-
|
|
686
|
-
|
|
687
|
-
|
|
688
|
-
|
|
689
|
-
|
|
690
|
-
|
|
691
|
-
|
|
692
|
-
|
|
693
|
-
|
|
694
|
-
|
|
695
|
-
|
|
696
|
-
|
|
697
|
-
|
|
698
|
-
|
|
699
|
-
|
|
700
|
-
|
|
701
|
-
|
|
702
|
-
|
|
703
|
-
|
|
704
|
-
|
|
705
|
-
|
|
706
|
-
|
|
707
|
-
|
|
708
|
-
|
|
709
|
-
|
|
710
|
-
|
|
711
|
-
|
|
712
|
-
|
|
713
|
-
|
|
714
|
-
|
|
715
|
-
|
|
716
|
-
|
|
717
|
-
|
|
718
|
-
|
|
719
|
-
|
|
720
|
-
|
|
721
|
-
|
|
722
|
-
|
|
723
|
-
|
|
724
|
-
|
|
725
|
-
|
|
726
|
-
|
|
727
|
-
|
|
728
|
-
|
|
729
|
-
|
|
730
|
-
|
|
731
|
-
|
|
732
|
-
|
|
733
|
-
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
|
|
737
|
-
|
|
738
|
-
|
|
739
|
-
|
|
740
|
-
|
|
741
|
-
|
|
742
|
-
|
|
743
|
-
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
|
|
747
|
-
|
|
748
|
-
|
|
749
|
-
}
|
|
750
|
-
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
|
|
754
|
-
|
|
755
|
-
|
|
756
|
-
|
|
757
|
-
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
|
|
761
|
-
|
|
762
|
-
|
|
763
|
-
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
|
|
767
|
-
|
|
768
|
-
|
|
769
|
-
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
|
|
773
|
-
|
|
774
|
-
|
|
775
|
-
|
|
776
|
-
|
|
777
|
-
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
|
|
782
|
-
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
|
|
810
|
-
|
|
811
|
-
|
|
812
|
-
|
|
813
|
-
|
|
814
|
-
|
|
815
|
-
|
|
816
|
-
|
|
817
|
-
|
|
818
|
-
|
|
819
|
-
|
|
820
|
-
|
|
821
|
-
|
|
822
|
-
|
|
823
|
-
|
|
824
|
-
|
|
825
|
-
|
|
826
|
-
|
|
827
|
-
|
|
828
|
-
|
|
829
|
-
|
|
830
|
-
|
|
831
|
-
|
|
832
|
-
|
|
833
|
-
|
|
834
|
-
|
|
835
|
-
"
|
|
836
|
-
|
|
837
|
-
|
|
838
|
-
"
|
|
839
|
-
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
|
|
843
|
-
|
|
844
|
-
|
|
845
|
-
|
|
846
|
-
|
|
847
|
-
|
|
848
|
-
|
|
849
|
-
|
|
850
|
-
|
|
851
|
-
|
|
852
|
-
|
|
853
|
-
|
|
854
|
-
|
|
855
|
-
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
|
|
859
|
-
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
|
|
863
|
-
|
|
864
|
-
|
|
865
|
-
|
|
866
|
-
|
|
867
|
-
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
|
|
874
|
-
|
|
875
|
-
|
|
876
|
-
|
|
877
|
-
|
|
878
|
-
|
|
879
|
-
|
|
880
|
-
if
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
|
|
886
|
-
|
|
887
|
-
|
|
888
|
-
|
|
889
|
-
|
|
890
|
-
|
|
891
|
-
|
|
892
|
-
|
|
893
|
-
|
|
894
|
-
|
|
895
|
-
|
|
896
|
-
|
|
897
|
-
|
|
898
|
-
|
|
899
|
-
|
|
900
|
-
|
|
901
|
-
|
|
902
|
-
|
|
903
|
-
|
|
904
|
-
|
|
905
|
-
|
|
906
|
-
|
|
907
|
-
|
|
908
|
-
"
|
|
909
|
-
"
|
|
910
|
-
}
|
|
911
|
-
|
|
912
|
-
|
|
913
|
-
|
|
914
|
-
|
|
915
|
-
|
|
916
|
-
|
|
917
|
-
|
|
918
|
-
|
|
919
|
-
|
|
920
|
-
|
|
921
|
-
|
|
922
|
-
|
|
923
|
-
|
|
924
|
-
|
|
925
|
-
|
|
926
|
-
|
|
927
|
-
|
|
928
|
-
|
|
929
|
-
|
|
930
|
-
|
|
931
|
-
|
|
932
|
-
|
|
933
|
-
|
|
934
|
-
|
|
935
|
-
|
|
936
|
-
|
|
937
|
-
|
|
938
|
-
|
|
939
|
-
"
|
|
940
|
-
|
|
941
|
-
|
|
942
|
-
|
|
943
|
-
|
|
944
|
-
|
|
945
|
-
|
|
946
|
-
|
|
947
|
-
|
|
948
|
-
|
|
949
|
-
|
|
950
|
-
|
|
951
|
-
|
|
952
|
-
|
|
953
|
-
|
|
954
|
-
|
|
955
|
-
|
|
956
|
-
|
|
957
|
-
|
|
958
|
-
|
|
959
|
-
|
|
960
|
-
|
|
961
|
-
|
|
962
|
-
|
|
963
|
-
|
|
964
|
-
|
|
965
|
-
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
|
|
969
|
-
|
|
970
|
-
|
|
971
|
-
|
|
972
|
-
|
|
973
|
-
|
|
974
|
-
|
|
975
|
-
|
|
976
|
-
|
|
977
|
-
|
|
978
|
-
|
|
979
|
-
|
|
980
|
-
|
|
981
|
-
|
|
982
|
-
|
|
983
|
-
|
|
984
|
-
|
|
985
|
-
|
|
986
|
-
|
|
987
|
-
|
|
988
|
-
|
|
989
|
-
|
|
990
|
-
|
|
991
|
-
|
|
992
|
-
|
|
993
|
-
|
|
994
|
-
|
|
995
|
-
|
|
996
|
-
|
|
997
|
-
|
|
998
|
-
|
|
999
|
-
|
|
1000
|
-
|
|
1001
|
-
|
|
1002
|
-
|
|
1003
|
-
|
|
1004
|
-
|
|
1005
|
-
|
|
1006
|
-
|
|
1007
|
-
|
|
1008
|
-
|
|
1009
|
-
|
|
1010
|
-
|
|
1011
|
-
|
|
1012
|
-
|
|
1013
|
-
|
|
1014
|
-
|
|
1015
|
-
|
|
1016
|
-
|
|
1017
|
-
|
|
1018
|
-
|
|
1019
|
-
|
|
1020
|
-
|
|
1021
|
-
|
|
1022
|
-
|
|
1023
|
-
|
|
1024
|
-
|
|
1025
|
-
|
|
1026
|
-
|
|
1027
|
-
|
|
1028
|
-
|
|
1029
|
-
|
|
1030
|
-
|
|
1031
|
-
|
|
1032
|
-
|
|
1033
|
-
|
|
1034
|
-
|
|
1035
|
-
|
|
1036
|
-
|
|
1037
|
-
|
|
1038
|
-
|
|
1039
|
-
|
|
1040
|
-
|
|
1041
|
-
|
|
1042
|
-
|
|
1043
|
-
|
|
1044
|
-
|
|
1045
|
-
|
|
1046
|
-
|
|
1047
|
-
|
|
1048
|
-
|
|
1049
|
-
|
|
1050
|
-
|
|
1051
|
-
|
|
1052
|
-
|
|
1053
|
-
|
|
1054
|
-
|
|
1055
|
-
|
|
1056
|
-
|
|
1057
|
-
|
|
1058
|
-
|
|
1059
|
-
"
|
|
1060
|
-
|
|
1061
|
-
|
|
1062
|
-
"
|
|
1063
|
-
|
|
1064
|
-
|
|
1065
|
-
|
|
1066
|
-
"
|
|
1067
|
-
"
|
|
1068
|
-
|
|
1069
|
-
|
|
1070
|
-
|
|
1071
|
-
|
|
1072
|
-
"
|
|
1073
|
-
|
|
1074
|
-
|
|
1075
|
-
|
|
1076
|
-
|
|
1077
|
-
"
|
|
1078
|
-
"
|
|
1079
|
-
|
|
1080
|
-
|
|
1081
|
-
|
|
1082
|
-
|
|
1083
|
-
|
|
1084
|
-
|
|
1085
|
-
|
|
1086
|
-
|
|
1087
|
-
|
|
1088
|
-
|
|
1089
|
-
|
|
1090
|
-
|
|
1091
|
-
|
|
1092
|
-
|
|
1093
|
-
|
|
1094
|
-
|
|
1095
|
-
|
|
1096
|
-
|
|
1097
|
-
|
|
1098
|
-
|
|
1099
|
-
|
|
1100
|
-
|
|
1101
|
-
|
|
1102
|
-
|
|
1103
|
-
|
|
1104
|
-
|
|
1105
|
-
|
|
1106
|
-
|
|
1107
|
-
|
|
1108
|
-
|
|
1109
|
-
|
|
1110
|
-
|
|
1111
|
-
|
|
1112
|
-
|
|
1113
|
-
|
|
1114
|
-
|
|
1115
|
-
|
|
1116
|
-
|
|
1117
|
-
|
|
1118
|
-
|
|
1119
|
-
|
|
1120
|
-
|
|
1121
|
-
|
|
1122
|
-
|
|
1123
|
-
|
|
1124
|
-
|
|
1125
|
-
|
|
1126
|
-
|
|
1127
|
-
|
|
1128
|
-
|
|
1129
|
-
|
|
1130
|
-
|
|
1131
|
-
|
|
1132
|
-
|
|
1133
|
-
|
|
1134
|
-
|
|
1135
|
-
|
|
1136
|
-
|
|
1137
|
-
|
|
1138
|
-
|
|
1139
|
-
|
|
1140
|
-
|
|
1141
|
-
|
|
1142
|
-
|
|
1143
|
-
|
|
1144
|
-
|
|
1145
|
-
|
|
1146
|
-
|
|
1147
|
-
|
|
1148
|
-
|
|
1149
|
-
|
|
1150
|
-
|
|
1151
|
-
|
|
1152
|
-
|
|
1153
|
-
|
|
1154
|
-
|
|
1155
|
-
|
|
1156
|
-
|
|
1157
|
-
|
|
1158
|
-
|
|
1159
|
-
|
|
1160
|
-
input_schema={
|
|
1161
|
-
"type": "object",
|
|
1162
|
-
"properties": {
|
|
1163
|
-
"
|
|
1164
|
-
"type": "string",
|
|
1165
|
-
"description": "
|
|
1166
|
-
},
|
|
1167
|
-
"operation": {
|
|
1168
|
-
"type": "string",
|
|
1169
|
-
"enum": ["
|
|
1170
|
-
|
|
1171
|
-
|
|
1172
|
-
|
|
1173
|
-
|
|
1174
|
-
"
|
|
1175
|
-
"
|
|
1176
|
-
|
|
1177
|
-
|
|
1178
|
-
|
|
1179
|
-
|
|
1180
|
-
|
|
1181
|
-
|
|
1182
|
-
|
|
1183
|
-
|
|
1184
|
-
|
|
1185
|
-
|
|
1186
|
-
|
|
1187
|
-
|
|
1188
|
-
|
|
1189
|
-
|
|
1190
|
-
|
|
1191
|
-
|
|
1192
|
-
|
|
1193
|
-
|
|
1194
|
-
|
|
1195
|
-
|
|
1196
|
-
|
|
1197
|
-
|
|
1198
|
-
|
|
1199
|
-
|
|
1200
|
-
|
|
1201
|
-
|
|
1202
|
-
|
|
1203
|
-
|
|
1204
|
-
return
|
|
1205
|
-
|
|
1206
|
-
|
|
1207
|
-
|
|
1208
|
-
|
|
1209
|
-
|
|
1210
|
-
|
|
1211
|
-
|
|
1212
|
-
|
|
1213
|
-
|
|
1214
|
-
|
|
1215
|
-
|
|
1216
|
-
|
|
1217
|
-
|
|
1218
|
-
|
|
1219
|
-
|
|
1220
|
-
|
|
1221
|
-
|
|
1222
|
-
|
|
1223
|
-
|
|
1224
|
-
|
|
1225
|
-
|
|
1226
|
-
|
|
1227
|
-
|
|
1228
|
-
|
|
1229
|
-
|
|
1230
|
-
|
|
1231
|
-
|
|
1232
|
-
|
|
1233
|
-
|
|
1234
|
-
|
|
1235
|
-
|
|
1236
|
-
|
|
1237
|
-
|
|
1238
|
-
|
|
1239
|
-
|
|
1240
|
-
|
|
1241
|
-
|
|
1242
|
-
|
|
1243
|
-
|
|
1244
|
-
|
|
1245
|
-
|
|
1246
|
-
|
|
1247
|
-
|
|
1248
|
-
|
|
1249
|
-
|
|
1250
|
-
|
|
1251
|
-
|
|
1252
|
-
|
|
1253
|
-
|
|
1254
|
-
|
|
1255
|
-
|
|
1256
|
-
|
|
1257
|
-
|
|
1258
|
-
|
|
1259
|
-
|
|
1260
|
-
|
|
1261
|
-
|
|
1262
|
-
|
|
1263
|
-
|
|
1264
|
-
|
|
1265
|
-
|
|
1266
|
-
|
|
1267
|
-
|
|
1268
|
-
|
|
1269
|
-
|
|
1270
|
-
|
|
1271
|
-
|
|
1272
|
-
|
|
1273
|
-
|
|
1274
|
-
|
|
1275
|
-
|
|
1276
|
-
|
|
1277
|
-
|
|
1278
|
-
|
|
1279
|
-
|
|
1280
|
-
|
|
1281
|
-
|
|
1282
|
-
|
|
1283
|
-
|
|
1284
|
-
|
|
1285
|
-
|
|
1286
|
-
|
|
1287
|
-
|
|
1288
|
-
|
|
1289
|
-
|
|
1290
|
-
|
|
1291
|
-
|
|
1292
|
-
|
|
1293
|
-
|
|
1294
|
-
|
|
1295
|
-
|
|
1296
|
-
|
|
1297
|
-
|
|
1298
|
-
|
|
1299
|
-
|
|
1300
|
-
|
|
1301
|
-
|
|
1302
|
-
|
|
1303
|
-
|
|
1304
|
-
|
|
1305
|
-
|
|
1306
|
-
|
|
1307
|
-
|
|
1308
|
-
|
|
1309
|
-
|
|
1310
|
-
|
|
1311
|
-
|
|
1312
|
-
|
|
1313
|
-
|
|
1314
|
-
|
|
1315
|
-
|
|
1316
|
-
|
|
1317
|
-
|
|
1318
|
-
|
|
1319
|
-
"
|
|
1320
|
-
"
|
|
1321
|
-
|
|
1322
|
-
|
|
1323
|
-
|
|
1324
|
-
|
|
1325
|
-
|
|
1326
|
-
|
|
1327
|
-
|
|
1328
|
-
|
|
1329
|
-
|
|
1330
|
-
|
|
1331
|
-
|
|
1332
|
-
|
|
1333
|
-
|
|
1334
|
-
|
|
1335
|
-
|
|
1336
|
-
|
|
1337
|
-
|
|
1338
|
-
|
|
1339
|
-
|
|
1340
|
-
|
|
1341
|
-
|
|
1342
|
-
},
|
|
1343
|
-
|
|
1344
|
-
|
|
1345
|
-
|
|
1346
|
-
|
|
1347
|
-
""
|
|
1348
|
-
|
|
1349
|
-
|
|
1350
|
-
|
|
1351
|
-
|
|
1352
|
-
|
|
1353
|
-
|
|
1354
|
-
|
|
1355
|
-
|
|
1356
|
-
|
|
1357
|
-
|
|
1358
|
-
|
|
1359
|
-
|
|
1360
|
-
|
|
1361
|
-
|
|
1362
|
-
|
|
1363
|
-
|
|
1364
|
-
|
|
1365
|
-
|
|
1366
|
-
|
|
1367
|
-
|
|
1368
|
-
|
|
1369
|
-
|
|
1370
|
-
|
|
1371
|
-
|
|
1372
|
-
|
|
1373
|
-
|
|
1374
|
-
|
|
1375
|
-
|
|
1376
|
-
|
|
1377
|
-
|
|
1378
|
-
|
|
1379
|
-
|
|
1380
|
-
|
|
1381
|
-
|
|
1382
|
-
|
|
1383
|
-
|
|
1384
|
-
|
|
1385
|
-
|
|
1386
|
-
|
|
1387
|
-
|
|
1388
|
-
|
|
1389
|
-
|
|
1390
|
-
|
|
1391
|
-
|
|
1392
|
-
|
|
1393
|
-
|
|
1394
|
-
|
|
1395
|
-
|
|
1396
|
-
|
|
1397
|
-
|
|
1398
|
-
|
|
1399
|
-
|
|
1400
|
-
|
|
1401
|
-
|
|
1402
|
-
|
|
1403
|
-
|
|
1404
|
-
|
|
1405
|
-
|
|
1406
|
-
|
|
1407
|
-
|
|
1408
|
-
|
|
1409
|
-
|
|
1410
|
-
|
|
1411
|
-
|
|
1412
|
-
|
|
1413
|
-
|
|
1414
|
-
|
|
1415
|
-
|
|
1416
|
-
|
|
1417
|
-
)
|
|
1418
|
-
|
|
1419
|
-
|
|
1420
|
-
""
|
|
1421
|
-
|
|
1422
|
-
|
|
1423
|
-
|
|
1424
|
-
|
|
1425
|
-
|
|
1426
|
-
|
|
1427
|
-
|
|
1428
|
-
|
|
1429
|
-
|
|
1430
|
-
|
|
1431
|
-
|
|
1432
|
-
|
|
1433
|
-
|
|
1434
|
-
|
|
1435
|
-
|
|
1436
|
-
|
|
1437
|
-
|
|
1438
|
-
|
|
1439
|
-
|
|
1440
|
-
|
|
1441
|
-
|
|
1442
|
-
|
|
1443
|
-
|
|
1444
|
-
|
|
1445
|
-
|
|
1446
|
-
|
|
1447
|
-
|
|
1448
|
-
|
|
1449
|
-
|
|
1450
|
-
|
|
1451
|
-
|
|
1452
|
-
|
|
1453
|
-
|
|
1454
|
-
|
|
1455
|
-
|
|
1456
|
-
|
|
1457
|
-
|
|
1458
|
-
|
|
1459
|
-
|
|
1460
|
-
|
|
1461
|
-
|
|
1462
|
-
|
|
1463
|
-
|
|
1464
|
-
|
|
1465
|
-
|
|
1466
|
-
|
|
1467
|
-
|
|
1468
|
-
|
|
1469
|
-
|
|
1470
|
-
|
|
1471
|
-
|
|
1472
|
-
|
|
1473
|
-
|
|
1474
|
-
|
|
1475
|
-
|
|
1476
|
-
|
|
1477
|
-
|
|
1478
|
-
|
|
1479
|
-
|
|
1480
|
-
|
|
1481
|
-
|
|
1482
|
-
|
|
1483
|
-
|
|
1484
|
-
|
|
1485
|
-
|
|
1486
|
-
|
|
1487
|
-
|
|
1488
|
-
|
|
1489
|
-
|
|
1490
|
-
|
|
1491
|
-
|
|
1492
|
-
|
|
1493
|
-
|
|
1494
|
-
|
|
1495
|
-
|
|
1496
|
-
|
|
1497
|
-
|
|
1498
|
-
|
|
1499
|
-
|
|
1500
|
-
|
|
1501
|
-
|
|
1502
|
-
|
|
1503
|
-
|
|
1504
|
-
|
|
1505
|
-
|
|
1506
|
-
|
|
1507
|
-
|
|
1508
|
-
|
|
1509
|
-
|
|
1510
|
-
|
|
1511
|
-
|
|
1512
|
-
|
|
1513
|
-
|
|
1514
|
-
|
|
1515
|
-
|
|
1516
|
-
|
|
1517
|
-
|
|
1518
|
-
|
|
1519
|
-
|
|
1520
|
-
|
|
1521
|
-
|
|
1522
|
-
|
|
1523
|
-
|
|
1524
|
-
|
|
1525
|
-
|
|
1526
|
-
|
|
1527
|
-
|
|
1528
|
-
|
|
1529
|
-
|
|
1530
|
-
|
|
1531
|
-
|
|
1532
|
-
|
|
1533
|
-
|
|
1534
|
-
|
|
1535
|
-
|
|
1536
|
-
"
|
|
1537
|
-
"type": "string",
|
|
1538
|
-
"
|
|
1539
|
-
|
|
1540
|
-
|
|
1541
|
-
|
|
1542
|
-
|
|
1543
|
-
|
|
1544
|
-
|
|
1545
|
-
|
|
1546
|
-
|
|
1547
|
-
|
|
1548
|
-
|
|
1549
|
-
|
|
1550
|
-
|
|
1551
|
-
|
|
1552
|
-
|
|
1553
|
-
|
|
1554
|
-
|
|
1555
|
-
|
|
1556
|
-
|
|
1557
|
-
|
|
1558
|
-
|
|
1559
|
-
|
|
1560
|
-
|
|
1561
|
-
|
|
1562
|
-
|
|
1563
|
-
|
|
1564
|
-
|
|
1565
|
-
|
|
1566
|
-
|
|
1567
|
-
|
|
1568
|
-
|
|
1569
|
-
|
|
1570
|
-
|
|
1571
|
-
|
|
1572
|
-
|
|
1573
|
-
|
|
1574
|
-
|
|
1575
|
-
|
|
1576
|
-
|
|
1577
|
-
|
|
1578
|
-
|
|
1579
|
-
|
|
1580
|
-
|
|
1581
|
-
|
|
1582
|
-
|
|
1583
|
-
|
|
1584
|
-
|
|
1585
|
-
|
|
1586
|
-
|
|
1587
|
-
|
|
1588
|
-
|
|
1589
|
-
|
|
1590
|
-
|
|
1591
|
-
|
|
1592
|
-
|
|
1593
|
-
|
|
1594
|
-
|
|
1595
|
-
|
|
1596
|
-
|
|
1597
|
-
|
|
1598
|
-
|
|
1599
|
-
|
|
1600
|
-
|
|
1601
|
-
|
|
1602
|
-
|
|
1603
|
-
|
|
1604
|
-
|
|
1605
|
-
|
|
1606
|
-
|
|
1607
|
-
|
|
1608
|
-
|
|
1609
|
-
|
|
1610
|
-
|
|
1611
|
-
|
|
1612
|
-
|
|
1613
|
-
|
|
1614
|
-
|
|
1615
|
-
|
|
1616
|
-
|
|
1617
|
-
|
|
1618
|
-
|
|
1619
|
-
|
|
1620
|
-
|
|
1621
|
-
|
|
1622
|
-
|
|
1623
|
-
|
|
1624
|
-
|
|
1625
|
-
|
|
1626
|
-
|
|
1627
|
-
"
|
|
1628
|
-
|
|
1629
|
-
|
|
1630
|
-
|
|
1631
|
-
"
|
|
1632
|
-
|
|
1633
|
-
|
|
1634
|
-
|
|
1635
|
-
|
|
1636
|
-
"
|
|
1637
|
-
|
|
1638
|
-
|
|
1639
|
-
|
|
1640
|
-
|
|
1641
|
-
"
|
|
1642
|
-
|
|
1643
|
-
|
|
1644
|
-
|
|
1645
|
-
|
|
1646
|
-
"
|
|
1647
|
-
"
|
|
1648
|
-
|
|
1649
|
-
|
|
1650
|
-
|
|
1651
|
-
"
|
|
1652
|
-
"description": "
|
|
1653
|
-
"default":
|
|
1654
|
-
}
|
|
1655
|
-
|
|
1656
|
-
|
|
1657
|
-
|
|
1658
|
-
|
|
1659
|
-
|
|
1660
|
-
|
|
1661
|
-
|
|
1662
|
-
|
|
1663
|
-
|
|
1664
|
-
|
|
1665
|
-
|
|
1666
|
-
|
|
1667
|
-
|
|
1668
|
-
|
|
1669
|
-
|
|
1670
|
-
|
|
1671
|
-
|
|
1672
|
-
|
|
1673
|
-
|
|
1674
|
-
|
|
1675
|
-
|
|
1676
|
-
|
|
1677
|
-
|
|
1678
|
-
|
|
1679
|
-
|
|
1680
|
-
|
|
1681
|
-
|
|
1682
|
-
|
|
1683
|
-
|
|
1684
|
-
|
|
1685
|
-
|
|
1686
|
-
|
|
1687
|
-
|
|
1688
|
-
|
|
1689
|
-
|
|
1690
|
-
|
|
1691
|
-
|
|
1692
|
-
|
|
1693
|
-
|
|
1694
|
-
|
|
1695
|
-
|
|
1696
|
-
|
|
1697
|
-
|
|
1698
|
-
|
|
1699
|
-
|
|
1700
|
-
|
|
1701
|
-
|
|
1702
|
-
|
|
1703
|
-
|
|
1704
|
-
|
|
1705
|
-
|
|
1706
|
-
|
|
1707
|
-
|
|
1708
|
-
|
|
1709
|
-
|
|
1710
|
-
|
|
1711
|
-
|
|
1712
|
-
|
|
1713
|
-
|
|
1714
|
-
|
|
1715
|
-
|
|
1716
|
-
"
|
|
1717
|
-
|
|
1718
|
-
|
|
1719
|
-
|
|
1720
|
-
|
|
1721
|
-
|
|
1722
|
-
|
|
1723
|
-
|
|
1724
|
-
|
|
1725
|
-
|
|
1726
|
-
|
|
1727
|
-
|
|
1728
|
-
|
|
1729
|
-
|
|
1730
|
-
|
|
1731
|
-
|
|
1732
|
-
"
|
|
1733
|
-
|
|
1734
|
-
|
|
1735
|
-
|
|
1736
|
-
|
|
1737
|
-
|
|
1738
|
-
|
|
1739
|
-
|
|
1740
|
-
"
|
|
1741
|
-
|
|
1742
|
-
|
|
1743
|
-
|
|
1744
|
-
|
|
1745
|
-
|
|
1746
|
-
|
|
1747
|
-
|
|
1748
|
-
|
|
1749
|
-
|
|
1750
|
-
|
|
1751
|
-
|
|
1752
|
-
|
|
1753
|
-
|
|
1754
|
-
|
|
1755
|
-
|
|
1756
|
-
|
|
1757
|
-
|
|
1758
|
-
|
|
1759
|
-
|
|
1760
|
-
|
|
1761
|
-
|
|
1762
|
-
|
|
1763
|
-
|
|
1764
|
-
|
|
1765
|
-
|
|
1766
|
-
|
|
1767
|
-
|
|
1768
|
-
|
|
1769
|
-
|
|
1770
|
-
|
|
1771
|
-
|
|
1772
|
-
|
|
1773
|
-
|
|
1774
|
-
|
|
1775
|
-
|
|
1776
|
-
|
|
1777
|
-
|
|
1778
|
-
|
|
1779
|
-
|
|
1780
|
-
|
|
1781
|
-
|
|
1782
|
-
|
|
1783
|
-
|
|
1784
|
-
|
|
1785
|
-
|
|
1786
|
-
|
|
1787
|
-
"
|
|
1788
|
-
"type": "string",
|
|
1789
|
-
"
|
|
1790
|
-
|
|
1791
|
-
|
|
1792
|
-
|
|
1793
|
-
"
|
|
1794
|
-
|
|
1795
|
-
|
|
1796
|
-
|
|
1797
|
-
|
|
1798
|
-
|
|
1799
|
-
|
|
1800
|
-
|
|
1801
|
-
|
|
1802
|
-
|
|
1803
|
-
|
|
1804
|
-
|
|
1805
|
-
|
|
1806
|
-
|
|
1807
|
-
|
|
1808
|
-
|
|
1809
|
-
|
|
1810
|
-
|
|
1811
|
-
|
|
1812
|
-
|
|
1813
|
-
|
|
1814
|
-
|
|
1815
|
-
|
|
1816
|
-
|
|
1817
|
-
|
|
1818
|
-
|
|
1819
|
-
|
|
1820
|
-
|
|
1821
|
-
|
|
1822
|
-
|
|
1823
|
-
|
|
1824
|
-
|
|
1825
|
-
|
|
1826
|
-
|
|
1827
|
-
|
|
1828
|
-
|
|
1829
|
-
|
|
1830
|
-
|
|
1831
|
-
|
|
1832
|
-
|
|
1833
|
-
|
|
1834
|
-
|
|
1835
|
-
|
|
1836
|
-
|
|
1837
|
-
|
|
1838
|
-
|
|
1839
|
-
|
|
1840
|
-
|
|
1841
|
-
return
|
|
1842
|
-
|
|
1843
|
-
|
|
1844
|
-
|
|
1845
|
-
|
|
1846
|
-
|
|
1847
|
-
|
|
1848
|
-
|
|
1849
|
-
|
|
1850
|
-
|
|
1851
|
-
|
|
1852
|
-
|
|
1853
|
-
|
|
1854
|
-
|
|
1855
|
-
|
|
1856
|
-
|
|
1857
|
-
|
|
1858
|
-
|
|
1859
|
-
|
|
1860
|
-
|
|
1861
|
-
|
|
1862
|
-
|
|
1863
|
-
|
|
1864
|
-
|
|
1865
|
-
|
|
1866
|
-
|
|
1867
|
-
|
|
1868
|
-
|
|
1869
|
-
|
|
1870
|
-
|
|
1871
|
-
|
|
1872
|
-
|
|
1873
|
-
|
|
1874
|
-
|
|
1875
|
-
|
|
1876
|
-
|
|
1877
|
-
|
|
1878
|
-
|
|
1879
|
-
|
|
1880
|
-
|
|
1881
|
-
|
|
1882
|
-
|
|
1883
|
-
|
|
1884
|
-
|
|
1885
|
-
|
|
1886
|
-
|
|
1887
|
-
|
|
1888
|
-
|
|
1889
|
-
|
|
1890
|
-
|
|
1891
|
-
|
|
1892
|
-
|
|
1893
|
-
|
|
1894
|
-
|
|
1895
|
-
conn
|
|
1896
|
-
|
|
1897
|
-
if
|
|
1898
|
-
|
|
1899
|
-
|
|
1900
|
-
|
|
1901
|
-
|
|
1902
|
-
|
|
1903
|
-
|
|
1904
|
-
|
|
1905
|
-
|
|
1906
|
-
|
|
1907
|
-
|
|
1908
|
-
|
|
1909
|
-
|
|
1910
|
-
|
|
1911
|
-
|
|
1912
|
-
|
|
1913
|
-
|
|
1914
|
-
|
|
1915
|
-
|
|
1916
|
-
|
|
1917
|
-
|
|
1918
|
-
|
|
1919
|
-
|
|
1920
|
-
|
|
1921
|
-
|
|
1922
|
-
|
|
1923
|
-
|
|
1924
|
-
|
|
1925
|
-
|
|
1926
|
-
|
|
1927
|
-
|
|
1928
|
-
|
|
1929
|
-
|
|
1930
|
-
|
|
1931
|
-
|
|
1932
|
-
|
|
1933
|
-
|
|
1934
|
-
|
|
1935
|
-
|
|
1936
|
-
|
|
1937
|
-
|
|
1938
|
-
|
|
1939
|
-
|
|
1940
|
-
|
|
1941
|
-
|
|
1942
|
-
|
|
1943
|
-
|
|
1944
|
-
|
|
1945
|
-
|
|
1946
|
-
|
|
1947
|
-
|
|
1948
|
-
|
|
1949
|
-
|
|
1950
|
-
|
|
1951
|
-
|
|
1952
|
-
|
|
1953
|
-
|
|
1954
|
-
|
|
1955
|
-
|
|
1956
|
-
|
|
1957
|
-
|
|
1958
|
-
|
|
1959
|
-
|
|
1960
|
-
|
|
1961
|
-
|
|
1962
|
-
|
|
1963
|
-
|
|
1964
|
-
|
|
1965
|
-
|
|
1966
|
-
|
|
1967
|
-
|
|
1968
|
-
|
|
1969
|
-
|
|
1970
|
-
|
|
1971
|
-
|
|
1972
|
-
|
|
1973
|
-
|
|
1974
|
-
|
|
1975
|
-
|
|
1976
|
-
|
|
1977
|
-
|
|
1978
|
-
|
|
1979
|
-
|
|
1980
|
-
|
|
1981
|
-
|
|
1982
|
-
|
|
1983
|
-
|
|
1984
|
-
|
|
1985
|
-
|
|
1986
|
-
|
|
1987
|
-
|
|
1988
|
-
|
|
1989
|
-
|
|
1990
|
-
|
|
1991
|
-
|
|
1992
|
-
|
|
1993
|
-
|
|
1994
|
-
|
|
1995
|
-
|
|
1996
|
-
|
|
1997
|
-
|
|
1998
|
-
|
|
1999
|
-
|
|
2000
|
-
|
|
2001
|
-
|
|
2002
|
-
|
|
2003
|
-
|
|
2004
|
-
|
|
2005
|
-
|
|
2006
|
-
|
|
2007
|
-
|
|
2008
|
-
|
|
2009
|
-
|
|
2010
|
-
|
|
2011
|
-
|
|
2012
|
-
|
|
2013
|
-
|
|
2014
|
-
|
|
2015
|
-
|
|
2016
|
-
|
|
2017
|
-
|
|
2018
|
-
|
|
2019
|
-
|
|
2020
|
-
|
|
2021
|
-
|
|
2022
|
-
|
|
2023
|
-
|
|
2024
|
-
|
|
2025
|
-
|
|
2026
|
-
|
|
2027
|
-
|
|
2028
|
-
|
|
2029
|
-
|
|
2030
|
-
|
|
2031
|
-
|
|
2032
|
-
|
|
2033
|
-
|
|
2034
|
-
|
|
2035
|
-
|
|
2036
|
-
|
|
2037
|
-
|
|
2038
|
-
|
|
2039
|
-
|
|
2040
|
-
|
|
2041
|
-
|
|
2042
|
-
|
|
2043
|
-
|
|
2044
|
-
|
|
2045
|
-
|
|
2046
|
-
|
|
2047
|
-
|
|
2048
|
-
|
|
2049
|
-
|
|
2050
|
-
|
|
2051
|
-
|
|
2052
|
-
|
|
2053
|
-
conn.
|
|
2054
|
-
|
|
2055
|
-
|
|
2056
|
-
|
|
2057
|
-
|
|
2058
|
-
|
|
2059
|
-
|
|
2060
|
-
|
|
2061
|
-
|
|
2062
|
-
|
|
2063
|
-
|
|
2064
|
-
|
|
2065
|
-
|
|
2066
|
-
|
|
2067
|
-
|
|
2068
|
-
|
|
2069
|
-
|
|
2070
|
-
|
|
2071
|
-
|
|
2072
|
-
|
|
2073
|
-
|
|
2074
|
-
|
|
2075
|
-
|
|
2076
|
-
|
|
2077
|
-
|
|
2078
|
-
|
|
2079
|
-
|
|
2080
|
-
|
|
2081
|
-
|
|
2082
|
-
|
|
2083
|
-
|
|
2084
|
-
|
|
2085
|
-
|
|
2086
|
-
|
|
2087
|
-
|
|
2088
|
-
|
|
2089
|
-
|
|
2090
|
-
|
|
2091
|
-
|
|
2092
|
-
|
|
2093
|
-
|
|
2094
|
-
|
|
2095
|
-
|
|
2096
|
-
|
|
2097
|
-
|
|
2098
|
-
"
|
|
2099
|
-
|
|
2100
|
-
|
|
2101
|
-
|
|
2102
|
-
|
|
2103
|
-
|
|
2104
|
-
|
|
2105
|
-
|
|
2106
|
-
|
|
2107
|
-
|
|
2108
|
-
|
|
2109
|
-
|
|
2110
|
-
|
|
2111
|
-
|
|
2112
|
-
|
|
2113
|
-
|
|
2114
|
-
|
|
2115
|
-
|
|
2116
|
-
|
|
2117
|
-
|
|
2118
|
-
|
|
2119
|
-
|
|
2120
|
-
|
|
2121
|
-
|
|
2122
|
-
|
|
2123
|
-
|
|
2124
|
-
|
|
2125
|
-
|
|
2126
|
-
|
|
2127
|
-
|
|
2128
|
-
|
|
2129
|
-
|
|
2130
|
-
|
|
2131
|
-
|
|
2132
|
-
|
|
2133
|
-
|
|
2134
|
-
|
|
2135
|
-
|
|
2136
|
-
|
|
2137
|
-
|
|
2138
|
-
|
|
2139
|
-
|
|
2140
|
-
|
|
2141
|
-
|
|
2142
|
-
|
|
2143
|
-
|
|
2144
|
-
|
|
2145
|
-
|
|
2146
|
-
|
|
2147
|
-
|
|
2148
|
-
|
|
2149
|
-
|
|
2150
|
-
|
|
2151
|
-
|
|
2152
|
-
|
|
2153
|
-
"
|
|
2154
|
-
"
|
|
2155
|
-
},
|
|
2156
|
-
"
|
|
2157
|
-
"type": "
|
|
2158
|
-
"
|
|
2159
|
-
|
|
2160
|
-
|
|
2161
|
-
|
|
2162
|
-
|
|
2163
|
-
|
|
2164
|
-
"
|
|
2165
|
-
|
|
2166
|
-
|
|
2167
|
-
|
|
2168
|
-
|
|
2169
|
-
"
|
|
2170
|
-
|
|
2171
|
-
|
|
2172
|
-
|
|
2173
|
-
|
|
2174
|
-
"
|
|
2175
|
-
|
|
2176
|
-
|
|
2177
|
-
|
|
2178
|
-
|
|
2179
|
-
"
|
|
2180
|
-
|
|
2181
|
-
|
|
2182
|
-
|
|
2183
|
-
|
|
2184
|
-
"
|
|
2185
|
-
|
|
2186
|
-
|
|
2187
|
-
|
|
2188
|
-
|
|
2189
|
-
"
|
|
2190
|
-
|
|
2191
|
-
|
|
2192
|
-
|
|
2193
|
-
|
|
2194
|
-
|
|
2195
|
-
|
|
2196
|
-
|
|
2197
|
-
|
|
2198
|
-
|
|
2199
|
-
|
|
2200
|
-
|
|
2201
|
-
|
|
2202
|
-
|
|
2203
|
-
|
|
2204
|
-
|
|
2205
|
-
|
|
2206
|
-
|
|
2207
|
-
|
|
2208
|
-
|
|
2209
|
-
|
|
2210
|
-
|
|
2211
|
-
|
|
2212
|
-
|
|
2213
|
-
|
|
2214
|
-
|
|
2215
|
-
|
|
2216
|
-
|
|
2217
|
-
|
|
2218
|
-
|
|
2219
|
-
|
|
2220
|
-
|
|
2221
|
-
|
|
2222
|
-
|
|
2223
|
-
|
|
2224
|
-
|
|
2225
|
-
|
|
2226
|
-
|
|
2227
|
-
|
|
2228
|
-
|
|
2229
|
-
|
|
2230
|
-
|
|
2231
|
-
|
|
2232
|
-
|
|
2233
|
-
|
|
2234
|
-
|
|
2235
|
-
|
|
2236
|
-
|
|
2237
|
-
|
|
2238
|
-
|
|
2239
|
-
|
|
2240
|
-
|
|
2241
|
-
|
|
2242
|
-
|
|
2243
|
-
|
|
2244
|
-
|
|
2245
|
-
|
|
2246
|
-
|
|
2247
|
-
|
|
2248
|
-
|
|
2249
|
-
|
|
2250
|
-
|
|
2251
|
-
|
|
2252
|
-
|
|
2253
|
-
|
|
2254
|
-
|
|
2255
|
-
|
|
2256
|
-
|
|
2257
|
-
|
|
2258
|
-
|
|
2259
|
-
|
|
2260
|
-
|
|
2261
|
-
|
|
2262
|
-
|
|
2263
|
-
|
|
2264
|
-
|
|
2265
|
-
|
|
2266
|
-
|
|
2267
|
-
|
|
2268
|
-
|
|
2269
|
-
|
|
2270
|
-
|
|
2271
|
-
|
|
2272
|
-
|
|
2273
|
-
|
|
2274
|
-
|
|
2275
|
-
|
|
2276
|
-
|
|
2277
|
-
|
|
2278
|
-
|
|
2279
|
-
|
|
2280
|
-
|
|
2281
|
-
|
|
2282
|
-
|
|
2283
|
-
|
|
2284
|
-
|
|
2285
|
-
|
|
2286
|
-
|
|
2287
|
-
|
|
2288
|
-
|
|
2289
|
-
|
|
2290
|
-
|
|
2291
|
-
|
|
2292
|
-
|
|
2293
|
-
|
|
2294
|
-
|
|
2295
|
-
|
|
2296
|
-
|
|
2297
|
-
|
|
2298
|
-
|
|
2299
|
-
|
|
2300
|
-
|
|
2301
|
-
|
|
2302
|
-
|
|
2303
|
-
|
|
2304
|
-
|
|
2305
|
-
|
|
2306
|
-
|
|
2307
|
-
|
|
2308
|
-
|
|
2309
|
-
|
|
2310
|
-
|
|
2311
|
-
|
|
2312
|
-
|
|
2313
|
-
|
|
2314
|
-
|
|
2315
|
-
|
|
2316
|
-
|
|
2317
|
-
|
|
2318
|
-
|
|
2319
|
-
|
|
2320
|
-
|
|
2321
|
-
|
|
2322
|
-
|
|
2323
|
-
|
|
2324
|
-
|
|
2325
|
-
|
|
2326
|
-
|
|
2327
|
-
|
|
2328
|
-
|
|
2329
|
-
|
|
2330
|
-
|
|
2331
|
-
|
|
2332
|
-
|
|
2333
|
-
|
|
2334
|
-
|
|
2335
|
-
|
|
2336
|
-
|
|
2337
|
-
|
|
2338
|
-
|
|
2339
|
-
|
|
2340
|
-
|
|
2341
|
-
|
|
2342
|
-
|
|
2343
|
-
|
|
2344
|
-
|
|
2345
|
-
|
|
1
|
+
"""
|
|
2
|
+
MCP Tool Registry - Maps Pomera tools to MCP tool definitions
|
|
3
|
+
|
|
4
|
+
This module provides:
|
|
5
|
+
- MCPToolAdapter: Wrapper for Pomera tools to expose them via MCP
|
|
6
|
+
- ToolRegistry: Central registry for all MCP-exposed tools
|
|
7
|
+
|
|
8
|
+
Tools are registered with their input schemas and handlers,
|
|
9
|
+
allowing external MCP clients to discover and execute them.
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import logging
|
|
13
|
+
from typing import Dict, Any, List, Callable, Optional
|
|
14
|
+
from dataclasses import dataclass
|
|
15
|
+
|
|
16
|
+
from .schema import MCPTool, MCPToolResult
|
|
17
|
+
|
|
18
|
+
logger = logging.getLogger(__name__)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
@dataclass
class MCPToolAdapter:
    """
    Bridges a single Pomera tool into the MCP protocol.

    Attributes:
        name: MCP tool name (e.g., 'pomera_case_transform')
        description: Human-readable description shown to clients
        input_schema: JSON Schema used to validate tool arguments
        handler: Callable that performs the work and returns output text
    """
    name: str
    description: str
    input_schema: Dict[str, Any]
    handler: Callable[[Dict[str, Any]], str]

    def to_mcp_tool(self) -> "MCPTool":
        """Build the MCPTool definition advertised to MCP clients."""
        return MCPTool(
            name=self.name,
            description=self.description,
            inputSchema=self.input_schema,
        )

    def execute(self, arguments: Dict[str, Any]) -> "MCPToolResult":
        """
        Run the wrapped handler and package the outcome.

        Args:
            arguments: Tool arguments matching input_schema

        Returns:
            MCPToolResult carrying the handler's text output, or an error
            result (with logged traceback) when the handler raises.
        """
        try:
            return MCPToolResult.text(self.handler(arguments))
        except Exception as exc:
            # Log the full traceback; the client only sees a short message.
            logger.exception(f"Tool execution failed: {self.name}")
            return MCPToolResult.error(f"Tool execution failed: {str(exc)}")
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
class ToolRegistry:
|
|
64
|
+
"""
|
|
65
|
+
Central registry for MCP-exposed tools.
|
|
66
|
+
|
|
67
|
+
Manages tool registration, discovery, and execution.
|
|
68
|
+
Automatically registers built-in Pomera tools on initialization.
|
|
69
|
+
"""
|
|
70
|
+
|
|
71
|
+
def __init__(self, register_builtins: bool = True):
|
|
72
|
+
"""
|
|
73
|
+
Initialize the tool registry.
|
|
74
|
+
|
|
75
|
+
Args:
|
|
76
|
+
register_builtins: Whether to register built-in tools
|
|
77
|
+
"""
|
|
78
|
+
self._tools: Dict[str, MCPToolAdapter] = {}
|
|
79
|
+
self._logger = logging.getLogger(__name__)
|
|
80
|
+
|
|
81
|
+
if register_builtins:
|
|
82
|
+
self._register_builtin_tools()
|
|
83
|
+
|
|
84
|
+
def register(self, adapter: MCPToolAdapter) -> None:
|
|
85
|
+
"""
|
|
86
|
+
Register a tool adapter.
|
|
87
|
+
|
|
88
|
+
Args:
|
|
89
|
+
adapter: MCPToolAdapter to register
|
|
90
|
+
"""
|
|
91
|
+
self._tools[adapter.name] = adapter
|
|
92
|
+
self._logger.info(f"Registered MCP tool: {adapter.name}")
|
|
93
|
+
|
|
94
|
+
def unregister(self, name: str) -> bool:
|
|
95
|
+
"""
|
|
96
|
+
Unregister a tool by name.
|
|
97
|
+
|
|
98
|
+
Args:
|
|
99
|
+
name: Tool name to unregister
|
|
100
|
+
|
|
101
|
+
Returns:
|
|
102
|
+
True if tool was removed, False if not found
|
|
103
|
+
"""
|
|
104
|
+
if name in self._tools:
|
|
105
|
+
del self._tools[name]
|
|
106
|
+
self._logger.info(f"Unregistered MCP tool: {name}")
|
|
107
|
+
return True
|
|
108
|
+
return False
|
|
109
|
+
|
|
110
|
+
def get_tool(self, name: str) -> Optional[MCPToolAdapter]:
|
|
111
|
+
"""
|
|
112
|
+
Get a tool adapter by name.
|
|
113
|
+
|
|
114
|
+
Args:
|
|
115
|
+
name: Tool name
|
|
116
|
+
|
|
117
|
+
Returns:
|
|
118
|
+
MCPToolAdapter or None if not found
|
|
119
|
+
"""
|
|
120
|
+
return self._tools.get(name)
|
|
121
|
+
|
|
122
|
+
def list_tools(self) -> List[MCPTool]:
|
|
123
|
+
"""
|
|
124
|
+
Get list of all registered tools as MCPTool definitions.
|
|
125
|
+
|
|
126
|
+
Returns:
|
|
127
|
+
List of MCPTool objects
|
|
128
|
+
"""
|
|
129
|
+
return [adapter.to_mcp_tool() for adapter in self._tools.values()]
|
|
130
|
+
|
|
131
|
+
def execute(self, name: str, arguments: Dict[str, Any]) -> "MCPToolResult":
    """
    Execute a registered tool by name.

    Args:
        name: Tool name
        arguments: Tool arguments

    Returns:
        MCPToolResult with execution output; an error result (not an
        exception) when the tool is not registered.
    """
    try:
        adapter = self._tools[name]
    except KeyError:
        return MCPToolResult.error(f"Tool not found: {name}")
    return adapter.execute(arguments)
|
|
150
|
+
|
|
151
|
+
def get_tool_names(self) -> List[str]:
|
|
152
|
+
"""Get list of all registered tool names."""
|
|
153
|
+
return list(self._tools.keys())
|
|
154
|
+
|
|
155
|
+
def __len__(self) -> int:
|
|
156
|
+
"""Return number of registered tools."""
|
|
157
|
+
return len(self._tools)
|
|
158
|
+
|
|
159
|
+
def __contains__(self, name: str) -> bool:
|
|
160
|
+
"""Check if tool is registered."""
|
|
161
|
+
return name in self._tools
|
|
162
|
+
|
|
163
|
+
# =========================================================================
|
|
164
|
+
# Built-in Tool Registration
|
|
165
|
+
# =========================================================================
|
|
166
|
+
|
|
167
|
+
def _register_builtin_tools(self) -> None:
    """Register all built-in Pomera tools.

    Called from __init__ when register_builtins is True. Each
    _register_* helper adds one tool definition via self.register();
    registration order here determines listing order to clients.
    """
    # Core text transformation tools
    self._register_case_tool()
    self._register_encode_tool()  # Consolidated: base64, hash, number_base
    self._register_line_tools()
    self._register_whitespace_tools()
    self._register_string_escape_tool()
    self._register_sorter_tools()
    self._register_text_stats_tool()
    self._register_json_xml_tool()
    self._register_url_parser_tool()
    self._register_text_wrapper_tool()
    self._register_timestamp_tool()

    # Additional tools (Phase 2)
    self._register_extract_tool()  # Consolidated: regex, emails, urls
    self._register_markdown_tools()
    self._register_translator_tools()
    self._register_cron_tool()
    self._register_word_frequency_tool()
    self._register_column_tools()
    self._register_generator_tools()

    # Notes tools (Phase 3)
    self._register_notes_tools()

    # Additional tools (Phase 4)
    self._register_email_header_analyzer_tool()
    self._register_html_tool()
    self._register_list_comparator_tool()

    self._logger.info(f"Registered {len(self._tools)} built-in MCP tools")
|
|
200
|
+
|
|
201
|
+
def _register_case_tool(self) -> None:
    """Register the Case Tool.

    Exposes case transformation (handled by _handle_case_transform)
    as the MCP tool 'pomera_case_transform'.
    """
    self.register(MCPToolAdapter(
        name="pomera_case_transform",
        description="Transform text case. Modes: sentence (capitalize first letter of sentences), "
                    "lower (all lowercase), upper (all uppercase), capitalized (title case), "
                    "title (title case with exclusions for articles/prepositions).",
        # JSON Schema used by MCP clients to validate arguments.
        input_schema={
            "type": "object",
            "properties": {
                "text": {
                    "type": "string",
                    "description": "The text to transform"
                },
                "mode": {
                    "type": "string",
                    "enum": ["sentence", "lower", "upper", "capitalized", "title"],
                    "description": "Case transformation mode"
                },
                "exclusions": {
                    "type": "string",
                    "description": "Words to exclude from title case (one per line). "
                                   "Only used when mode is 'title'.",
                    # Same fallback list as _handle_case_transform uses.
                    "default": "a\nan\nthe\nand\nbut\nor\nfor\nnor\non\nat\nto\nfrom\nby\nwith\nin\nof"
                }
            },
            "required": ["text", "mode"]
        },
        handler=self._handle_case_transform
    ))
|
|
231
|
+
|
|
232
|
+
def _handle_case_transform(self, args: Dict[str, Any]) -> str:
    """Run the case tool against MCP arguments and return the result text."""
    from tools.case_tool import CaseToolProcessor

    source = args.get("text", "")
    requested = args.get("mode", "sentence")
    exclusions = args.get("exclusions", "a\nan\nthe\nand\nbut\nor\nfor\nnor\non\nat\nto\nfrom\nby\nwith\nin\nof")

    # The processor expects capitalized mode names; unknown or missing
    # modes fall back to "Sentence".
    mode_names = {
        "sentence": "Sentence",
        "lower": "Lower",
        "upper": "Upper",
        "capitalized": "Capitalized",
        "title": "Title",
    }
    processor_mode = mode_names.get(requested.lower(), "Sentence")

    return CaseToolProcessor.process_text(source, processor_mode, exclusions)
|
|
251
|
+
|
|
252
|
+
def _register_encode_tool(self) -> None:
    """Register unified Encoding Tool.

    Single MCP tool 'pomera_encode' whose 'type' argument selects one of
    three sub-operations dispatched by _handle_encode.
    """
    self.register(MCPToolAdapter(
        name="pomera_encode",
        description="Encoding and conversion operations. Types: base64 (encode/decode text), "
                    "hash (MD5/SHA/CRC32 hashes), number_base (binary/octal/decimal/hex conversion).",
        # Only 'type' is required; the remaining properties are per-subtype
        # options (text for base64/hash, value for number_base, etc.).
        input_schema={
            "type": "object",
            "properties": {
                "type": {
                    "type": "string",
                    "enum": ["base64", "hash", "number_base"],
                    "description": "Encoding type"
                },
                "text": {
                    "type": "string",
                    "description": "Text to process (for base64/hash)"
                },
                "value": {
                    "type": "string",
                    "description": "For number_base: number to convert (0x/0b/0o prefix ok)"
                },
                "operation": {
                    "type": "string",
                    "enum": ["encode", "decode"],
                    "description": "For base64: encode or decode",
                    "default": "encode"
                },
                "algorithm": {
                    "type": "string",
                    "enum": ["md5", "sha1", "sha256", "sha512", "crc32"],
                    "description": "For hash: algorithm to use",
                    "default": "sha256"
                },
                "uppercase": {
                    "type": "boolean",
                    "description": "For hash: output in uppercase",
                    "default": False
                },
                "from_base": {
                    "type": "string",
                    "enum": ["binary", "octal", "decimal", "hex", "auto"],
                    "description": "For number_base: source base",
                    "default": "auto"
                },
                "to_base": {
                    "type": "string",
                    "enum": ["binary", "octal", "decimal", "hex", "all"],
                    "description": "For number_base: target base",
                    "default": "all"
                }
            },
            "required": ["type"]
        },
        handler=self._handle_encode
    ))
|
|
308
|
+
|
|
309
|
+
def _handle_encode(self, args: Dict[str, Any]) -> str:
|
|
310
|
+
"""Route encoding to appropriate handler."""
|
|
311
|
+
encode_type = args.get("type", "")
|
|
312
|
+
|
|
313
|
+
if encode_type == "base64":
|
|
314
|
+
return self._handle_base64(args)
|
|
315
|
+
elif encode_type == "hash":
|
|
316
|
+
return self._handle_hash(args)
|
|
317
|
+
elif encode_type == "number_base":
|
|
318
|
+
return self._handle_number_base(args)
|
|
319
|
+
else:
|
|
320
|
+
return f"Unknown encoding type: {encode_type}. Valid types: base64, hash, number_base"
|
|
321
|
+
|
|
322
|
+
def _handle_base64(self, args: Dict[str, Any]) -> str:
    """Run base64 encode/decode on the 'text' argument."""
    from tools.base64_tools import Base64Tools

    payload = args.get("text", "")
    if not payload:
        return "Error: 'text' is required for base64"

    return Base64Tools.base64_processor(payload, args.get("operation", "encode"))
|
|
332
|
+
|
|
333
|
+
def _handle_hash(self, args: Dict[str, Any]) -> str:
    """Produce a hash digest of the 'text' argument."""
    from tools.hash_generator import HashGeneratorProcessor

    payload = args.get("text", "")
    if not payload:
        return "Error: 'text' is required for hash"

    algo = args.get("algorithm", "sha256")
    upper = args.get("uppercase", False)
    return HashGeneratorProcessor.generate_hash(payload, algo, upper)
|
|
344
|
+
|
|
345
|
+
def _handle_number_base(self, args: Dict[str, Any]) -> str:
|
|
346
|
+
"""Handle number base converter tool execution."""
|
|
347
|
+
value = args.get("value", "").strip()
|
|
348
|
+
if not value:
|
|
349
|
+
return "Error: 'value' is required for number_base"
|
|
350
|
+
from_base = args.get("from_base", "auto")
|
|
351
|
+
to_base = args.get("to_base", "all")
|
|
352
|
+
|
|
353
|
+
try:
|
|
354
|
+
# Parse input number
|
|
355
|
+
if from_base == "auto":
|
|
356
|
+
if value.startswith('0x') or value.startswith('0X'):
|
|
357
|
+
num = int(value, 16)
|
|
358
|
+
elif value.startswith('0b') or value.startswith('0B'):
|
|
359
|
+
num = int(value, 2)
|
|
360
|
+
elif value.startswith('0o') or value.startswith('0O'):
|
|
361
|
+
num = int(value, 8)
|
|
362
|
+
else:
|
|
363
|
+
num = int(value, 10)
|
|
364
|
+
else:
|
|
365
|
+
bases = {"binary": 2, "octal": 8, "decimal": 10, "hex": 16}
|
|
366
|
+
num = int(value.replace('0x', '').replace('0b', '').replace('0o', ''), bases[from_base])
|
|
367
|
+
|
|
368
|
+
# Convert to target base(s)
|
|
369
|
+
if to_base == "all":
|
|
370
|
+
return (f"Decimal: {num}\n"
|
|
371
|
+
f"Binary: 0b{bin(num)[2:]}\n"
|
|
372
|
+
f"Octal: 0o{oct(num)[2:]}\n"
|
|
373
|
+
f"Hexadecimal: 0x{hex(num)[2:]}")
|
|
374
|
+
elif to_base == "binary":
|
|
375
|
+
return f"0b{bin(num)[2:]}"
|
|
376
|
+
elif to_base == "octal":
|
|
377
|
+
return f"0o{oct(num)[2:]}"
|
|
378
|
+
elif to_base == "decimal":
|
|
379
|
+
return str(num)
|
|
380
|
+
elif to_base == "hex":
|
|
381
|
+
return f"0x{hex(num)[2:]}"
|
|
382
|
+
else:
|
|
383
|
+
return f"Unknown target base: {to_base}"
|
|
384
|
+
|
|
385
|
+
except ValueError as e:
|
|
386
|
+
return f"Error: Invalid number format - {str(e)}"
|
|
387
|
+
|
|
388
|
+
def _register_line_tools(self) -> None:
    """Register the Line Tools.

    Exposes line-oriented operations (handled by _handle_line_tools)
    as the MCP tool 'pomera_line_tools'.
    """
    self.register(MCPToolAdapter(
        name="pomera_line_tools",
        description="Line manipulation tools: remove duplicates, remove empty lines, "
                    "add/remove line numbers, reverse lines, shuffle lines.",
        # 'keep_mode'/'case_sensitive' apply only to remove_duplicates;
        # 'number_format' applies only to add_numbers.
        input_schema={
            "type": "object",
            "properties": {
                "text": {
                    "type": "string",
                    "description": "The text to process (line by line)"
                },
                "operation": {
                    "type": "string",
                    "enum": ["remove_duplicates", "remove_empty", "add_numbers",
                             "remove_numbers", "reverse", "shuffle"],
                    "description": "Operation to perform"
                },
                "keep_mode": {
                    "type": "string",
                    "enum": ["keep_first", "keep_last"],
                    "description": "For remove_duplicates: which duplicate to keep",
                    "default": "keep_first"
                },
                "case_sensitive": {
                    "type": "boolean",
                    "description": "For remove_duplicates: case-sensitive comparison",
                    "default": True
                },
                "number_format": {
                    "type": "string",
                    "enum": ["1. ", "1) ", "[1] ", "1: "],
                    "description": "For add_numbers: number format style",
                    "default": "1. "
                }
            },
            "required": ["text", "operation"]
        },
        handler=self._handle_line_tools
    ))
|
|
429
|
+
|
|
430
|
+
def _handle_line_tools(self, args: Dict[str, Any]) -> str:
    """Dispatch a line-level operation to LineToolsProcessor."""
    from tools.line_tools import LineToolsProcessor

    content = args.get("text", "")
    op = args.get("operation", "remove_duplicates")

    # Lazily-evaluated dispatch table: only the chosen operation runs.
    dispatch = {
        "remove_duplicates": lambda: LineToolsProcessor.remove_duplicates(
            content, args.get("keep_mode", "keep_first"), args.get("case_sensitive", True)),
        "remove_empty": lambda: LineToolsProcessor.remove_empty_lines(content),
        "add_numbers": lambda: LineToolsProcessor.add_line_numbers(
            content, args.get("number_format", "1. ")),
        "remove_numbers": lambda: LineToolsProcessor.remove_line_numbers(content),
        "reverse": lambda: LineToolsProcessor.reverse_lines(content),
        "shuffle": lambda: LineToolsProcessor.shuffle_lines(content),
    }

    action = dispatch.get(op)
    if action is None:
        return f"Unknown operation: {op}"
    return action()
|
|
454
|
+
|
|
455
|
+
def _register_whitespace_tools(self) -> None:
    """Register the Whitespace Tools.

    Exposes whitespace operations (handled by _handle_whitespace_tools)
    as the MCP tool 'pomera_whitespace'.
    """
    self.register(MCPToolAdapter(
        name="pomera_whitespace",
        description="Whitespace manipulation: trim lines, remove extra spaces, "
                    "convert tabs/spaces, normalize line endings.",
        # 'trim_mode' applies to trim; 'tab_size' to tabs_to_spaces /
        # spaces_to_tabs; 'line_ending' to normalize_endings.
        input_schema={
            "type": "object",
            "properties": {
                "text": {
                    "type": "string",
                    "description": "The text to process"
                },
                "operation": {
                    "type": "string",
                    "enum": ["trim", "remove_extra_spaces", "tabs_to_spaces",
                             "spaces_to_tabs", "normalize_endings"],
                    "description": "Operation to perform"
                },
                "trim_mode": {
                    "type": "string",
                    "enum": ["both", "leading", "trailing"],
                    "description": "For trim: which whitespace to remove",
                    "default": "both"
                },
                "tab_size": {
                    "type": "integer",
                    "description": "Tab width in spaces",
                    "default": 4
                },
                "line_ending": {
                    "type": "string",
                    "enum": ["lf", "crlf", "cr"],
                    "description": "For normalize_endings: target line ending",
                    "default": "lf"
                }
            },
            "required": ["text", "operation"]
        },
        handler=self._handle_whitespace_tools
    ))
|
|
496
|
+
|
|
497
|
+
def _handle_whitespace_tools(self, args: Dict[str, Any]) -> str:
    """Execute the requested whitespace operation on ``args['text']``.

    Dispatches on ``args['operation']`` (default ``"trim"``) and returns
    the transformed text, or an "Unknown operation" message for an
    unrecognized operation.
    """
    from tools.whitespace_tools import WhitespaceToolsProcessor

    src = args.get("text", "")
    op = args.get("operation", "trim")

    if op == "trim":
        return WhitespaceToolsProcessor.trim_lines(src, args.get("trim_mode", "both"))
    if op == "remove_extra_spaces":
        return WhitespaceToolsProcessor.remove_extra_spaces(src)
    if op in ("tabs_to_spaces", "spaces_to_tabs"):
        width = args.get("tab_size", 4)
        convert = (WhitespaceToolsProcessor.tabs_to_spaces
                   if op == "tabs_to_spaces"
                   else WhitespaceToolsProcessor.spaces_to_tabs)
        return convert(src, width)
    if op == "normalize_endings":
        return WhitespaceToolsProcessor.normalize_line_endings(
            src, args.get("line_ending", "lf"))
    return f"Unknown operation: {op}"
|
|
520
|
+
|
|
521
|
+
def _register_string_escape_tool(self) -> None:
    """Register the String Escape Tool (``pomera_string_escape``).

    Execution is delegated to ``_handle_string_escape``.
    """
    # NOTE(review): the description mentions JavaScript and SQL, but the
    # operation enum below offers no js/sql operations — confirm intended.
    self.register(MCPToolAdapter(
        name="pomera_string_escape",
        description="Escape/unescape strings for various formats: JSON, HTML, URL, XML, JavaScript, SQL.",
        input_schema={
            "type": "object",
            "properties": {
                "text": {
                    "type": "string",
                    "description": "The text to escape or unescape"
                },
                "operation": {
                    "type": "string",
                    "enum": ["json_escape", "json_unescape", "html_escape", "html_unescape",
                             "url_encode", "url_decode", "xml_escape", "xml_unescape"],
                    "description": "Escape/unescape operation"
                }
            },
            "required": ["text", "operation"]
        },
        handler=self._handle_string_escape
    ))
|
|
544
|
+
|
|
545
|
+
def _handle_string_escape(self, args: Dict[str, Any]) -> str:
    """Escape or unescape ``args['text']`` per ``args['operation']``.

    Returns the converted text, or an "Unknown operation" message.
    """
    from tools.string_escape_tool import StringEscapeProcessor

    payload = args.get("text", "")
    op = args.get("operation", "json_escape")

    # Every supported operation name matches a StringEscapeProcessor
    # static method of the same name, so dispatch via getattr.
    supported = {"json_escape", "json_unescape", "html_escape", "html_unescape",
                 "url_encode", "url_decode", "xml_escape", "xml_unescape"}
    if op in supported:
        return getattr(StringEscapeProcessor, op)(payload)
    return f"Unknown operation: {op}"
|
|
566
|
+
|
|
567
|
+
def _register_sorter_tools(self) -> None:
    """Register the Sorter Tools (``pomera_sort``).

    Execution is delegated to ``_handle_sorter``.
    """
    self.register(MCPToolAdapter(
        name="pomera_sort",
        description="Sort lines numerically or alphabetically, ascending or descending.",
        input_schema={
            "type": "object",
            "properties": {
                "text": {
                    "type": "string",
                    "description": "Text with lines to sort"
                },
                "sort_type": {
                    "type": "string",
                    "enum": ["number", "alphabetical"],
                    "description": "Type of sorting"
                },
                "order": {
                    "type": "string",
                    "enum": ["ascending", "descending"],
                    "description": "Sort order",
                    "default": "ascending"
                },
                "unique_only": {
                    "type": "boolean",
                    "description": "For alphabetical: remove duplicates",
                    "default": False
                },
                "trim": {
                    "type": "boolean",
                    "description": "For alphabetical: trim whitespace",
                    "default": False
                }
            },
            "required": ["text", "sort_type"]
        },
        handler=self._handle_sorter
    ))
|
|
605
|
+
|
|
606
|
+
def _handle_sorter(self, args: Dict[str, Any]) -> str:
    """Sort the lines of ``args['text']``.

    ``sort_type`` selects numeric or alphabetical sorting (default
    alphabetical); ``order``, ``unique_only`` and ``trim`` tune the
    alphabetical path.
    """
    from tools.sorter_tools import SorterToolsProcessor

    content = args.get("text", "")
    direction = args.get("order", "ascending")

    if args.get("sort_type", "alphabetical") == "number":
        return SorterToolsProcessor.number_sorter(content, direction)
    return SorterToolsProcessor.alphabetical_sorter(
        content,
        direction,
        args.get("unique_only", False),
        args.get("trim", False),
    )
|
|
620
|
+
|
|
621
|
+
def _register_text_stats_tool(self) -> None:
    """Register the Text Statistics Tool (``pomera_text_stats``).

    Execution is delegated to ``_handle_text_stats``.
    """
    self.register(MCPToolAdapter(
        name="pomera_text_stats",
        description="Analyze text and return statistics: character count, word count, "
                    "line count, sentence count, reading time, and top frequent words.",
        input_schema={
            "type": "object",
            "properties": {
                "text": {
                    "type": "string",
                    "description": "Text to analyze"
                },
                "words_per_minute": {
                    "type": "integer",
                    "description": "Reading speed for time estimate",
                    "default": 200
                }
            },
            "required": ["text"]
        },
        handler=self._handle_text_stats
    ))
|
|
644
|
+
|
|
645
|
+
def _handle_text_stats(self, args: Dict[str, Any]) -> str:
    """Analyze ``args['text']`` and format the statistics as a report.

    Args:
        args: Tool arguments; ``text`` (required) and ``words_per_minute``
            (reading speed for the time estimate, default 200).

    Returns:
        A multi-line, human-readable report: character/word/line/sentence/
        paragraph counts, average word length, reading time, and up to ten
        of the most frequent words.
    """
    # Fix: dropped an unused local `import json` — the report is built
    # by hand, nothing here serializes JSON.
    from tools.text_statistics_tool import TextStatisticsProcessor

    text = args.get("text", "")
    wpm = args.get("words_per_minute", 200)

    stats = TextStatisticsProcessor.analyze_text(text, wpm)

    # Format as readable output
    lines = [
        "=== Text Statistics ===",
        f"Characters: {stats['char_count']} (without spaces: {stats['char_count_no_spaces']})",
        f"Words: {stats['word_count']} (unique: {stats['unique_words']})",
        f"Lines: {stats['line_count']} (non-empty: {stats.get('non_empty_lines', stats['line_count'])})",
        f"Sentences: {stats['sentence_count']}",
        f"Paragraphs: {stats['paragraph_count']}",
        f"Average word length: {stats['avg_word_length']} characters",
        f"Reading time: {stats['reading_time_seconds']} seconds (~{stats['reading_time_seconds']//60} min)",
    ]

    if stats['top_words']:
        lines.append("\nTop words:")
        for word, count in stats['top_words'][:10]:
            lines.append(f"  {word}: {count}")

    return "\n".join(lines)
|
|
673
|
+
|
|
674
|
+
def _register_json_xml_tool(self) -> None:
    """Register the JSON/XML Tool (``pomera_json_xml``).

    Execution is delegated to ``_handle_json_xml``.
    """
    self.register(MCPToolAdapter(
        name="pomera_json_xml",
        description="Convert between JSON and XML, prettify, minify, or validate JSON/XML.",
        input_schema={
            "type": "object",
            "properties": {
                "text": {
                    "type": "string",
                    "description": "JSON or XML text to process"
                },
                "operation": {
                    "type": "string",
                    "enum": ["json_prettify", "json_minify", "json_validate",
                             "xml_prettify", "xml_minify", "xml_validate",
                             "json_to_xml", "xml_to_json"],
                    "description": "Operation to perform"
                },
                "indent": {
                    "type": "integer",
                    "description": "Indentation spaces for prettify",
                    "default": 2
                }
            },
            "required": ["text", "operation"]
        },
        handler=self._handle_json_xml
    ))
|
|
703
|
+
|
|
704
|
+
def _handle_json_xml(self, args: Dict[str, Any]) -> str:
|
|
705
|
+
"""Handle JSON/XML tool execution."""
|
|
706
|
+
import json
|
|
707
|
+
import xml.etree.ElementTree as ET
|
|
708
|
+
import xml.dom.minidom
|
|
709
|
+
|
|
710
|
+
text = args.get("text", "")
|
|
711
|
+
operation = args.get("operation", "json_prettify")
|
|
712
|
+
indent = args.get("indent", 2)
|
|
713
|
+
|
|
714
|
+
try:
|
|
715
|
+
if operation == "json_prettify":
|
|
716
|
+
data = json.loads(text)
|
|
717
|
+
return json.dumps(data, indent=indent, ensure_ascii=False)
|
|
718
|
+
|
|
719
|
+
elif operation == "json_minify":
|
|
720
|
+
data = json.loads(text)
|
|
721
|
+
return json.dumps(data, separators=(',', ':'), ensure_ascii=False)
|
|
722
|
+
|
|
723
|
+
elif operation == "json_validate":
|
|
724
|
+
json.loads(text)
|
|
725
|
+
return "Valid JSON"
|
|
726
|
+
|
|
727
|
+
elif operation == "xml_prettify":
|
|
728
|
+
dom = xml.dom.minidom.parseString(text)
|
|
729
|
+
return dom.toprettyxml(indent=" " * indent)
|
|
730
|
+
|
|
731
|
+
elif operation == "xml_minify":
|
|
732
|
+
root = ET.fromstring(text)
|
|
733
|
+
return ET.tostring(root, encoding='unicode')
|
|
734
|
+
|
|
735
|
+
elif operation == "xml_validate":
|
|
736
|
+
ET.fromstring(text)
|
|
737
|
+
return "Valid XML"
|
|
738
|
+
|
|
739
|
+
elif operation == "json_to_xml":
|
|
740
|
+
data = json.loads(text)
|
|
741
|
+
return self._dict_to_xml(data, "root")
|
|
742
|
+
|
|
743
|
+
elif operation == "xml_to_json":
|
|
744
|
+
root = ET.fromstring(text)
|
|
745
|
+
data = self._xml_to_dict(root)
|
|
746
|
+
return json.dumps(data, indent=indent, ensure_ascii=False)
|
|
747
|
+
|
|
748
|
+
else:
|
|
749
|
+
return f"Unknown operation: {operation}"
|
|
750
|
+
|
|
751
|
+
except json.JSONDecodeError as e:
|
|
752
|
+
return f"JSON Error: {str(e)}"
|
|
753
|
+
except ET.ParseError as e:
|
|
754
|
+
return f"XML Error: {str(e)}"
|
|
755
|
+
except Exception as e:
|
|
756
|
+
return f"Error: {str(e)}"
|
|
757
|
+
|
|
758
|
+
def _dict_to_xml(self, data: Any, root_name: str = "root") -> str:
|
|
759
|
+
"""Convert dictionary to XML string."""
|
|
760
|
+
import xml.etree.ElementTree as ET
|
|
761
|
+
|
|
762
|
+
def build_element(parent, data):
|
|
763
|
+
if isinstance(data, dict):
|
|
764
|
+
for key, value in data.items():
|
|
765
|
+
child = ET.SubElement(parent, str(key))
|
|
766
|
+
build_element(child, value)
|
|
767
|
+
elif isinstance(data, list):
|
|
768
|
+
for item in data:
|
|
769
|
+
child = ET.SubElement(parent, "item")
|
|
770
|
+
build_element(child, item)
|
|
771
|
+
else:
|
|
772
|
+
parent.text = str(data) if data is not None else ""
|
|
773
|
+
|
|
774
|
+
root = ET.Element(root_name)
|
|
775
|
+
build_element(root, data)
|
|
776
|
+
return ET.tostring(root, encoding='unicode')
|
|
777
|
+
|
|
778
|
+
def _xml_to_dict(self, element) -> Dict[str, Any]:
|
|
779
|
+
"""Convert XML element to dictionary."""
|
|
780
|
+
result = {}
|
|
781
|
+
|
|
782
|
+
for child in element:
|
|
783
|
+
if len(child) == 0:
|
|
784
|
+
result[child.tag] = child.text or ""
|
|
785
|
+
else:
|
|
786
|
+
child_data = self._xml_to_dict(child)
|
|
787
|
+
if child.tag in result:
|
|
788
|
+
if not isinstance(result[child.tag], list):
|
|
789
|
+
result[child.tag] = [result[child.tag]]
|
|
790
|
+
result[child.tag].append(child_data)
|
|
791
|
+
else:
|
|
792
|
+
result[child.tag] = child_data
|
|
793
|
+
|
|
794
|
+
return result if result else (element.text or "")
|
|
795
|
+
|
|
796
|
+
def _register_url_parser_tool(self) -> None:
    """Register the URL Parser Tool (``pomera_url_parse``).

    Execution is delegated to ``_handle_url_parse``.
    """
    self.register(MCPToolAdapter(
        name="pomera_url_parse",
        description="Parse a URL and extract its components: scheme, host, port, path, query, fragment.",
        input_schema={
            "type": "object",
            "properties": {
                "url": {
                    "type": "string",
                    "description": "URL to parse"
                }
            },
            "required": ["url"]
        },
        handler=self._handle_url_parse
    ))
|
|
813
|
+
|
|
814
|
+
def _handle_url_parse(self, args: Dict[str, Any]) -> str:
|
|
815
|
+
"""Handle URL parser tool execution."""
|
|
816
|
+
from urllib.parse import urlparse, parse_qs
|
|
817
|
+
|
|
818
|
+
url = args.get("url", "")
|
|
819
|
+
|
|
820
|
+
try:
|
|
821
|
+
parsed = urlparse(url)
|
|
822
|
+
query_params = parse_qs(parsed.query)
|
|
823
|
+
|
|
824
|
+
lines = [
|
|
825
|
+
"=== URL Components ===",
|
|
826
|
+
f"Scheme: {parsed.scheme or '(none)'}",
|
|
827
|
+
f"Host: {parsed.hostname or '(none)'}",
|
|
828
|
+
f"Port: {parsed.port or '(default)'}",
|
|
829
|
+
f"Path: {parsed.path or '/'}",
|
|
830
|
+
f"Query: {parsed.query or '(none)'}",
|
|
831
|
+
f"Fragment: {parsed.fragment or '(none)'}",
|
|
832
|
+
]
|
|
833
|
+
|
|
834
|
+
if query_params:
|
|
835
|
+
lines.append("\nQuery Parameters:")
|
|
836
|
+
for key, values in query_params.items():
|
|
837
|
+
for value in values:
|
|
838
|
+
lines.append(f" {key} = {value}")
|
|
839
|
+
|
|
840
|
+
return "\n".join(lines)
|
|
841
|
+
|
|
842
|
+
except Exception as e:
|
|
843
|
+
return f"Error parsing URL: {str(e)}"
|
|
844
|
+
|
|
845
|
+
def _register_text_wrapper_tool(self) -> None:
    """Register the Text Wrapper Tool (``pomera_text_wrap``).

    Execution is delegated to ``_handle_text_wrap``.
    """
    self.register(MCPToolAdapter(
        name="pomera_text_wrap",
        description="Wrap text to a specified width, preserving words.",
        input_schema={
            "type": "object",
            "properties": {
                "text": {
                    "type": "string",
                    "description": "Text to wrap"
                },
                "width": {
                    "type": "integer",
                    "description": "Maximum line width",
                    "default": 80
                }
            },
            "required": ["text"]
        },
        handler=self._handle_text_wrap
    ))
|
|
867
|
+
|
|
868
|
+
def _handle_text_wrap(self, args: Dict[str, Any]) -> str:
|
|
869
|
+
"""Handle text wrapper tool execution."""
|
|
870
|
+
import textwrap
|
|
871
|
+
|
|
872
|
+
text = args.get("text", "")
|
|
873
|
+
width = args.get("width", 80)
|
|
874
|
+
|
|
875
|
+
# Wrap each paragraph separately
|
|
876
|
+
paragraphs = text.split('\n\n')
|
|
877
|
+
wrapped = []
|
|
878
|
+
|
|
879
|
+
for para in paragraphs:
|
|
880
|
+
if para.strip():
|
|
881
|
+
wrapped.append(textwrap.fill(para, width=width))
|
|
882
|
+
else:
|
|
883
|
+
wrapped.append("")
|
|
884
|
+
|
|
885
|
+
return '\n\n'.join(wrapped)
|
|
886
|
+
|
|
887
|
+
def _register_number_base_tool(self) -> None:
    """Register the Number Base Converter Tool (``pomera_number_base``).

    Execution is delegated to ``_handle_number_base``.
    """
    self.register(MCPToolAdapter(
        name="pomera_number_base",
        description="Convert numbers between bases: binary, octal, decimal, hexadecimal.",
        input_schema={
            "type": "object",
            "properties": {
                "value": {
                    "type": "string",
                    "description": "Number to convert (can include 0x, 0b, 0o prefix)"
                },
                "from_base": {
                    "type": "string",
                    "enum": ["binary", "octal", "decimal", "hex", "auto"],
                    "description": "Source base (auto detects from prefix)",
                    "default": "auto"
                },
                "to_base": {
                    "type": "string",
                    "enum": ["binary", "octal", "decimal", "hex", "all"],
                    "description": "Target base (all shows all bases)",
                    "default": "all"
                }
            },
            "required": ["value"]
        },
        handler=self._handle_number_base
    ))
|
|
916
|
+
|
|
917
|
+
def _handle_number_base(self, args: Dict[str, Any]) -> str:
|
|
918
|
+
"""Handle number base converter tool execution."""
|
|
919
|
+
value = args.get("value", "").strip()
|
|
920
|
+
from_base = args.get("from_base", "auto")
|
|
921
|
+
to_base = args.get("to_base", "all")
|
|
922
|
+
|
|
923
|
+
try:
|
|
924
|
+
# Parse input number
|
|
925
|
+
if from_base == "auto":
|
|
926
|
+
if value.startswith('0x') or value.startswith('0X'):
|
|
927
|
+
num = int(value, 16)
|
|
928
|
+
elif value.startswith('0b') or value.startswith('0B'):
|
|
929
|
+
num = int(value, 2)
|
|
930
|
+
elif value.startswith('0o') or value.startswith('0O'):
|
|
931
|
+
num = int(value, 8)
|
|
932
|
+
else:
|
|
933
|
+
num = int(value, 10)
|
|
934
|
+
else:
|
|
935
|
+
bases = {"binary": 2, "octal": 8, "decimal": 10, "hex": 16}
|
|
936
|
+
num = int(value.replace('0x', '').replace('0b', '').replace('0o', ''), bases[from_base])
|
|
937
|
+
|
|
938
|
+
# Convert to target base(s)
|
|
939
|
+
if to_base == "all":
|
|
940
|
+
return (f"Decimal: {num}\n"
|
|
941
|
+
f"Binary: 0b{bin(num)[2:]}\n"
|
|
942
|
+
f"Octal: 0o{oct(num)[2:]}\n"
|
|
943
|
+
f"Hexadecimal: 0x{hex(num)[2:]}")
|
|
944
|
+
elif to_base == "binary":
|
|
945
|
+
return f"0b{bin(num)[2:]}"
|
|
946
|
+
elif to_base == "octal":
|
|
947
|
+
return f"0o{oct(num)[2:]}"
|
|
948
|
+
elif to_base == "decimal":
|
|
949
|
+
return str(num)
|
|
950
|
+
elif to_base == "hex":
|
|
951
|
+
return f"0x{hex(num)[2:]}"
|
|
952
|
+
else:
|
|
953
|
+
return f"Unknown target base: {to_base}"
|
|
954
|
+
|
|
955
|
+
except ValueError as e:
|
|
956
|
+
return f"Error: Invalid number format - {str(e)}"
|
|
957
|
+
|
|
958
|
+
def _register_timestamp_tool(self) -> None:
    """Register the Timestamp Converter Tool (``pomera_timestamp``).

    Execution is delegated to ``_handle_timestamp``.
    """
    # NOTE(review): "value" is required by the schema even for the "now"
    # operation, which ignores it — confirm whether that is intended.
    self.register(MCPToolAdapter(
        name="pomera_timestamp",
        description="Convert between Unix timestamps and human-readable dates.",
        input_schema={
            "type": "object",
            "properties": {
                "value": {
                    "type": "string",
                    "description": "Unix timestamp or date string to convert"
                },
                "operation": {
                    "type": "string",
                    "enum": ["to_date", "to_timestamp", "now"],
                    "description": "Conversion direction or get current time",
                    "default": "to_date"
                },
                "format": {
                    "type": "string",
                    "enum": ["iso", "us", "eu", "long", "short"],
                    "description": "Output date format",
                    "default": "iso"
                }
            },
            "required": ["value"]
        },
        handler=self._handle_timestamp
    ))
|
|
987
|
+
|
|
988
|
+
def _handle_timestamp(self, args: Dict[str, Any]) -> str:
|
|
989
|
+
"""Handle timestamp converter tool execution."""
|
|
990
|
+
from datetime import datetime
|
|
991
|
+
import time
|
|
992
|
+
|
|
993
|
+
value = args.get("value", "").strip()
|
|
994
|
+
operation = args.get("operation", "to_date")
|
|
995
|
+
date_format = args.get("format", "iso")
|
|
996
|
+
|
|
997
|
+
formats = {
|
|
998
|
+
"iso": "%Y-%m-%dT%H:%M:%S",
|
|
999
|
+
"us": "%m/%d/%Y %I:%M:%S %p",
|
|
1000
|
+
"eu": "%d/%m/%Y %H:%M:%S",
|
|
1001
|
+
"long": "%B %d, %Y %H:%M:%S",
|
|
1002
|
+
"short": "%b %d, %Y %H:%M"
|
|
1003
|
+
}
|
|
1004
|
+
|
|
1005
|
+
try:
|
|
1006
|
+
if operation == "now":
|
|
1007
|
+
now = datetime.now()
|
|
1008
|
+
ts = int(time.time())
|
|
1009
|
+
return (f"Current time:\n"
|
|
1010
|
+
f" Unix timestamp: {ts}\n"
|
|
1011
|
+
f" ISO: {now.strftime(formats['iso'])}\n"
|
|
1012
|
+
f" US: {now.strftime(formats['us'])}\n"
|
|
1013
|
+
f" EU: {now.strftime(formats['eu'])}")
|
|
1014
|
+
|
|
1015
|
+
elif operation == "to_date":
|
|
1016
|
+
ts = float(value)
|
|
1017
|
+
# Handle milliseconds
|
|
1018
|
+
if ts > 1e12:
|
|
1019
|
+
ts = ts / 1000
|
|
1020
|
+
dt = datetime.fromtimestamp(ts)
|
|
1021
|
+
return dt.strftime(formats.get(date_format, formats['iso']))
|
|
1022
|
+
|
|
1023
|
+
elif operation == "to_timestamp":
|
|
1024
|
+
# Try common date formats
|
|
1025
|
+
for fmt in ["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%m/%d/%Y", "%d/%m/%Y"]:
|
|
1026
|
+
try:
|
|
1027
|
+
dt = datetime.strptime(value, fmt)
|
|
1028
|
+
return str(int(dt.timestamp()))
|
|
1029
|
+
except ValueError:
|
|
1030
|
+
continue
|
|
1031
|
+
return "Error: Could not parse date. Try formats: YYYY-MM-DD, MM/DD/YYYY"
|
|
1032
|
+
|
|
1033
|
+
else:
|
|
1034
|
+
return f"Unknown operation: {operation}"
|
|
1035
|
+
|
|
1036
|
+
except ValueError as e:
|
|
1037
|
+
return f"Error: {str(e)}"
|
|
1038
|
+
|
|
1039
|
+
# =========================================================================
|
|
1040
|
+
# Phase 2 Tools - Additional Pomera Tools
|
|
1041
|
+
# =========================================================================
|
|
1042
|
+
|
|
1043
|
+
def _register_extract_tool(self) -> None:
    """Register the unified Extraction Tool (``pomera_extract``).

    One tool covers regex, email, and URL extraction; routing happens in
    ``_handle_extract``.
    """
    self.register(MCPToolAdapter(
        name="pomera_extract",
        description="Extract content from text. Types: regex (pattern matching), emails (email addresses), "
                    "urls (web links). All types support deduplication and sorting.",
        input_schema={
            "type": "object",
            "properties": {
                "text": {
                    "type": "string",
                    "description": "Text to extract from"
                },
                "type": {
                    "type": "string",
                    "enum": ["regex", "emails", "urls"],
                    "description": "Extraction type"
                },
                "pattern": {
                    "type": "string",
                    "description": "For regex: regular expression pattern"
                },
                "match_mode": {
                    "type": "string",
                    "enum": ["all_per_line", "first_per_line"],
                    "description": "For regex: match all occurrences or first per line",
                    "default": "all_per_line"
                },
                "omit_duplicates": {
                    "type": "boolean",
                    "description": "Remove duplicate matches",
                    "default": False
                },
                "sort_results": {
                    "type": "boolean",
                    "description": "Sort results alphabetically",
                    "default": False
                },
                "case_sensitive": {
                    "type": "boolean",
                    "description": "For regex: case-sensitive matching",
                    "default": False
                },
                "only_domain": {
                    "type": "boolean",
                    "description": "For emails: extract only domains",
                    "default": False
                },
                "extract_href": {
                    "type": "boolean",
                    "description": "For urls: extract from HTML href",
                    "default": False
                },
                "extract_https": {
                    "type": "boolean",
                    "description": "For urls: extract http/https URLs",
                    "default": True
                },
                "extract_any_protocol": {
                    "type": "boolean",
                    "description": "For urls: extract any protocol",
                    "default": False
                },
                "extract_markdown": {
                    "type": "boolean",
                    "description": "For urls: extract markdown links",
                    "default": False
                },
                "filter_text": {
                    "type": "string",
                    "description": "For urls: filter by text",
                    "default": ""
                }
            },
            "required": ["text", "type"]
        },
        handler=self._handle_extract
    ))
|
|
1121
|
+
|
|
1122
|
+
def _handle_extract(self, args: Dict[str, Any]) -> str:
|
|
1123
|
+
"""Route extraction to appropriate handler."""
|
|
1124
|
+
extract_type = args.get("type", "")
|
|
1125
|
+
|
|
1126
|
+
if extract_type == "regex":
|
|
1127
|
+
return self._handle_regex_extract(args)
|
|
1128
|
+
elif extract_type == "emails":
|
|
1129
|
+
return self._handle_email_extraction(args)
|
|
1130
|
+
elif extract_type == "urls":
|
|
1131
|
+
return self._handle_url_extraction(args)
|
|
1132
|
+
else:
|
|
1133
|
+
return f"Unknown extraction type: {extract_type}. Valid types: regex, emails, urls"
|
|
1134
|
+
|
|
1135
|
+
def _handle_regex_extract(self, args: Dict[str, Any]) -> str:
    """Run the regex extractor over ``args['text']``.

    Requires ``args['pattern']``; optional flags tune match mode,
    deduplication, sorting, and case sensitivity.
    """
    from tools.regex_extractor import RegexExtractorProcessor

    source = args.get("text", "")
    pattern = args.get("pattern", "")
    if not pattern:
        return "Error: 'pattern' is required for regex extraction"

    return RegexExtractorProcessor.extract_matches(
        source,
        pattern,
        args.get("match_mode", "all_per_line"),
        args.get("omit_duplicates", False),
        hide_counts=True,
        sort_results=args.get("sort_results", False),
        case_sensitive=args.get("case_sensitive", False),
    )
|
|
1153
|
+
|
|
1154
|
+
def _register_markdown_tools(self) -> None:
    """Register the Markdown Tools (``pomera_markdown``).

    Execution is delegated to ``_handle_markdown_tools``.
    """
    self.register(MCPToolAdapter(
        name="pomera_markdown",
        description="Markdown processing: strip formatting, extract links, extract headers, "
                    "convert tables to CSV, format tables.",
        input_schema={
            "type": "object",
            "properties": {
                "text": {
                    "type": "string",
                    "description": "Markdown text to process"
                },
                "operation": {
                    "type": "string",
                    "enum": ["strip", "extract_links", "extract_headers",
                             "table_to_csv", "format_table"],
                    "description": "Operation to perform"
                },
                "preserve_links_text": {
                    "type": "boolean",
                    "description": "For strip: keep link text",
                    "default": True
                },
                "include_images": {
                    "type": "boolean",
                    "description": "For extract_links: include image links",
                    "default": False
                },
                "header_format": {
                    "type": "string",
                    "enum": ["indented", "flat", "numbered"],
                    "description": "For extract_headers: output format",
                    "default": "indented"
                }
            },
            "required": ["text", "operation"]
        },
        handler=self._handle_markdown_tools
    ))
|
|
1194
|
+
|
|
1195
|
+
def _handle_markdown_tools(self, args: Dict[str, Any]) -> str:
    """Dispatch a markdown operation to ``MarkdownToolsProcessor``.

    Supported: strip, extract_links, extract_headers, table_to_csv,
    format_table. Unknown operations yield an error string.
    """
    from tools.markdown_tools import MarkdownToolsProcessor

    body = args.get("text", "")
    op = args.get("operation", "strip")

    if op == "strip":
        return MarkdownToolsProcessor.strip_markdown(
            body, args.get("preserve_links_text", True))
    if op == "extract_links":
        return MarkdownToolsProcessor.extract_links(
            body, args.get("include_images", False))
    if op == "extract_headers":
        return MarkdownToolsProcessor.extract_headers(
            body, args.get("header_format", "indented"))
    if op == "table_to_csv":
        return MarkdownToolsProcessor.table_to_csv(body)
    if op == "format_table":
        return MarkdownToolsProcessor.format_table(body)
    return f"Unknown operation: {op}"
|
|
1217
|
+
|
|
1218
|
+
def _register_translator_tools(self) -> None:
    """Register the Translator Tools (``pomera_translator``, Morse/Binary).

    Execution is delegated to ``_handle_translator``.
    """
    self.register(MCPToolAdapter(
        name="pomera_translator",
        description="Translate text to/from Morse code or binary.",
        input_schema={
            "type": "object",
            "properties": {
                "text": {
                    "type": "string",
                    "description": "Text to translate"
                },
                "format": {
                    "type": "string",
                    "enum": ["morse", "binary"],
                    "description": "Translation format"
                },
                "direction": {
                    "type": "string",
                    "enum": ["encode", "decode", "auto"],
                    "description": "Translation direction (auto-detects for binary)",
                    "default": "encode"
                }
            },
            "required": ["text", "format"]
        },
        handler=self._handle_translator
    ))
|
|
1246
|
+
|
|
1247
|
+
def _handle_translator(self, args: Dict[str, Any]) -> str:
    """Translate ``args['text']`` to/from Morse code or binary.

    For Morse, ``direction`` chooses encode (text -> morse) or decode;
    the binary translator detects direction on its own.
    """
    from tools.translator_tools import TranslatorToolsProcessor

    payload = args.get("text", "")
    target = args.get("format", "morse")

    if target == "morse":
        # "encode" means text -> morse; anything else decodes back to text.
        mode = "morse" if args.get("direction", "encode") == "encode" else "text"
        return TranslatorToolsProcessor.morse_translator(payload, mode)
    if target == "binary":
        return TranslatorToolsProcessor.binary_translator(payload)
    return f"Unknown format: {target}"
|
|
1263
|
+
|
|
1264
|
+
def _register_cron_tool(self) -> None:
    """Register the Cron Expression Tool (``pomera_cron``).

    Execution is delegated to ``_handle_cron``.
    """
    self.register(MCPToolAdapter(
        name="pomera_cron",
        description="Parse and explain cron expressions, validate syntax, calculate next run times.",
        input_schema={
            "type": "object",
            "properties": {
                "expression": {
                    "type": "string",
                    "description": "Cron expression (5 fields: minute hour day month weekday)"
                },
                "operation": {
                    "type": "string",
                    "enum": ["explain", "validate", "next_runs"],
                    "description": "Operation to perform"
                },
                "count": {
                    "type": "integer",
                    "description": "For next_runs: number of runs to calculate",
                    "default": 5
                }
            },
            "required": ["expression", "operation"]
        },
        handler=self._handle_cron
    ))
|
|
1291
|
+
|
|
1292
|
+
def _handle_cron(self, args: Dict[str, Any]) -> str:
|
|
1293
|
+
"""Handle cron tool execution."""
|
|
1294
|
+
from datetime import datetime, timedelta
|
|
1295
|
+
|
|
1296
|
+
expression = args.get("expression", "").strip()
|
|
1297
|
+
operation = args.get("operation", "explain")
|
|
1298
|
+
count = args.get("count", 5)
|
|
1299
|
+
|
|
1300
|
+
parts = expression.split()
|
|
1301
|
+
if len(parts) != 5:
|
|
1302
|
+
return f"Error: Invalid cron expression. Expected 5 fields, got {len(parts)}.\nFormat: minute hour day month weekday"
|
|
1303
|
+
|
|
1304
|
+
minute, hour, day, month, weekday = parts
|
|
1305
|
+
|
|
1306
|
+
if operation == "explain":
|
|
1307
|
+
return self._explain_cron(minute, hour, day, month, weekday)
|
|
1308
|
+
elif operation == "validate":
|
|
1309
|
+
return self._validate_cron(minute, hour, day, month, weekday)
|
|
1310
|
+
elif operation == "next_runs":
|
|
1311
|
+
return self._calculate_cron_runs(expression, count)
|
|
1312
|
+
else:
|
|
1313
|
+
return f"Unknown operation: {operation}"
|
|
1314
|
+
|
|
1315
|
+
def _explain_cron(self, minute: str, hour: str, day: str, month: str, weekday: str) -> str:
|
|
1316
|
+
"""Generate human-readable explanation of cron expression."""
|
|
1317
|
+
def explain_field(value: str, field_type: str) -> str:
|
|
1318
|
+
ranges = {
|
|
1319
|
+
"minute": (0, 59), "hour": (0, 23),
|
|
1320
|
+
"day": (1, 31), "month": (1, 12), "weekday": (0, 6)
|
|
1321
|
+
}
|
|
1322
|
+
min_val, max_val = ranges[field_type]
|
|
1323
|
+
|
|
1324
|
+
if value == "*":
|
|
1325
|
+
return f"every {field_type}"
|
|
1326
|
+
elif value.startswith("*/"):
|
|
1327
|
+
step = value[2:]
|
|
1328
|
+
return f"every {step} {field_type}s"
|
|
1329
|
+
elif "-" in value:
|
|
1330
|
+
return f"{field_type}s {value}"
|
|
1331
|
+
elif "," in value:
|
|
1332
|
+
return f"{field_type}s {value}"
|
|
1333
|
+
else:
|
|
1334
|
+
return f"{field_type} {value}"
|
|
1335
|
+
|
|
1336
|
+
lines = [
|
|
1337
|
+
f"Cron Expression: {minute} {hour} {day} {month} {weekday}",
|
|
1338
|
+
"=" * 50,
|
|
1339
|
+
"",
|
|
1340
|
+
"Field Breakdown:",
|
|
1341
|
+
f" Minute: {minute:10} - {explain_field(minute, 'minute')}",
|
|
1342
|
+
f" Hour: {hour:10} - {explain_field(hour, 'hour')}",
|
|
1343
|
+
f" Day: {day:10} - {explain_field(day, 'day')}",
|
|
1344
|
+
f" Month: {month:10} - {explain_field(month, 'month')}",
|
|
1345
|
+
f" Weekday: {weekday:10} - {explain_field(weekday, 'weekday')} (0=Sun, 6=Sat)"
|
|
1346
|
+
]
|
|
1347
|
+
return "\n".join(lines)
|
|
1348
|
+
|
|
1349
|
+
def _validate_cron(self, minute: str, hour: str, day: str, month: str, weekday: str) -> str:
|
|
1350
|
+
"""Validate cron expression fields."""
|
|
1351
|
+
import re
|
|
1352
|
+
|
|
1353
|
+
def validate_field(value: str, min_val: int, max_val: int, name: str) -> List[str]:
|
|
1354
|
+
errors = []
|
|
1355
|
+
cron_pattern = r'^(\*|(\d+(-\d+)?)(,\d+(-\d+)?)*|(\*/\d+))$'
|
|
1356
|
+
|
|
1357
|
+
if not re.match(cron_pattern, value):
|
|
1358
|
+
errors.append(f"{name}: Invalid format '{value}'")
|
|
1359
|
+
else:
|
|
1360
|
+
# Check numeric ranges
|
|
1361
|
+
nums = re.findall(r'\d+', value)
|
|
1362
|
+
for n in nums:
|
|
1363
|
+
if int(n) < min_val or int(n) > max_val:
|
|
1364
|
+
errors.append(f"{name}: Value {n} out of range ({min_val}-{max_val})")
|
|
1365
|
+
return errors
|
|
1366
|
+
|
|
1367
|
+
all_errors = []
|
|
1368
|
+
all_errors.extend(validate_field(minute, 0, 59, "Minute"))
|
|
1369
|
+
all_errors.extend(validate_field(hour, 0, 23, "Hour"))
|
|
1370
|
+
all_errors.extend(validate_field(day, 1, 31, "Day"))
|
|
1371
|
+
all_errors.extend(validate_field(month, 1, 12, "Month"))
|
|
1372
|
+
all_errors.extend(validate_field(weekday, 0, 6, "Weekday"))
|
|
1373
|
+
|
|
1374
|
+
if all_errors:
|
|
1375
|
+
return "❌ INVALID\n" + "\n".join(all_errors)
|
|
1376
|
+
return "✓ Valid cron expression"
|
|
1377
|
+
|
|
1378
|
+
def _calculate_cron_runs(self, expression: str, count: int) -> str:
|
|
1379
|
+
"""Calculate next scheduled runs for a cron expression."""
|
|
1380
|
+
from datetime import datetime, timedelta
|
|
1381
|
+
import re
|
|
1382
|
+
|
|
1383
|
+
parts = expression.split()
|
|
1384
|
+
minute, hour, day, month, weekday = parts
|
|
1385
|
+
|
|
1386
|
+
def matches_field(value: int, field: str) -> bool:
|
|
1387
|
+
if field == "*":
|
|
1388
|
+
return True
|
|
1389
|
+
if field.startswith("*/"):
|
|
1390
|
+
step = int(field[2:])
|
|
1391
|
+
return value % step == 0
|
|
1392
|
+
if "-" in field:
|
|
1393
|
+
start, end = map(int, field.split("-"))
|
|
1394
|
+
return start <= value <= end
|
|
1395
|
+
if "," in field:
|
|
1396
|
+
return value in [int(x) for x in field.split(",")]
|
|
1397
|
+
return value == int(field)
|
|
1398
|
+
|
|
1399
|
+
runs = []
|
|
1400
|
+
current = datetime.now().replace(second=0, microsecond=0) + timedelta(minutes=1)
|
|
1401
|
+
max_iterations = 525600 # One year of minutes
|
|
1402
|
+
|
|
1403
|
+
for _ in range(max_iterations):
|
|
1404
|
+
if (matches_field(current.minute, minute) and
|
|
1405
|
+
matches_field(current.hour, hour) and
|
|
1406
|
+
matches_field(current.day, day) and
|
|
1407
|
+
matches_field(current.month, month) and
|
|
1408
|
+
matches_field(current.weekday(), weekday.replace("7", "0"))):
|
|
1409
|
+
runs.append(current)
|
|
1410
|
+
if len(runs) >= count:
|
|
1411
|
+
break
|
|
1412
|
+
current += timedelta(minutes=1)
|
|
1413
|
+
|
|
1414
|
+
if not runs:
|
|
1415
|
+
return "Could not calculate next runs (expression may never match)"
|
|
1416
|
+
|
|
1417
|
+
lines = [f"Next {len(runs)} scheduled runs:", ""]
|
|
1418
|
+
for i, run in enumerate(runs, 1):
|
|
1419
|
+
lines.append(f" {i}. {run.strftime('%Y-%m-%d %H:%M')} ({run.strftime('%A')})")
|
|
1420
|
+
return "\n".join(lines)
|
|
1421
|
+
|
|
1422
|
+
def _register_email_extraction_tool(self) -> None:
    """Register the Email Extraction Tool."""
    # Schema is built separately to keep the adapter construction readable.
    schema = {
        "type": "object",
        "properties": {
            "text": {
                "type": "string",
                "description": "Text to extract emails from"
            },
            "omit_duplicates": {
                "type": "boolean",
                "description": "Remove duplicate emails",
                "default": True
            },
            "sort_emails": {
                "type": "boolean",
                "description": "Sort emails alphabetically",
                "default": False
            },
            "only_domain": {
                "type": "boolean",
                "description": "Extract only domains, not full addresses",
                "default": False
            }
        },
        "required": ["text"]
    }
    self.register(MCPToolAdapter(
        name="pomera_extract_emails",
        description="Extract email addresses from text with options for deduplication and sorting.",
        input_schema=schema,
        handler=self._handle_email_extraction
    ))
|
|
1454
|
+
|
|
1455
|
+
def _handle_email_extraction(self, args: Dict[str, Any]) -> str:
    """Handle email extraction tool execution."""
    from tools.email_extraction_tool import EmailExtractionProcessor

    # Pull arguments straight into the processor call; defaults mirror the schema.
    return EmailExtractionProcessor.extract_emails_advanced(
        args.get("text", ""),
        args.get("omit_duplicates", True),
        hide_counts=True,
        sort_emails=args.get("sort_emails", False),
        only_domain=args.get("only_domain", False),
    )
|
|
1468
|
+
|
|
1469
|
+
def _register_url_extractor_tool(self) -> None:
    """Register the URL Extractor Tool."""
    # Schema is built separately to keep the adapter construction readable.
    schema = {
        "type": "object",
        "properties": {
            "text": {
                "type": "string",
                "description": "Text to extract URLs from"
            },
            "extract_href": {
                "type": "boolean",
                "description": "Extract from HTML href attributes",
                "default": False
            },
            "extract_https": {
                "type": "boolean",
                "description": "Extract http/https URLs",
                "default": True
            },
            "extract_any_protocol": {
                "type": "boolean",
                "description": "Extract URLs with any protocol",
                "default": False
            },
            "extract_markdown": {
                "type": "boolean",
                "description": "Extract markdown links",
                "default": False
            },
            "filter_text": {
                "type": "string",
                "description": "Filter URLs containing this text",
                "default": ""
            }
        },
        "required": ["text"]
    }
    self.register(MCPToolAdapter(
        name="pomera_extract_urls",
        description="Extract URLs from text with options for different URL types.",
        input_schema=schema,
        handler=self._handle_url_extraction
    ))
|
|
1511
|
+
|
|
1512
|
+
def _handle_url_extraction(self, args: Dict[str, Any]) -> str:
    """Handle URL extraction tool execution."""
    from tools.url_link_extractor import URLLinkExtractorProcessor

    # Positional order matches URLLinkExtractorProcessor.extract_urls;
    # defaults mirror the registered input schema.
    return URLLinkExtractorProcessor.extract_urls(
        args.get("text", ""),
        args.get("extract_href", False),
        args.get("extract_https", True),
        args.get("extract_any_protocol", False),
        args.get("extract_markdown", False),
        args.get("filter_text", ""),
    )
|
|
1527
|
+
|
|
1528
|
+
def _register_word_frequency_tool(self) -> None:
    """Register the Word Frequency Counter Tool."""
    schema = {
        "type": "object",
        "properties": {
            "text": {
                "type": "string",
                "description": "Text to analyze"
            }
        },
        "required": ["text"]
    }
    self.register(MCPToolAdapter(
        name="pomera_word_frequency",
        description="Count word frequencies in text, showing count and percentage for each word.",
        input_schema=schema,
        handler=self._handle_word_frequency
    ))
|
|
1545
|
+
|
|
1546
|
+
def _handle_word_frequency(self, args: Dict[str, Any]) -> str:
    """Handle word frequency counter tool execution."""
    from tools.word_frequency_counter import WordFrequencyCounterProcessor

    return WordFrequencyCounterProcessor.word_frequency(args.get("text", ""))
|
|
1552
|
+
|
|
1553
|
+
def _register_column_tools(self) -> None:
    """Register the Column/CSV Tools."""
    schema = {
        "type": "object",
        "properties": {
            "text": {
                "type": "string",
                "description": "CSV or delimited text"
            },
            "operation": {
                "type": "string",
                "enum": ["extract", "reorder", "delete", "transpose", "to_fixed_width"],
                "description": "Operation to perform"
            },
            "column_index": {
                "type": "integer",
                "description": "For extract/delete: column index (0-based)",
                "default": 0
            },
            "column_order": {
                "type": "string",
                "description": "For reorder: comma-separated indices (e.g., '2,0,1')"
            },
            "delimiter": {
                "type": "string",
                "description": "Column delimiter",
                "default": ","
            }
        },
        "required": ["text", "operation"]
    }
    self.register(MCPToolAdapter(
        name="pomera_column_tools",
        description="CSV/column manipulation: extract column, reorder columns, delete column, "
                    "transpose, convert to fixed width.",
        input_schema=schema,
        handler=self._handle_column_tools
    ))
|
|
1590
|
+
|
|
1591
|
+
def _handle_column_tools(self, args: Dict[str, Any]) -> str:
    """Handle column tools execution."""
    from tools.column_tools import ColumnToolsProcessor

    text = args.get("text", "")
    operation = args.get("operation", "extract")
    delimiter = args.get("delimiter", ",")
    column_index = args.get("column_index", 0)
    column_order = args.get("column_order", "")

    # Guard-style dispatch: each operation returns immediately.
    if operation == "extract":
        return ColumnToolsProcessor.extract_column(text, column_index, delimiter)
    if operation == "reorder":
        if not column_order:
            return "Error: column_order is required for reorder operation"
        return ColumnToolsProcessor.reorder_columns(text, column_order, delimiter)
    if operation == "delete":
        return ColumnToolsProcessor.delete_column(text, column_index, delimiter)
    if operation == "transpose":
        return ColumnToolsProcessor.transpose(text, delimiter)
    if operation == "to_fixed_width":
        return ColumnToolsProcessor.to_fixed_width(text, delimiter)
    return f"Unknown operation: {operation}"
|
|
1615
|
+
|
|
1616
|
+
def _register_generator_tools(self) -> None:
    """Register the Generator Tools."""
    # Schema is built separately to keep the adapter construction readable.
    schema = {
        "type": "object",
        "properties": {
            "generator": {
                "type": "string",
                "enum": ["password", "uuid", "lorem_ipsum", "random_email", "slug"],
                "description": "Generator type"
            },
            "text": {
                "type": "string",
                "description": "For slug: text to convert to URL-friendly slug"
            },
            "length": {
                "type": "integer",
                "description": "For password: length in characters",
                "default": 20
            },
            "count": {
                "type": "integer",
                "description": "Number of items to generate",
                "default": 1
            },
            "uuid_version": {
                "type": "integer",
                "enum": [1, 4],
                "description": "UUID version (1=time-based, 4=random)",
                "default": 4
            },
            "lorem_type": {
                "type": "string",
                "enum": ["words", "sentences", "paragraphs"],
                "description": "For lorem_ipsum: unit type",
                "default": "paragraphs"
            },
            "separator": {
                "type": "string",
                "description": "For slug: word separator character",
                "default": "-"
            },
            "lowercase": {
                "type": "boolean",
                "description": "For slug: convert to lowercase",
                "default": True
            },
            "transliterate": {
                "type": "boolean",
                "description": "For slug: convert accented characters to ASCII",
                "default": True
            },
            "max_length": {
                "type": "integer",
                "description": "For slug: maximum slug length (0 = unlimited)",
                "default": 0
            },
            "remove_stopwords": {
                "type": "boolean",
                "description": "For slug: remove common stop words",
                "default": False
            }
        },
        "required": ["generator"]
    }
    self.register(MCPToolAdapter(
        name="pomera_generators",
        description="Generate passwords, UUIDs, Lorem Ipsum text, random emails, or URL slugs.",
        input_schema=schema,
        handler=self._handle_generators
    ))
|
|
1685
|
+
|
|
1686
|
+
def _handle_generators(self, args: Dict[str, Any]) -> str:
|
|
1687
|
+
"""Handle generator tools execution."""
|
|
1688
|
+
import uuid
|
|
1689
|
+
import string
|
|
1690
|
+
import random
|
|
1691
|
+
|
|
1692
|
+
generator = args.get("generator", "uuid")
|
|
1693
|
+
count = args.get("count", 1)
|
|
1694
|
+
|
|
1695
|
+
if generator == "password":
|
|
1696
|
+
length = args.get("length", 20)
|
|
1697
|
+
results = []
|
|
1698
|
+
chars = string.ascii_letters + string.digits + string.punctuation
|
|
1699
|
+
for _ in range(count):
|
|
1700
|
+
results.append(''.join(random.choices(chars, k=length)))
|
|
1701
|
+
return "\n".join(results)
|
|
1702
|
+
|
|
1703
|
+
elif generator == "uuid":
|
|
1704
|
+
version = args.get("uuid_version", 4)
|
|
1705
|
+
results = []
|
|
1706
|
+
for _ in range(count):
|
|
1707
|
+
if version == 1:
|
|
1708
|
+
results.append(str(uuid.uuid1()))
|
|
1709
|
+
else:
|
|
1710
|
+
results.append(str(uuid.uuid4()))
|
|
1711
|
+
return "\n".join(results)
|
|
1712
|
+
|
|
1713
|
+
elif generator == "lorem_ipsum":
|
|
1714
|
+
lorem_type = args.get("lorem_type", "paragraphs")
|
|
1715
|
+
lorem_words = [
|
|
1716
|
+
"lorem", "ipsum", "dolor", "sit", "amet", "consectetur", "adipiscing",
|
|
1717
|
+
"elit", "sed", "do", "eiusmod", "tempor", "incididunt", "ut", "labore",
|
|
1718
|
+
"et", "dolore", "magna", "aliqua", "enim", "ad", "minim", "veniam",
|
|
1719
|
+
"quis", "nostrud", "exercitation", "ullamco", "laboris", "nisi", "aliquip",
|
|
1720
|
+
"ex", "ea", "commodo", "consequat", "duis", "aute", "irure", "in",
|
|
1721
|
+
"reprehenderit", "voluptate", "velit", "esse", "cillum", "fugiat", "nulla"
|
|
1722
|
+
]
|
|
1723
|
+
|
|
1724
|
+
if lorem_type == "words":
|
|
1725
|
+
return " ".join(random.choices(lorem_words, k=count))
|
|
1726
|
+
elif lorem_type == "sentences":
|
|
1727
|
+
sentences = []
|
|
1728
|
+
for _ in range(count):
|
|
1729
|
+
words = random.choices(lorem_words, k=random.randint(8, 15))
|
|
1730
|
+
words[0] = words[0].capitalize()
|
|
1731
|
+
sentences.append(" ".join(words) + ".")
|
|
1732
|
+
return " ".join(sentences)
|
|
1733
|
+
else: # paragraphs
|
|
1734
|
+
paragraphs = []
|
|
1735
|
+
for _ in range(count):
|
|
1736
|
+
sentences = []
|
|
1737
|
+
for _ in range(random.randint(3, 6)):
|
|
1738
|
+
words = random.choices(lorem_words, k=random.randint(8, 15))
|
|
1739
|
+
words[0] = words[0].capitalize()
|
|
1740
|
+
sentences.append(" ".join(words) + ".")
|
|
1741
|
+
paragraphs.append(" ".join(sentences))
|
|
1742
|
+
return "\n\n".join(paragraphs)
|
|
1743
|
+
|
|
1744
|
+
elif generator == "random_email":
|
|
1745
|
+
domains = ["example.com", "test.org", "sample.net", "demo.io"]
|
|
1746
|
+
results = []
|
|
1747
|
+
for _ in range(count):
|
|
1748
|
+
name = ''.join(random.choices(string.ascii_lowercase, k=8))
|
|
1749
|
+
domain = random.choice(domains)
|
|
1750
|
+
results.append(f"{name}@{domain}")
|
|
1751
|
+
return "\n".join(results)
|
|
1752
|
+
|
|
1753
|
+
elif generator == "slug":
|
|
1754
|
+
from tools.slug_generator import SlugGeneratorProcessor
|
|
1755
|
+
|
|
1756
|
+
text = args.get("text", "")
|
|
1757
|
+
if not text:
|
|
1758
|
+
return "Error: 'text' is required for slug generator"
|
|
1759
|
+
separator = args.get("separator", "-")
|
|
1760
|
+
lowercase = args.get("lowercase", True)
|
|
1761
|
+
transliterate = args.get("transliterate", True)
|
|
1762
|
+
max_length = args.get("max_length", 0)
|
|
1763
|
+
remove_stopwords = args.get("remove_stopwords", False)
|
|
1764
|
+
|
|
1765
|
+
return SlugGeneratorProcessor.generate_slug(
|
|
1766
|
+
text, separator, lowercase, transliterate,
|
|
1767
|
+
max_length, remove_stopwords
|
|
1768
|
+
)
|
|
1769
|
+
|
|
1770
|
+
else:
|
|
1771
|
+
return f"Unknown generator: {generator}"
|
|
1772
|
+
|
|
1773
|
+
# =========================================================================
|
|
1774
|
+
# Phase 3 Tools - Notes Widget Integration
|
|
1775
|
+
# =========================================================================
|
|
1776
|
+
|
|
1777
|
+
def _register_notes_tools(self) -> None:
    """Register unified Notes tool for MCP access."""
    schema = {
        "type": "object",
        "properties": {
            "action": {
                "type": "string",
                "enum": ["save", "get", "list", "search", "update", "delete"],
                "description": "Action to perform on notes"
            },
            "note_id": {
                "type": "integer",
                "description": "Note ID (required for get/update/delete)"
            },
            "title": {
                "type": "string",
                "description": "Note title (required for save, optional for update)"
            },
            "input_content": {
                "type": "string",
                "description": "Input/source content",
                "default": ""
            },
            "output_content": {
                "type": "string",
                "description": "Output/result content",
                "default": ""
            },
            "search_term": {
                "type": "string",
                "description": "FTS5 search term for list/search. Use * for wildcards.",
                "default": ""
            },
            "limit": {
                "type": "integer",
                "description": "Max results for list/search",
                "default": 50
            }
        },
        "required": ["action"]
    }
    self.register(MCPToolAdapter(
        name="pomera_notes",
        description="Manage notes in Pomera's database. Actions: save (create new note), get (retrieve by ID), "
                    "list (list/filter notes), search (full-text search with content), update (modify existing), "
                    "delete (remove note).",
        input_schema=schema,
        handler=self._handle_notes
    ))
|
|
1825
|
+
|
|
1826
|
+
def _handle_notes(self, args: Dict[str, Any]) -> str:
|
|
1827
|
+
"""Route notes action to appropriate handler."""
|
|
1828
|
+
action = args.get("action", "")
|
|
1829
|
+
|
|
1830
|
+
if action == "save":
|
|
1831
|
+
return self._handle_notes_save(args)
|
|
1832
|
+
elif action == "get":
|
|
1833
|
+
return self._handle_notes_get(args)
|
|
1834
|
+
elif action == "list":
|
|
1835
|
+
return self._handle_notes_list(args)
|
|
1836
|
+
elif action == "search":
|
|
1837
|
+
return self._handle_notes_search(args)
|
|
1838
|
+
elif action == "update":
|
|
1839
|
+
return self._handle_notes_update(args)
|
|
1840
|
+
elif action == "delete":
|
|
1841
|
+
return self._handle_notes_delete(args)
|
|
1842
|
+
else:
|
|
1843
|
+
return f"Unknown action: {action}. Valid actions: save, get, list, search, update, delete"
|
|
1844
|
+
|
|
1845
|
+
def _get_notes_db_path(self) -> str:
|
|
1846
|
+
"""Get the path to the notes database."""
|
|
1847
|
+
import os
|
|
1848
|
+
project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
|
1849
|
+
return os.path.join(project_root, 'notes.db')
|
|
1850
|
+
|
|
1851
|
+
def _get_notes_connection(self):
    """Get a connection to the notes database.

    Rows are returned as sqlite3.Row so columns can be read by name.
    """
    import sqlite3
    connection = sqlite3.connect(self._get_notes_db_path(), timeout=10.0)
    connection.row_factory = sqlite3.Row
    return connection
|
|
1858
|
+
|
|
1859
|
+
def _handle_notes_save(self, args: Dict[str, Any]) -> str:
    """Handle saving a new note.

    Requires a title; input/output content default to empty strings.
    Returns a confirmation with the new note's ID, or an error message.
    """
    from datetime import datetime

    title = args.get("title", "")
    input_content = args.get("input_content", "")
    output_content = args.get("output_content", "")

    if not title:
        return "Error: Title is required"

    try:
        conn = self._get_notes_connection()
        try:
            now = datetime.now().isoformat()
            cursor = conn.execute('''
                INSERT INTO notes (Created, Modified, Title, Input, Output)
                VALUES (?, ?, ?, ?, ?)
            ''', (now, now, title, input_content, output_content))
            note_id = cursor.lastrowid
            conn.commit()
        finally:
            # Close even if the INSERT fails so the DB file is not left locked.
            conn.close()

        return f"Note saved successfully with ID: {note_id}"
    except Exception as e:
        return f"Error saving note: {str(e)}"
|
|
1884
|
+
|
|
1885
|
+
def _handle_notes_get(self, args: Dict[str, Any]) -> str:
    """Handle getting a note by ID.

    Returns the full note (title, timestamps, input and output content),
    or an error message if the note does not exist.
    """
    note_id = args.get("note_id")

    if note_id is None:
        return "Error: note_id is required"

    try:
        conn = self._get_notes_connection()
        try:
            row = conn.execute('SELECT * FROM notes WHERE id = ?', (note_id,)).fetchone()
        finally:
            # Close even on query failure so the DB file is not left locked.
            conn.close()

        if not row:
            return f"Note with ID {note_id} not found"

        lines = [
            f"=== Note #{row['id']} ===",
            f"Title: {row['Title'] or '(no title)'}",
            f"Created: {row['Created']}",
            f"Modified: {row['Modified']}",
            "",
            "--- INPUT ---",
            row['Input'] or "(empty)",
            "",
            "--- OUTPUT ---",
            row['Output'] or "(empty)"
        ]
        return "\n".join(lines)
    except Exception as e:
        return f"Error retrieving note: {str(e)}"
|
|
1915
|
+
|
|
1916
|
+
def _handle_notes_list(self, args: Dict[str, Any]) -> str:
    """Handle listing notes.

    With a search_term, queries the FTS5 index (prefix match via a trailing
    '*'); otherwise lists the most recently modified notes.  Returns up to
    `limit` results, newest first.
    """
    search_term = args.get("search_term", "").strip()
    limit = args.get("limit", 50)

    try:
        conn = self._get_notes_connection()
        try:
            if search_term:
                cursor = conn.execute('''
                    SELECT n.id, n.Created, n.Modified, n.Title
                    FROM notes n JOIN notes_fts fts ON n.id = fts.rowid
                    WHERE notes_fts MATCH ?
                    ORDER BY rank
                    LIMIT ?
                ''', (search_term + '*', limit))
            else:
                cursor = conn.execute('''
                    SELECT id, Created, Modified, Title
                    FROM notes
                    ORDER BY Modified DESC
                    LIMIT ?
                ''', (limit,))

            rows = cursor.fetchall()
        finally:
            # Close even on query failure so the DB file is not left locked.
            conn.close()

        if not rows:
            return "No notes found" + (f" matching '{search_term}'" if search_term else "")

        lines = [f"Found {len(rows)} note(s):", ""]
        for row in rows:
            raw_title = row['Title'] or ''
            # Truncate long titles for the listing view.
            title = raw_title[:50] + "..." if len(raw_title) > 50 else (raw_title or '(no title)')
            lines.append(f"  [{row['id']:4}] {title}")
            lines.append(f"        Modified: {row['Modified']}")

        return "\n".join(lines)
    except Exception as e:
        return f"Error listing notes: {str(e)}"
|
|
1955
|
+
|
|
1956
|
+
def _handle_notes_search(self, args: Dict[str, Any]) -> str:
    """Handle searching notes with full content.

    Performs an FTS5 prefix search and returns matching notes with their
    input/output content truncated to 500 characters each.
    """
    search_term = args.get("search_term", "").strip()
    limit = args.get("limit", 10)

    if not search_term:
        return "Error: search_term is required"

    try:
        conn = self._get_notes_connection()
        try:
            cursor = conn.execute('''
                SELECT n.id, n.Created, n.Modified, n.Title, n.Input, n.Output
                FROM notes n JOIN notes_fts fts ON n.id = fts.rowid
                WHERE notes_fts MATCH ?
                ORDER BY rank
                LIMIT ?
            ''', (search_term + '*', limit))

            rows = cursor.fetchall()
        finally:
            # Close even on query failure so the DB file is not left locked.
            conn.close()

        if not rows:
            return f"No notes found matching '{search_term}'"

        lines = [f"Found {len(rows)} note(s) matching '{search_term}':", ""]

        for row in rows:
            lines.append(f"=== Note #{row['id']}: {row['Title'] or '(no title)'} ===")
            lines.append(f"Modified: {row['Modified']}")
            lines.append("")

            # Truncate long content so the result stays readable.
            input_preview = (row['Input'] or '')[:500]
            if len(row['Input'] or '') > 500:
                input_preview += "... (truncated)"

            output_preview = (row['Output'] or '')[:500]
            if len(row['Output'] or '') > 500:
                output_preview += "... (truncated)"

            lines.append("INPUT:")
            lines.append(input_preview or "(empty)")
            lines.append("")
            lines.append("OUTPUT:")
            lines.append(output_preview or "(empty)")
            lines.append("")
            lines.append("-" * 50)
            lines.append("")

        return "\n".join(lines)
    except Exception as e:
        return f"Error searching notes: {str(e)}"
|
|
2008
|
+
|
|
2009
|
+
def _handle_notes_update(self, args: Dict[str, Any]) -> str:
    """Handle updating an existing note.

    Only fields explicitly present in args (title, input_content,
    output_content) are modified; Modified is always refreshed.
    """
    from datetime import datetime

    note_id = args.get("note_id")

    if note_id is None:
        return "Error: note_id is required"

    try:
        conn = self._get_notes_connection()
        try:
            # Check if note exists.
            existing = conn.execute('SELECT * FROM notes WHERE id = ?', (note_id,)).fetchone()
            if not existing:
                return f"Note with ID {note_id} not found"

            # Build the SET clause from only the supplied fields.
            updates = []
            values = []

            if "title" in args:
                updates.append("Title = ?")
                values.append(args["title"])

            if "input_content" in args:
                updates.append("Input = ?")
                values.append(args["input_content"])

            if "output_content" in args:
                updates.append("Output = ?")
                values.append(args["output_content"])

            if not updates:
                return "No fields to update"

            # Always update the Modified timestamp.
            updates.append("Modified = ?")
            values.append(datetime.now().isoformat())

            values.append(note_id)

            # Column names are fixed literals above, so interpolating them
            # into the SQL is safe; all data goes through placeholders.
            conn.execute(f'''
                UPDATE notes SET {', '.join(updates)} WHERE id = ?
            ''', values)
            conn.commit()
        finally:
            # Close on every path (early returns and errors included).
            conn.close()

        return f"Note {note_id} updated successfully"
    except Exception as e:
        return f"Error updating note: {str(e)}"
|
|
2062
|
+
|
|
2063
|
+
def _handle_notes_delete(self, args: Dict[str, Any]) -> str:
    """Handle deleting a note by ID.

    Returns a confirmation, a not-found message, or an error message.
    """
    note_id = args.get("note_id")

    if note_id is None:
        return "Error: note_id is required"

    try:
        conn = self._get_notes_connection()
        try:
            # Check if note exists before deleting.
            existing = conn.execute('SELECT id FROM notes WHERE id = ?', (note_id,)).fetchone()
            if not existing:
                return f"Note with ID {note_id} not found"

            conn.execute('DELETE FROM notes WHERE id = ?', (note_id,))
            conn.commit()
        finally:
            # Close on every path so the DB file is not left locked.
            conn.close()

        return f"Note {note_id} deleted successfully"
    except Exception as e:
        return f"Error deleting note: {str(e)}"
|
|
2086
|
+
|
|
2087
|
+
# =========================================================================
|
|
2088
|
+
# Phase 4 Tools - Additional Tools
|
|
2089
|
+
# =========================================================================
|
|
2090
|
+
|
|
2091
|
+
def _register_email_header_analyzer_tool(self) -> None:
    """Register the Email Header Analyzer Tool."""
    properties: Dict[str, Any] = {
        "text": {
            "type": "string",
            "description": "Raw email headers to analyze"
        },
    }
    # Every toggle shares the same boolean/default-True shape; build them
    # from a table instead of repeating the literal four times.
    for option, desc in (
        ("show_timestamps", "Show timestamp information for each server hop"),
        ("show_delays", "Show delay calculations between server hops"),
        ("show_authentication", "Show SPF, DKIM, DMARC authentication results"),
        ("show_spam_score", "Show spam score if available"),
    ):
        properties[option] = {
            "type": "boolean",
            "description": desc,
            "default": True,
        }

    self.register(MCPToolAdapter(
        name="pomera_email_header_analyzer",
        description=(
            "Analyze email headers to extract routing information, authentication results (SPF, DKIM, DMARC), "
            "server hops, delivery timing, and spam scores."
        ),
        input_schema={
            "type": "object",
            "properties": properties,
            "required": ["text"],
        },
        handler=self._handle_email_header_analyzer,
    ))
|
|
2129
|
+
|
|
2130
|
+
def _handle_email_header_analyzer(self, args: Dict[str, Any]) -> str:
    """Handle email header analyzer tool execution."""
    from tools.email_header_analyzer import EmailHeaderAnalyzerProcessor

    # All display toggles default to True; collect them in call order.
    toggles = [
        args.get(name, True)
        for name in ("show_timestamps", "show_delays",
                     "show_authentication", "show_spam_score")
    ]

    return EmailHeaderAnalyzerProcessor.analyze_email_headers(
        args.get("text", ""), *toggles
    )
|
|
2143
|
+
|
|
2144
|
+
def _register_html_tool(self) -> None:
    """Register the HTML Extraction Tool.

    Fix: the schema now declares "remove_id_attrs" and "include_title",
    which the handler (_handle_html_tool) already reads from args but
    which were previously missing from the schema, making them
    undiscoverable to MCP clients. Defaults match the handler's fallbacks.
    """
    self.register(MCPToolAdapter(
        name="pomera_html",
        description="Process HTML content: extract visible text, clean HTML, extract links, images, headings, tables, or forms.",
        input_schema={
            "type": "object",
            "properties": {
                "text": {
                    "type": "string",
                    "description": "HTML content to process"
                },
                "operation": {
                    "type": "string",
                    "enum": ["visible_text", "clean_html", "extract_links", "extract_images",
                             "extract_headings", "extract_tables", "extract_forms"],
                    "description": "Extraction/processing operation to perform",
                    "default": "visible_text"
                },
                "preserve_links": {
                    "type": "boolean",
                    "description": "For visible_text: add link references at the end",
                    "default": False
                },
                "remove_scripts": {
                    "type": "boolean",
                    "description": "For clean_html: remove script and style tags",
                    "default": True
                },
                "remove_comments": {
                    "type": "boolean",
                    "description": "For clean_html: remove HTML comments",
                    "default": True
                },
                "remove_style_attrs": {
                    "type": "boolean",
                    "description": "For clean_html: remove style attributes",
                    "default": True
                },
                "remove_class_attrs": {
                    "type": "boolean",
                    "description": "For clean_html: remove class attributes",
                    "default": False
                },
                "remove_id_attrs": {
                    "type": "boolean",
                    "description": "For clean_html: remove id attributes",
                    "default": False
                },
                "remove_empty_tags": {
                    "type": "boolean",
                    "description": "For clean_html: remove empty tags",
                    "default": True
                },
                "include_link_text": {
                    "type": "boolean",
                    "description": "For extract_links: include the link text",
                    "default": True
                },
                "absolute_links_only": {
                    "type": "boolean",
                    "description": "For extract_links: only extract http/https links",
                    "default": False
                },
                "include_alt_text": {
                    "type": "boolean",
                    "description": "For extract_images: include alt text",
                    "default": True
                },
                "include_title": {
                    "type": "boolean",
                    "description": "For extract_images: include the title attribute",
                    "default": False
                },
                "include_heading_level": {
                    "type": "boolean",
                    "description": "For extract_headings: include heading level (H1, H2, etc.)",
                    "default": True
                },
                "column_separator": {
                    "type": "string",
                    "description": "For extract_tables: column separator character",
                    "default": "\t"
                }
            },
            "required": ["text"]
        },
        handler=self._handle_html_tool
    ))
|
|
2223
|
+
|
|
2224
|
+
def _handle_html_tool(self, args: Dict[str, Any]) -> str:
    """Handle HTML tool execution."""
    from tools.html_tool import HTMLExtractionTool

    # Fallbacks applied when the corresponding argument is absent.
    fallbacks = (
        ("preserve_links", False),
        ("remove_scripts", True),
        ("remove_comments", True),
        ("remove_style_attrs", True),
        ("remove_class_attrs", False),
        ("remove_id_attrs", False),
        ("remove_empty_tags", True),
        ("include_link_text", True),
        ("absolute_links_only", False),
        ("include_alt_text", True),
        ("include_title", False),
        ("include_heading_level", True),
        ("column_separator", "\t"),
    )

    settings: Dict[str, Any] = {
        "extraction_method": args.get("operation", "visible_text")
    }
    for key, fallback in fallbacks:
        settings[key] = args.get(key, fallback)

    return HTMLExtractionTool().process_text(args.get("text", ""), settings)
|
|
2251
|
+
|
|
2252
|
+
def _register_list_comparator_tool(self) -> None:
    """Register the List Comparator Tool."""
    schema = {
        "type": "object",
        "properties": {
            "list_a": {
                "type": "string",
                "description": "First list (one item per line)"
            },
            "list_b": {
                "type": "string",
                "description": "Second list (one item per line)"
            },
            "case_insensitive": {
                "type": "boolean",
                "description": "Perform case-insensitive comparison",
                "default": False
            },
            "output_format": {
                "type": "string",
                "enum": ["all", "only_a", "only_b", "in_both"],
                "description": "What to return: all results, only items unique to A, only items unique to B, or only common items",
                "default": "all"
            }
        },
        "required": ["list_a", "list_b"]
    }

    adapter = MCPToolAdapter(
        name="pomera_list_compare",
        description=(
            "Compare two lists and find items unique to each list or common to both. "
            "Useful for finding differences between datasets, configurations, or any line-based content."
        ),
        input_schema=schema,
        handler=self._handle_list_comparator,
    )
    self.register(adapter)
|
|
2285
|
+
|
|
2286
|
+
def _handle_list_comparator(self, args: Dict[str, Any]) -> str:
|
|
2287
|
+
"""Handle list comparator tool execution."""
|
|
2288
|
+
list_a_text = args.get("list_a", "")
|
|
2289
|
+
list_b_text = args.get("list_b", "")
|
|
2290
|
+
case_insensitive = args.get("case_insensitive", False)
|
|
2291
|
+
output_format = args.get("output_format", "all")
|
|
2292
|
+
|
|
2293
|
+
# Parse lists
|
|
2294
|
+
list_a = [line.strip() for line in list_a_text.strip().splitlines() if line.strip()]
|
|
2295
|
+
list_b = [line.strip() for line in list_b_text.strip().splitlines() if line.strip()]
|
|
2296
|
+
|
|
2297
|
+
if not list_a and not list_b:
|
|
2298
|
+
return "Both lists are empty."
|
|
2299
|
+
|
|
2300
|
+
# Perform comparison
|
|
2301
|
+
if case_insensitive:
|
|
2302
|
+
set_a_lower = {item.lower() for item in list_a}
|
|
2303
|
+
set_b_lower = {item.lower() for item in list_b}
|
|
2304
|
+
|
|
2305
|
+
map_a = {item.lower(): item for item in reversed(list_a)}
|
|
2306
|
+
map_b = {item.lower(): item for item in reversed(list_b)}
|
|
2307
|
+
|
|
2308
|
+
unique_a_lower = set_a_lower - set_b_lower
|
|
2309
|
+
unique_b_lower = set_b_lower - set_a_lower
|
|
2310
|
+
in_both_lower = set_a_lower & set_b_lower
|
|
2311
|
+
|
|
2312
|
+
unique_a = sorted([map_a[item] for item in unique_a_lower])
|
|
2313
|
+
unique_b = sorted([map_b[item] for item in unique_b_lower])
|
|
2314
|
+
in_both = sorted([map_a.get(item, map_b.get(item)) for item in in_both_lower])
|
|
2315
|
+
else:
|
|
2316
|
+
set_a = set(list_a)
|
|
2317
|
+
set_b = set(list_b)
|
|
2318
|
+
unique_a = sorted(list(set_a - set_b))
|
|
2319
|
+
unique_b = sorted(list(set_b - set_a))
|
|
2320
|
+
in_both = sorted(list(set_a & set_b))
|
|
2321
|
+
|
|
2322
|
+
# Build output based on format
|
|
2323
|
+
result_lines = []
|
|
2324
|
+
|
|
2325
|
+
if output_format == "only_a":
|
|
2326
|
+
result_lines.append(f"=== Items only in List A ({len(unique_a)}) ===")
|
|
2327
|
+
result_lines.extend(unique_a if unique_a else ["(none)"])
|
|
2328
|
+
elif output_format == "only_b":
|
|
2329
|
+
result_lines.append(f"=== Items only in List B ({len(unique_b)}) ===")
|
|
2330
|
+
result_lines.extend(unique_b if unique_b else ["(none)"])
|
|
2331
|
+
elif output_format == "in_both":
|
|
2332
|
+
result_lines.append(f"=== Items in both lists ({len(in_both)}) ===")
|
|
2333
|
+
result_lines.extend(in_both if in_both else ["(none)"])
|
|
2334
|
+
else: # "all"
|
|
2335
|
+
result_lines.append(f"=== Comparison Summary ===")
|
|
2336
|
+
result_lines.append(f"List A: {len(list_a)} items")
|
|
2337
|
+
result_lines.append(f"List B: {len(list_b)} items")
|
|
2338
|
+
result_lines.append(f"Only in A: {len(unique_a)}")
|
|
2339
|
+
result_lines.append(f"Only in B: {len(unique_b)}")
|
|
2340
|
+
result_lines.append(f"In both: {len(in_both)}")
|
|
2341
|
+
result_lines.append("")
|
|
2342
|
+
|
|
2343
|
+
result_lines.append(f"=== Only in List A ({len(unique_a)}) ===")
|
|
2344
|
+
result_lines.extend(unique_a if unique_a else ["(none)"])
|
|
2345
|
+
result_lines.append("")
|
|
2346
|
+
|
|
2347
|
+
result_lines.append(f"=== Only in List B ({len(unique_b)}) ===")
|
|
2348
|
+
result_lines.extend(unique_b if unique_b else ["(none)"])
|
|
2349
|
+
result_lines.append("")
|
|
2350
|
+
|
|
2351
|
+
result_lines.append(f"=== In Both Lists ({len(in_both)}) ===")
|
|
2352
|
+
result_lines.extend(in_both if in_both else ["(none)"])
|
|
2353
|
+
|
|
2354
|
+
return "\n".join(result_lines)
|
|
2355
|
+
|
|
2356
|
+
|
|
2357
|
+
# Singleton instance for convenience
|
|
2358
|
+
_default_registry: Optional[ToolRegistry] = None
|
|
2359
|
+
|
|
2360
|
+
|
|
2361
|
+
def get_registry() -> ToolRegistry:
|
|
2362
|
+
"""
|
|
2363
|
+
Get the default tool registry instance.
|
|
2364
|
+
|
|
2365
|
+
Returns:
|
|
2366
|
+
ToolRegistry singleton
|
|
2367
|
+
"""
|
|
2368
|
+
global _default_registry
|
|
2369
|
+
if _default_registry is None:
|
|
2370
|
+
_default_registry = ToolRegistry()
|
|
2371
|
+
return _default_registry
|
|
2372
|
+
|