pomera-ai-commander 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (192)
  1. package/LICENSE +21 -0
  2. package/README.md +680 -0
  3. package/bin/pomera-ai-commander.js +62 -0
  4. package/core/__init__.py +66 -0
  5. package/core/__pycache__/__init__.cpython-313.pyc +0 -0
  6. package/core/__pycache__/app_context.cpython-313.pyc +0 -0
  7. package/core/__pycache__/async_text_processor.cpython-313.pyc +0 -0
  8. package/core/__pycache__/backup_manager.cpython-313.pyc +0 -0
  9. package/core/__pycache__/backup_recovery_manager.cpython-313.pyc +0 -0
  10. package/core/__pycache__/content_hash_cache.cpython-313.pyc +0 -0
  11. package/core/__pycache__/context_menu.cpython-313.pyc +0 -0
  12. package/core/__pycache__/data_validator.cpython-313.pyc +0 -0
  13. package/core/__pycache__/database_connection_manager.cpython-313.pyc +0 -0
  14. package/core/__pycache__/database_curl_settings_manager.cpython-313.pyc +0 -0
  15. package/core/__pycache__/database_promera_ai_settings_manager.cpython-313.pyc +0 -0
  16. package/core/__pycache__/database_schema.cpython-313.pyc +0 -0
  17. package/core/__pycache__/database_schema_manager.cpython-313.pyc +0 -0
  18. package/core/__pycache__/database_settings_manager.cpython-313.pyc +0 -0
  19. package/core/__pycache__/database_settings_manager_interface.cpython-313.pyc +0 -0
  20. package/core/__pycache__/dialog_manager.cpython-313.pyc +0 -0
  21. package/core/__pycache__/efficient_line_numbers.cpython-313.pyc +0 -0
  22. package/core/__pycache__/error_handler.cpython-313.pyc +0 -0
  23. package/core/__pycache__/error_service.cpython-313.pyc +0 -0
  24. package/core/__pycache__/event_consolidator.cpython-313.pyc +0 -0
  25. package/core/__pycache__/memory_efficient_text_widget.cpython-313.pyc +0 -0
  26. package/core/__pycache__/migration_manager.cpython-313.pyc +0 -0
  27. package/core/__pycache__/migration_test_suite.cpython-313.pyc +0 -0
  28. package/core/__pycache__/migration_validator.cpython-313.pyc +0 -0
  29. package/core/__pycache__/optimized_find_replace.cpython-313.pyc +0 -0
  30. package/core/__pycache__/optimized_pattern_engine.cpython-313.pyc +0 -0
  31. package/core/__pycache__/optimized_search_highlighter.cpython-313.pyc +0 -0
  32. package/core/__pycache__/performance_monitor.cpython-313.pyc +0 -0
  33. package/core/__pycache__/persistence_manager.cpython-313.pyc +0 -0
  34. package/core/__pycache__/progressive_stats_calculator.cpython-313.pyc +0 -0
  35. package/core/__pycache__/regex_pattern_cache.cpython-313.pyc +0 -0
  36. package/core/__pycache__/regex_pattern_library.cpython-313.pyc +0 -0
  37. package/core/__pycache__/search_operation_manager.cpython-313.pyc +0 -0
  38. package/core/__pycache__/settings_defaults_registry.cpython-313.pyc +0 -0
  39. package/core/__pycache__/settings_integrity_validator.cpython-313.pyc +0 -0
  40. package/core/__pycache__/settings_serializer.cpython-313.pyc +0 -0
  41. package/core/__pycache__/settings_validator.cpython-313.pyc +0 -0
  42. package/core/__pycache__/smart_stats_calculator.cpython-313.pyc +0 -0
  43. package/core/__pycache__/statistics_update_manager.cpython-313.pyc +0 -0
  44. package/core/__pycache__/stats_config_manager.cpython-313.pyc +0 -0
  45. package/core/__pycache__/streaming_text_handler.cpython-313.pyc +0 -0
  46. package/core/__pycache__/task_scheduler.cpython-313.pyc +0 -0
  47. package/core/__pycache__/visibility_monitor.cpython-313.pyc +0 -0
  48. package/core/__pycache__/widget_cache.cpython-313.pyc +0 -0
  49. package/core/app_context.py +482 -0
  50. package/core/async_text_processor.py +422 -0
  51. package/core/backup_manager.py +656 -0
  52. package/core/backup_recovery_manager.py +1034 -0
  53. package/core/content_hash_cache.py +509 -0
  54. package/core/context_menu.py +313 -0
  55. package/core/data_validator.py +1067 -0
  56. package/core/database_connection_manager.py +745 -0
  57. package/core/database_curl_settings_manager.py +609 -0
  58. package/core/database_promera_ai_settings_manager.py +447 -0
  59. package/core/database_schema.py +412 -0
  60. package/core/database_schema_manager.py +396 -0
  61. package/core/database_settings_manager.py +1508 -0
  62. package/core/database_settings_manager_interface.py +457 -0
  63. package/core/dialog_manager.py +735 -0
  64. package/core/efficient_line_numbers.py +511 -0
  65. package/core/error_handler.py +747 -0
  66. package/core/error_service.py +431 -0
  67. package/core/event_consolidator.py +512 -0
  68. package/core/mcp/__init__.py +43 -0
  69. package/core/mcp/__pycache__/__init__.cpython-313.pyc +0 -0
  70. package/core/mcp/__pycache__/protocol.cpython-313.pyc +0 -0
  71. package/core/mcp/__pycache__/schema.cpython-313.pyc +0 -0
  72. package/core/mcp/__pycache__/server_stdio.cpython-313.pyc +0 -0
  73. package/core/mcp/__pycache__/tool_registry.cpython-313.pyc +0 -0
  74. package/core/mcp/protocol.py +288 -0
  75. package/core/mcp/schema.py +251 -0
  76. package/core/mcp/server_stdio.py +299 -0
  77. package/core/mcp/tool_registry.py +2345 -0
  78. package/core/memory_efficient_text_widget.py +712 -0
  79. package/core/migration_manager.py +915 -0
  80. package/core/migration_test_suite.py +1086 -0
  81. package/core/migration_validator.py +1144 -0
  82. package/core/optimized_find_replace.py +715 -0
  83. package/core/optimized_pattern_engine.py +424 -0
  84. package/core/optimized_search_highlighter.py +553 -0
  85. package/core/performance_monitor.py +675 -0
  86. package/core/persistence_manager.py +713 -0
  87. package/core/progressive_stats_calculator.py +632 -0
  88. package/core/regex_pattern_cache.py +530 -0
  89. package/core/regex_pattern_library.py +351 -0
  90. package/core/search_operation_manager.py +435 -0
  91. package/core/settings_defaults_registry.py +1087 -0
  92. package/core/settings_integrity_validator.py +1112 -0
  93. package/core/settings_serializer.py +558 -0
  94. package/core/settings_validator.py +1824 -0
  95. package/core/smart_stats_calculator.py +710 -0
  96. package/core/statistics_update_manager.py +619 -0
  97. package/core/stats_config_manager.py +858 -0
  98. package/core/streaming_text_handler.py +723 -0
  99. package/core/task_scheduler.py +596 -0
  100. package/core/update_pattern_library.py +169 -0
  101. package/core/visibility_monitor.py +596 -0
  102. package/core/widget_cache.py +498 -0
  103. package/mcp.json +61 -0
  104. package/package.json +57 -0
  105. package/pomera.py +7483 -0
  106. package/pomera_mcp_server.py +144 -0
  107. package/tools/__init__.py +5 -0
  108. package/tools/__pycache__/__init__.cpython-313.pyc +0 -0
  109. package/tools/__pycache__/ai_tools.cpython-313.pyc +0 -0
  110. package/tools/__pycache__/ascii_art_generator.cpython-313.pyc +0 -0
  111. package/tools/__pycache__/base64_tools.cpython-313.pyc +0 -0
  112. package/tools/__pycache__/base_tool.cpython-313.pyc +0 -0
  113. package/tools/__pycache__/case_tool.cpython-313.pyc +0 -0
  114. package/tools/__pycache__/column_tools.cpython-313.pyc +0 -0
  115. package/tools/__pycache__/cron_tool.cpython-313.pyc +0 -0
  116. package/tools/__pycache__/curl_history.cpython-313.pyc +0 -0
  117. package/tools/__pycache__/curl_processor.cpython-313.pyc +0 -0
  118. package/tools/__pycache__/curl_settings.cpython-313.pyc +0 -0
  119. package/tools/__pycache__/curl_tool.cpython-313.pyc +0 -0
  120. package/tools/__pycache__/diff_viewer.cpython-313.pyc +0 -0
  121. package/tools/__pycache__/email_extraction_tool.cpython-313.pyc +0 -0
  122. package/tools/__pycache__/email_header_analyzer.cpython-313.pyc +0 -0
  123. package/tools/__pycache__/extraction_tools.cpython-313.pyc +0 -0
  124. package/tools/__pycache__/find_replace.cpython-313.pyc +0 -0
  125. package/tools/__pycache__/folder_file_reporter.cpython-313.pyc +0 -0
  126. package/tools/__pycache__/folder_file_reporter_adapter.cpython-313.pyc +0 -0
  127. package/tools/__pycache__/generator_tools.cpython-313.pyc +0 -0
  128. package/tools/__pycache__/hash_generator.cpython-313.pyc +0 -0
  129. package/tools/__pycache__/html_tool.cpython-313.pyc +0 -0
  130. package/tools/__pycache__/huggingface_helper.cpython-313.pyc +0 -0
  131. package/tools/__pycache__/jsonxml_tool.cpython-313.pyc +0 -0
  132. package/tools/__pycache__/line_tools.cpython-313.pyc +0 -0
  133. package/tools/__pycache__/list_comparator.cpython-313.pyc +0 -0
  134. package/tools/__pycache__/markdown_tools.cpython-313.pyc +0 -0
  135. package/tools/__pycache__/mcp_widget.cpython-313.pyc +0 -0
  136. package/tools/__pycache__/notes_widget.cpython-313.pyc +0 -0
  137. package/tools/__pycache__/number_base_converter.cpython-313.pyc +0 -0
  138. package/tools/__pycache__/regex_extractor.cpython-313.pyc +0 -0
  139. package/tools/__pycache__/slug_generator.cpython-313.pyc +0 -0
  140. package/tools/__pycache__/sorter_tools.cpython-313.pyc +0 -0
  141. package/tools/__pycache__/string_escape_tool.cpython-313.pyc +0 -0
  142. package/tools/__pycache__/text_statistics_tool.cpython-313.pyc +0 -0
  143. package/tools/__pycache__/text_wrapper.cpython-313.pyc +0 -0
  144. package/tools/__pycache__/timestamp_converter.cpython-313.pyc +0 -0
  145. package/tools/__pycache__/tool_loader.cpython-313.pyc +0 -0
  146. package/tools/__pycache__/translator_tools.cpython-313.pyc +0 -0
  147. package/tools/__pycache__/url_link_extractor.cpython-313.pyc +0 -0
  148. package/tools/__pycache__/url_parser.cpython-313.pyc +0 -0
  149. package/tools/__pycache__/whitespace_tools.cpython-313.pyc +0 -0
  150. package/tools/__pycache__/word_frequency_counter.cpython-313.pyc +0 -0
  151. package/tools/ai_tools.py +2892 -0
  152. package/tools/ascii_art_generator.py +353 -0
  153. package/tools/base64_tools.py +184 -0
  154. package/tools/base_tool.py +511 -0
  155. package/tools/case_tool.py +309 -0
  156. package/tools/column_tools.py +396 -0
  157. package/tools/cron_tool.py +885 -0
  158. package/tools/curl_history.py +601 -0
  159. package/tools/curl_processor.py +1208 -0
  160. package/tools/curl_settings.py +503 -0
  161. package/tools/curl_tool.py +5467 -0
  162. package/tools/diff_viewer.py +1072 -0
  163. package/tools/email_extraction_tool.py +249 -0
  164. package/tools/email_header_analyzer.py +426 -0
  165. package/tools/extraction_tools.py +250 -0
  166. package/tools/find_replace.py +1751 -0
  167. package/tools/folder_file_reporter.py +1463 -0
  168. package/tools/folder_file_reporter_adapter.py +480 -0
  169. package/tools/generator_tools.py +1217 -0
  170. package/tools/hash_generator.py +256 -0
  171. package/tools/html_tool.py +657 -0
  172. package/tools/huggingface_helper.py +449 -0
  173. package/tools/jsonxml_tool.py +730 -0
  174. package/tools/line_tools.py +419 -0
  175. package/tools/list_comparator.py +720 -0
  176. package/tools/markdown_tools.py +562 -0
  177. package/tools/mcp_widget.py +1417 -0
  178. package/tools/notes_widget.py +973 -0
  179. package/tools/number_base_converter.py +373 -0
  180. package/tools/regex_extractor.py +572 -0
  181. package/tools/slug_generator.py +311 -0
  182. package/tools/sorter_tools.py +459 -0
  183. package/tools/string_escape_tool.py +393 -0
  184. package/tools/text_statistics_tool.py +366 -0
  185. package/tools/text_wrapper.py +431 -0
  186. package/tools/timestamp_converter.py +422 -0
  187. package/tools/tool_loader.py +710 -0
  188. package/tools/translator_tools.py +523 -0
  189. package/tools/url_link_extractor.py +262 -0
  190. package/tools/url_parser.py +205 -0
  191. package/tools/whitespace_tools.py +356 -0
  192. package/tools/word_frequency_counter.py +147 -0
@@ -0,0 +1,2345 @@
1
+ """
2
+ MCP Tool Registry - Maps Pomera tools to MCP tool definitions
3
+
4
+ This module provides:
5
+ - MCPToolAdapter: Wrapper for Pomera tools to expose them via MCP
6
+ - ToolRegistry: Central registry for all MCP-exposed tools
7
+
8
+ Tools are registered with their input schemas and handlers,
9
+ allowing external MCP clients to discover and execute them.
10
+ """
11
+
12
+ import logging
13
+ from typing import Dict, Any, List, Callable, Optional
14
+ from dataclasses import dataclass
15
+
16
+ from .schema import MCPTool, MCPToolResult
17
+
18
+ logger = logging.getLogger(__name__)
19
+
20
+
21
@dataclass
class MCPToolAdapter:
    """
    Wraps a single Pomera tool so it can be served over MCP.

    Attributes:
        name: MCP tool name (e.g., 'pomera_case_transform')
        description: Human-readable description
        input_schema: JSON Schema used to validate tool arguments
        handler: Callable that performs the actual work and returns text
    """
    name: str
    description: str
    input_schema: Dict[str, Any]
    handler: Callable[[Dict[str, Any]], str]

    def to_mcp_tool(self) -> MCPTool:
        """Build the MCPTool definition advertised to MCP clients."""
        return MCPTool(
            name=self.name,
            description=self.description,
            inputSchema=self.input_schema,
        )

    def execute(self, arguments: Dict[str, Any]) -> MCPToolResult:
        """
        Run the wrapped handler and package the outcome.

        Args:
            arguments: Tool arguments matching input_schema

        Returns:
            MCPToolResult with the handler's text output, or an error
            result if the handler raised (the exception is logged).
        """
        try:
            output = self.handler(arguments)
        except Exception as exc:
            logger.exception(f"Tool execution failed: {self.name}")
            return MCPToolResult.error(f"Tool execution failed: {str(exc)}")
        return MCPToolResult.text(output)
61
+
62
+
63
+ class ToolRegistry:
64
+ """
65
+ Central registry for MCP-exposed tools.
66
+
67
+ Manages tool registration, discovery, and execution.
68
+ Automatically registers built-in Pomera tools on initialization.
69
+ """
70
+
71
+ def __init__(self, register_builtins: bool = True):
72
+ """
73
+ Initialize the tool registry.
74
+
75
+ Args:
76
+ register_builtins: Whether to register built-in tools
77
+ """
78
+ self._tools: Dict[str, MCPToolAdapter] = {}
79
+ self._logger = logging.getLogger(__name__)
80
+
81
+ if register_builtins:
82
+ self._register_builtin_tools()
83
+
84
+ def register(self, adapter: MCPToolAdapter) -> None:
85
+ """
86
+ Register a tool adapter.
87
+
88
+ Args:
89
+ adapter: MCPToolAdapter to register
90
+ """
91
+ self._tools[adapter.name] = adapter
92
+ self._logger.info(f"Registered MCP tool: {adapter.name}")
93
+
94
+ def unregister(self, name: str) -> bool:
95
+ """
96
+ Unregister a tool by name.
97
+
98
+ Args:
99
+ name: Tool name to unregister
100
+
101
+ Returns:
102
+ True if tool was removed, False if not found
103
+ """
104
+ if name in self._tools:
105
+ del self._tools[name]
106
+ self._logger.info(f"Unregistered MCP tool: {name}")
107
+ return True
108
+ return False
109
+
110
+ def get_tool(self, name: str) -> Optional[MCPToolAdapter]:
111
+ """
112
+ Get a tool adapter by name.
113
+
114
+ Args:
115
+ name: Tool name
116
+
117
+ Returns:
118
+ MCPToolAdapter or None if not found
119
+ """
120
+ return self._tools.get(name)
121
+
122
+ def list_tools(self) -> List[MCPTool]:
123
+ """
124
+ Get list of all registered tools as MCPTool definitions.
125
+
126
+ Returns:
127
+ List of MCPTool objects
128
+ """
129
+ return [adapter.to_mcp_tool() for adapter in self._tools.values()]
130
+
131
+ def execute(self, name: str, arguments: Dict[str, Any]) -> MCPToolResult:
132
+ """
133
+ Execute a tool by name.
134
+
135
+ Args:
136
+ name: Tool name
137
+ arguments: Tool arguments
138
+
139
+ Returns:
140
+ MCPToolResult with execution output
141
+
142
+ Raises:
143
+ KeyError: If tool not found
144
+ """
145
+ adapter = self._tools.get(name)
146
+ if adapter is None:
147
+ return MCPToolResult.error(f"Tool not found: {name}")
148
+
149
+ return adapter.execute(arguments)
150
+
151
+ def get_tool_names(self) -> List[str]:
152
+ """Get list of all registered tool names."""
153
+ return list(self._tools.keys())
154
+
155
+ def __len__(self) -> int:
156
+ """Return number of registered tools."""
157
+ return len(self._tools)
158
+
159
+ def __contains__(self, name: str) -> bool:
160
+ """Check if tool is registered."""
161
+ return name in self._tools
162
+
163
+ # =========================================================================
164
+ # Built-in Tool Registration
165
+ # =========================================================================
166
+
167
    def _register_builtin_tools(self) -> None:
        """Register all built-in Pomera tools.

        Called from __init__ when register_builtins is True; each helper
        below registers exactly one MCPToolAdapter via self.register().
        """
        # Core text transformation tools
        self._register_case_tool()
        self._register_base64_tool()
        self._register_hash_tool()
        self._register_line_tools()
        self._register_whitespace_tools()
        self._register_string_escape_tool()
        self._register_sorter_tools()
        self._register_text_stats_tool()
        self._register_json_xml_tool()
        self._register_url_parser_tool()
        self._register_text_wrapper_tool()
        self._register_number_base_tool()
        self._register_timestamp_tool()

        # Additional tools (Phase 2)
        self._register_regex_extractor_tool()
        self._register_markdown_tools()
        self._register_translator_tools()
        self._register_cron_tool()
        self._register_email_extraction_tool()
        self._register_url_extractor_tool()
        self._register_word_frequency_tool()
        self._register_column_tools()
        self._register_generator_tools()
        self._register_slug_generator_tool()

        # Notes tools (Phase 3)
        self._register_notes_tools()

        # Additional tools (Phase 4)
        self._register_email_header_analyzer_tool()
        self._register_html_tool()
        self._register_list_comparator_tool()

        self._logger.info(f"Registered {len(self._tools)} built-in MCP tools")
205
+
206
    def _register_case_tool(self) -> None:
        """Register the Case Tool as 'pomera_case_transform'."""
        self.register(MCPToolAdapter(
            name="pomera_case_transform",
            description="Transform text case. Modes: sentence (capitalize first letter of sentences), "
                        "lower (all lowercase), upper (all uppercase), capitalized (title case), "
                        "title (title case with exclusions for articles/prepositions).",
            input_schema={
                "type": "object",
                "properties": {
                    "text": {
                        "type": "string",
                        "description": "The text to transform"
                    },
                    "mode": {
                        "type": "string",
                        "enum": ["sentence", "lower", "upper", "capitalized", "title"],
                        "description": "Case transformation mode"
                    },
                    "exclusions": {
                        "type": "string",
                        "description": "Words to exclude from title case (one per line). "
                                       "Only used when mode is 'title'.",
                        # Same newline-separated word list as the fallback in
                        # _handle_case_transform.
                        "default": "a\nan\nthe\nand\nbut\nor\nfor\nnor\non\nat\nto\nfrom\nby\nwith\nin\nof"
                    }
                },
                "required": ["text", "mode"]
            },
            handler=self._handle_case_transform
        ))
236
+
237
+ def _handle_case_transform(self, args: Dict[str, Any]) -> str:
238
+ """Handle case transformation tool execution."""
239
+ from tools.case_tool import CaseToolProcessor
240
+
241
+ text = args.get("text", "")
242
+ mode = args.get("mode", "sentence")
243
+ exclusions = args.get("exclusions", "a\nan\nthe\nand\nbut\nor\nfor\nnor\non\nat\nto\nfrom\nby\nwith\nin\nof")
244
+
245
+ # Map lowercase mode names to processor's expected format
246
+ mode_map = {
247
+ "sentence": "Sentence",
248
+ "lower": "Lower",
249
+ "upper": "Upper",
250
+ "capitalized": "Capitalized",
251
+ "title": "Title"
252
+ }
253
+ processor_mode = mode_map.get(mode.lower(), "Sentence")
254
+
255
+ return CaseToolProcessor.process_text(text, processor_mode, exclusions)
256
+
257
    def _register_base64_tool(self) -> None:
        """Register the Base64 Tool as 'pomera_base64'."""
        self.register(MCPToolAdapter(
            name="pomera_base64",
            description="Encode or decode text using Base64 encoding. "
                        "Encode converts text to Base64, decode converts Base64 back to text.",
            input_schema={
                "type": "object",
                "properties": {
                    "text": {
                        "type": "string",
                        "description": "The text to encode or decode"
                    },
                    "operation": {
                        "type": "string",
                        "enum": ["encode", "decode"],
                        "description": "Operation to perform"
                    }
                },
                "required": ["text", "operation"]
            },
            handler=self._handle_base64
        ))
280
+
281
+ def _handle_base64(self, args: Dict[str, Any]) -> str:
282
+ """Handle Base64 tool execution."""
283
+ from tools.base64_tools import Base64Tools
284
+
285
+ text = args.get("text", "")
286
+ operation = args.get("operation", "encode")
287
+
288
+ return Base64Tools.base64_processor(text, operation)
289
+
290
    def _register_hash_tool(self) -> None:
        """Register the Hash Generator Tool as 'pomera_hash'."""
        self.register(MCPToolAdapter(
            name="pomera_hash",
            description="Generate cryptographic hashes of text. "
                        "Supports MD5, SHA-1, SHA-256, SHA-512, and CRC32 algorithms.",
            input_schema={
                "type": "object",
                "properties": {
                    "text": {
                        "type": "string",
                        "description": "The text to hash"
                    },
                    "algorithm": {
                        "type": "string",
                        "enum": ["md5", "sha1", "sha256", "sha512", "crc32"],
                        "description": "Hash algorithm to use"
                    },
                    "uppercase": {
                        "type": "boolean",
                        "description": "Output hash in uppercase",
                        "default": False
                    }
                },
                "required": ["text", "algorithm"]
            },
            handler=self._handle_hash
        ))
318
+
319
+ def _handle_hash(self, args: Dict[str, Any]) -> str:
320
+ """Handle hash generation tool execution."""
321
+ from tools.hash_generator import HashGeneratorProcessor
322
+
323
+ text = args.get("text", "")
324
+ algorithm = args.get("algorithm", "sha256")
325
+ uppercase = args.get("uppercase", False)
326
+
327
+ return HashGeneratorProcessor.generate_hash(text, algorithm, uppercase)
328
+
329
    def _register_line_tools(self) -> None:
        """Register the Line Tools as 'pomera_line_tools'."""
        self.register(MCPToolAdapter(
            name="pomera_line_tools",
            description="Line manipulation tools: remove duplicates, remove empty lines, "
                        "add/remove line numbers, reverse lines, shuffle lines.",
            input_schema={
                "type": "object",
                "properties": {
                    "text": {
                        "type": "string",
                        "description": "The text to process (line by line)"
                    },
                    "operation": {
                        "type": "string",
                        "enum": ["remove_duplicates", "remove_empty", "add_numbers",
                                 "remove_numbers", "reverse", "shuffle"],
                        "description": "Operation to perform"
                    },
                    "keep_mode": {
                        "type": "string",
                        "enum": ["keep_first", "keep_last"],
                        "description": "For remove_duplicates: which duplicate to keep",
                        "default": "keep_first"
                    },
                    "case_sensitive": {
                        "type": "boolean",
                        "description": "For remove_duplicates: case-sensitive comparison",
                        "default": True
                    },
                    "number_format": {
                        "type": "string",
                        # Enum values double as the literal format prefix styles.
                        "enum": ["1. ", "1) ", "[1] ", "1: "],
                        "description": "For add_numbers: number format style",
                        "default": "1. "
                    }
                },
                "required": ["text", "operation"]
            },
            handler=self._handle_line_tools
        ))
370
+
371
+ def _handle_line_tools(self, args: Dict[str, Any]) -> str:
372
+ """Handle line tools execution."""
373
+ from tools.line_tools import LineToolsProcessor
374
+
375
+ text = args.get("text", "")
376
+ operation = args.get("operation", "remove_duplicates")
377
+
378
+ if operation == "remove_duplicates":
379
+ mode = args.get("keep_mode", "keep_first")
380
+ case_sensitive = args.get("case_sensitive", True)
381
+ return LineToolsProcessor.remove_duplicates(text, mode, case_sensitive)
382
+ elif operation == "remove_empty":
383
+ return LineToolsProcessor.remove_empty_lines(text)
384
+ elif operation == "add_numbers":
385
+ format_style = args.get("number_format", "1. ")
386
+ return LineToolsProcessor.add_line_numbers(text, format_style)
387
+ elif operation == "remove_numbers":
388
+ return LineToolsProcessor.remove_line_numbers(text)
389
+ elif operation == "reverse":
390
+ return LineToolsProcessor.reverse_lines(text)
391
+ elif operation == "shuffle":
392
+ return LineToolsProcessor.shuffle_lines(text)
393
+ else:
394
+ return f"Unknown operation: {operation}"
395
+
396
    def _register_whitespace_tools(self) -> None:
        """Register the Whitespace Tools as 'pomera_whitespace'."""
        self.register(MCPToolAdapter(
            name="pomera_whitespace",
            description="Whitespace manipulation: trim lines, remove extra spaces, "
                        "convert tabs/spaces, normalize line endings.",
            input_schema={
                "type": "object",
                "properties": {
                    "text": {
                        "type": "string",
                        "description": "The text to process"
                    },
                    "operation": {
                        "type": "string",
                        "enum": ["trim", "remove_extra_spaces", "tabs_to_spaces",
                                 "spaces_to_tabs", "normalize_endings"],
                        "description": "Operation to perform"
                    },
                    "trim_mode": {
                        "type": "string",
                        "enum": ["both", "leading", "trailing"],
                        "description": "For trim: which whitespace to remove",
                        "default": "both"
                    },
                    "tab_size": {
                        "type": "integer",
                        "description": "Tab width in spaces",
                        "default": 4
                    },
                    "line_ending": {
                        "type": "string",
                        "enum": ["lf", "crlf", "cr"],
                        "description": "For normalize_endings: target line ending",
                        "default": "lf"
                    }
                },
                "required": ["text", "operation"]
            },
            handler=self._handle_whitespace_tools
        ))
437
+
438
+ def _handle_whitespace_tools(self, args: Dict[str, Any]) -> str:
439
+ """Handle whitespace tools execution."""
440
+ from tools.whitespace_tools import WhitespaceToolsProcessor
441
+
442
+ text = args.get("text", "")
443
+ operation = args.get("operation", "trim")
444
+
445
+ if operation == "trim":
446
+ mode = args.get("trim_mode", "both")
447
+ return WhitespaceToolsProcessor.trim_lines(text, mode)
448
+ elif operation == "remove_extra_spaces":
449
+ return WhitespaceToolsProcessor.remove_extra_spaces(text)
450
+ elif operation == "tabs_to_spaces":
451
+ tab_size = args.get("tab_size", 4)
452
+ return WhitespaceToolsProcessor.tabs_to_spaces(text, tab_size)
453
+ elif operation == "spaces_to_tabs":
454
+ tab_size = args.get("tab_size", 4)
455
+ return WhitespaceToolsProcessor.spaces_to_tabs(text, tab_size)
456
+ elif operation == "normalize_endings":
457
+ ending = args.get("line_ending", "lf")
458
+ return WhitespaceToolsProcessor.normalize_line_endings(text, ending)
459
+ else:
460
+ return f"Unknown operation: {operation}"
461
+
462
+ def _register_string_escape_tool(self) -> None:
463
+ """Register the String Escape Tool."""
464
+ self.register(MCPToolAdapter(
465
+ name="pomera_string_escape",
466
+ description="Escape/unescape strings for various formats: JSON, HTML, URL, XML, JavaScript, SQL.",
467
+ input_schema={
468
+ "type": "object",
469
+ "properties": {
470
+ "text": {
471
+ "type": "string",
472
+ "description": "The text to escape or unescape"
473
+ },
474
+ "operation": {
475
+ "type": "string",
476
+ "enum": ["json_escape", "json_unescape", "html_escape", "html_unescape",
477
+ "url_encode", "url_decode", "xml_escape", "xml_unescape"],
478
+ "description": "Escape/unescape operation"
479
+ }
480
+ },
481
+ "required": ["text", "operation"]
482
+ },
483
+ handler=self._handle_string_escape
484
+ ))
485
+
486
+ def _handle_string_escape(self, args: Dict[str, Any]) -> str:
487
+ """Handle string escape tool execution."""
488
+ from tools.string_escape_tool import StringEscapeProcessor
489
+
490
+ text = args.get("text", "")
491
+ operation = args.get("operation", "json_escape")
492
+
493
+ operations = {
494
+ "json_escape": StringEscapeProcessor.json_escape,
495
+ "json_unescape": StringEscapeProcessor.json_unescape,
496
+ "html_escape": StringEscapeProcessor.html_escape,
497
+ "html_unescape": StringEscapeProcessor.html_unescape,
498
+ "url_encode": StringEscapeProcessor.url_encode,
499
+ "url_decode": StringEscapeProcessor.url_decode,
500
+ "xml_escape": StringEscapeProcessor.xml_escape,
501
+ "xml_unescape": StringEscapeProcessor.xml_unescape,
502
+ }
503
+
504
+ if operation in operations:
505
+ return operations[operation](text)
506
+ return f"Unknown operation: {operation}"
507
+
508
    def _register_sorter_tools(self) -> None:
        """Register the Sorter Tools as 'pomera_sort'."""
        self.register(MCPToolAdapter(
            name="pomera_sort",
            description="Sort lines numerically or alphabetically, ascending or descending.",
            input_schema={
                "type": "object",
                "properties": {
                    "text": {
                        "type": "string",
                        "description": "Text with lines to sort"
                    },
                    "sort_type": {
                        "type": "string",
                        "enum": ["number", "alphabetical"],
                        "description": "Type of sorting"
                    },
                    "order": {
                        "type": "string",
                        "enum": ["ascending", "descending"],
                        "description": "Sort order",
                        "default": "ascending"
                    },
                    "unique_only": {
                        "type": "boolean",
                        "description": "For alphabetical: remove duplicates",
                        "default": False
                    },
                    "trim": {
                        "type": "boolean",
                        "description": "For alphabetical: trim whitespace",
                        "default": False
                    }
                },
                "required": ["text", "sort_type"]
            },
            handler=self._handle_sorter
        ))
546
+
547
+ def _handle_sorter(self, args: Dict[str, Any]) -> str:
548
+ """Handle sorter tool execution."""
549
+ from tools.sorter_tools import SorterToolsProcessor
550
+
551
+ text = args.get("text", "")
552
+ sort_type = args.get("sort_type", "alphabetical")
553
+ order = args.get("order", "ascending")
554
+
555
+ if sort_type == "number":
556
+ return SorterToolsProcessor.number_sorter(text, order)
557
+ else:
558
+ unique_only = args.get("unique_only", False)
559
+ trim = args.get("trim", False)
560
+ return SorterToolsProcessor.alphabetical_sorter(text, order, unique_only, trim)
561
+
562
    def _register_text_stats_tool(self) -> None:
        """Register the Text Statistics Tool as 'pomera_text_stats'."""
        self.register(MCPToolAdapter(
            name="pomera_text_stats",
            description="Analyze text and return statistics: character count, word count, "
                        "line count, sentence count, reading time, and top frequent words.",
            input_schema={
                "type": "object",
                "properties": {
                    "text": {
                        "type": "string",
                        "description": "Text to analyze"
                    },
                    "words_per_minute": {
                        "type": "integer",
                        "description": "Reading speed for time estimate",
                        "default": 200
                    }
                },
                "required": ["text"]
            },
            handler=self._handle_text_stats
        ))
585
+
586
+ def _handle_text_stats(self, args: Dict[str, Any]) -> str:
587
+ """Handle text statistics tool execution."""
588
+ from tools.text_statistics_tool import TextStatisticsProcessor
589
+ import json
590
+
591
+ text = args.get("text", "")
592
+ wpm = args.get("words_per_minute", 200)
593
+
594
+ stats = TextStatisticsProcessor.analyze_text(text, wpm)
595
+
596
+ # Format as readable output
597
+ lines = [
598
+ "=== Text Statistics ===",
599
+ f"Characters: {stats['char_count']} (without spaces: {stats['char_count_no_spaces']})",
600
+ f"Words: {stats['word_count']} (unique: {stats['unique_words']})",
601
+ f"Lines: {stats['line_count']} (non-empty: {stats.get('non_empty_lines', stats['line_count'])})",
602
+ f"Sentences: {stats['sentence_count']}",
603
+ f"Paragraphs: {stats['paragraph_count']}",
604
+ f"Average word length: {stats['avg_word_length']} characters",
605
+ f"Reading time: {stats['reading_time_seconds']} seconds (~{stats['reading_time_seconds']//60} min)",
606
+ ]
607
+
608
+ if stats['top_words']:
609
+ lines.append("\nTop words:")
610
+ for word, count in stats['top_words'][:10]:
611
+ lines.append(f" {word}: {count}")
612
+
613
+ return "\n".join(lines)
614
+
615
+ def _register_json_xml_tool(self) -> None:
616
+ """Register the JSON/XML Tool."""
617
+ self.register(MCPToolAdapter(
618
+ name="pomera_json_xml",
619
+ description="Convert between JSON and XML, prettify, minify, or validate JSON/XML.",
620
+ input_schema={
621
+ "type": "object",
622
+ "properties": {
623
+ "text": {
624
+ "type": "string",
625
+ "description": "JSON or XML text to process"
626
+ },
627
+ "operation": {
628
+ "type": "string",
629
+ "enum": ["json_prettify", "json_minify", "json_validate",
630
+ "xml_prettify", "xml_minify", "xml_validate",
631
+ "json_to_xml", "xml_to_json"],
632
+ "description": "Operation to perform"
633
+ },
634
+ "indent": {
635
+ "type": "integer",
636
+ "description": "Indentation spaces for prettify",
637
+ "default": 2
638
+ }
639
+ },
640
+ "required": ["text", "operation"]
641
+ },
642
+ handler=self._handle_json_xml
643
+ ))
644
+
645
+ def _handle_json_xml(self, args: Dict[str, Any]) -> str:
646
+ """Handle JSON/XML tool execution."""
647
+ import json
648
+ import xml.etree.ElementTree as ET
649
+ import xml.dom.minidom
650
+
651
+ text = args.get("text", "")
652
+ operation = args.get("operation", "json_prettify")
653
+ indent = args.get("indent", 2)
654
+
655
+ try:
656
+ if operation == "json_prettify":
657
+ data = json.loads(text)
658
+ return json.dumps(data, indent=indent, ensure_ascii=False)
659
+
660
+ elif operation == "json_minify":
661
+ data = json.loads(text)
662
+ return json.dumps(data, separators=(',', ':'), ensure_ascii=False)
663
+
664
+ elif operation == "json_validate":
665
+ json.loads(text)
666
+ return "Valid JSON"
667
+
668
+ elif operation == "xml_prettify":
669
+ dom = xml.dom.minidom.parseString(text)
670
+ return dom.toprettyxml(indent=" " * indent)
671
+
672
+ elif operation == "xml_minify":
673
+ root = ET.fromstring(text)
674
+ return ET.tostring(root, encoding='unicode')
675
+
676
+ elif operation == "xml_validate":
677
+ ET.fromstring(text)
678
+ return "Valid XML"
679
+
680
+ elif operation == "json_to_xml":
681
+ data = json.loads(text)
682
+ return self._dict_to_xml(data, "root")
683
+
684
+ elif operation == "xml_to_json":
685
+ root = ET.fromstring(text)
686
+ data = self._xml_to_dict(root)
687
+ return json.dumps(data, indent=indent, ensure_ascii=False)
688
+
689
+ else:
690
+ return f"Unknown operation: {operation}"
691
+
692
+ except json.JSONDecodeError as e:
693
+ return f"JSON Error: {str(e)}"
694
+ except ET.ParseError as e:
695
+ return f"XML Error: {str(e)}"
696
+ except Exception as e:
697
+ return f"Error: {str(e)}"
698
+
699
+ def _dict_to_xml(self, data: Any, root_name: str = "root") -> str:
700
+ """Convert dictionary to XML string."""
701
+ import xml.etree.ElementTree as ET
702
+
703
+ def build_element(parent, data):
704
+ if isinstance(data, dict):
705
+ for key, value in data.items():
706
+ child = ET.SubElement(parent, str(key))
707
+ build_element(child, value)
708
+ elif isinstance(data, list):
709
+ for item in data:
710
+ child = ET.SubElement(parent, "item")
711
+ build_element(child, item)
712
+ else:
713
+ parent.text = str(data) if data is not None else ""
714
+
715
+ root = ET.Element(root_name)
716
+ build_element(root, data)
717
+ return ET.tostring(root, encoding='unicode')
718
+
719
+ def _xml_to_dict(self, element) -> Dict[str, Any]:
720
+ """Convert XML element to dictionary."""
721
+ result = {}
722
+
723
+ for child in element:
724
+ if len(child) == 0:
725
+ result[child.tag] = child.text or ""
726
+ else:
727
+ child_data = self._xml_to_dict(child)
728
+ if child.tag in result:
729
+ if not isinstance(result[child.tag], list):
730
+ result[child.tag] = [result[child.tag]]
731
+ result[child.tag].append(child_data)
732
+ else:
733
+ result[child.tag] = child_data
734
+
735
+ return result if result else (element.text or "")
736
+
737
+ def _register_url_parser_tool(self) -> None:
738
+ """Register the URL Parser Tool."""
739
+ self.register(MCPToolAdapter(
740
+ name="pomera_url_parse",
741
+ description="Parse a URL and extract its components: scheme, host, port, path, query, fragment.",
742
+ input_schema={
743
+ "type": "object",
744
+ "properties": {
745
+ "url": {
746
+ "type": "string",
747
+ "description": "URL to parse"
748
+ }
749
+ },
750
+ "required": ["url"]
751
+ },
752
+ handler=self._handle_url_parse
753
+ ))
754
+
755
+ def _handle_url_parse(self, args: Dict[str, Any]) -> str:
756
+ """Handle URL parser tool execution."""
757
+ from urllib.parse import urlparse, parse_qs
758
+
759
+ url = args.get("url", "")
760
+
761
+ try:
762
+ parsed = urlparse(url)
763
+ query_params = parse_qs(parsed.query)
764
+
765
+ lines = [
766
+ "=== URL Components ===",
767
+ f"Scheme: {parsed.scheme or '(none)'}",
768
+ f"Host: {parsed.hostname or '(none)'}",
769
+ f"Port: {parsed.port or '(default)'}",
770
+ f"Path: {parsed.path or '/'}",
771
+ f"Query: {parsed.query or '(none)'}",
772
+ f"Fragment: {parsed.fragment or '(none)'}",
773
+ ]
774
+
775
+ if query_params:
776
+ lines.append("\nQuery Parameters:")
777
+ for key, values in query_params.items():
778
+ for value in values:
779
+ lines.append(f" {key} = {value}")
780
+
781
+ return "\n".join(lines)
782
+
783
+ except Exception as e:
784
+ return f"Error parsing URL: {str(e)}"
785
+
786
+ def _register_text_wrapper_tool(self) -> None:
787
+ """Register the Text Wrapper Tool."""
788
+ self.register(MCPToolAdapter(
789
+ name="pomera_text_wrap",
790
+ description="Wrap text to a specified width, preserving words.",
791
+ input_schema={
792
+ "type": "object",
793
+ "properties": {
794
+ "text": {
795
+ "type": "string",
796
+ "description": "Text to wrap"
797
+ },
798
+ "width": {
799
+ "type": "integer",
800
+ "description": "Maximum line width",
801
+ "default": 80
802
+ }
803
+ },
804
+ "required": ["text"]
805
+ },
806
+ handler=self._handle_text_wrap
807
+ ))
808
+
809
+ def _handle_text_wrap(self, args: Dict[str, Any]) -> str:
810
+ """Handle text wrapper tool execution."""
811
+ import textwrap
812
+
813
+ text = args.get("text", "")
814
+ width = args.get("width", 80)
815
+
816
+ # Wrap each paragraph separately
817
+ paragraphs = text.split('\n\n')
818
+ wrapped = []
819
+
820
+ for para in paragraphs:
821
+ if para.strip():
822
+ wrapped.append(textwrap.fill(para, width=width))
823
+ else:
824
+ wrapped.append("")
825
+
826
+ return '\n\n'.join(wrapped)
827
+
828
+ def _register_number_base_tool(self) -> None:
829
+ """Register the Number Base Converter Tool."""
830
+ self.register(MCPToolAdapter(
831
+ name="pomera_number_base",
832
+ description="Convert numbers between bases: binary, octal, decimal, hexadecimal.",
833
+ input_schema={
834
+ "type": "object",
835
+ "properties": {
836
+ "value": {
837
+ "type": "string",
838
+ "description": "Number to convert (can include 0x, 0b, 0o prefix)"
839
+ },
840
+ "from_base": {
841
+ "type": "string",
842
+ "enum": ["binary", "octal", "decimal", "hex", "auto"],
843
+ "description": "Source base (auto detects from prefix)",
844
+ "default": "auto"
845
+ },
846
+ "to_base": {
847
+ "type": "string",
848
+ "enum": ["binary", "octal", "decimal", "hex", "all"],
849
+ "description": "Target base (all shows all bases)",
850
+ "default": "all"
851
+ }
852
+ },
853
+ "required": ["value"]
854
+ },
855
+ handler=self._handle_number_base
856
+ ))
857
+
858
+ def _handle_number_base(self, args: Dict[str, Any]) -> str:
859
+ """Handle number base converter tool execution."""
860
+ value = args.get("value", "").strip()
861
+ from_base = args.get("from_base", "auto")
862
+ to_base = args.get("to_base", "all")
863
+
864
+ try:
865
+ # Parse input number
866
+ if from_base == "auto":
867
+ if value.startswith('0x') or value.startswith('0X'):
868
+ num = int(value, 16)
869
+ elif value.startswith('0b') or value.startswith('0B'):
870
+ num = int(value, 2)
871
+ elif value.startswith('0o') or value.startswith('0O'):
872
+ num = int(value, 8)
873
+ else:
874
+ num = int(value, 10)
875
+ else:
876
+ bases = {"binary": 2, "octal": 8, "decimal": 10, "hex": 16}
877
+ num = int(value.replace('0x', '').replace('0b', '').replace('0o', ''), bases[from_base])
878
+
879
+ # Convert to target base(s)
880
+ if to_base == "all":
881
+ return (f"Decimal: {num}\n"
882
+ f"Binary: 0b{bin(num)[2:]}\n"
883
+ f"Octal: 0o{oct(num)[2:]}\n"
884
+ f"Hexadecimal: 0x{hex(num)[2:]}")
885
+ elif to_base == "binary":
886
+ return f"0b{bin(num)[2:]}"
887
+ elif to_base == "octal":
888
+ return f"0o{oct(num)[2:]}"
889
+ elif to_base == "decimal":
890
+ return str(num)
891
+ elif to_base == "hex":
892
+ return f"0x{hex(num)[2:]}"
893
+ else:
894
+ return f"Unknown target base: {to_base}"
895
+
896
+ except ValueError as e:
897
+ return f"Error: Invalid number format - {str(e)}"
898
+
899
+ def _register_timestamp_tool(self) -> None:
900
+ """Register the Timestamp Converter Tool."""
901
+ self.register(MCPToolAdapter(
902
+ name="pomera_timestamp",
903
+ description="Convert between Unix timestamps and human-readable dates.",
904
+ input_schema={
905
+ "type": "object",
906
+ "properties": {
907
+ "value": {
908
+ "type": "string",
909
+ "description": "Unix timestamp or date string to convert"
910
+ },
911
+ "operation": {
912
+ "type": "string",
913
+ "enum": ["to_date", "to_timestamp", "now"],
914
+ "description": "Conversion direction or get current time",
915
+ "default": "to_date"
916
+ },
917
+ "format": {
918
+ "type": "string",
919
+ "enum": ["iso", "us", "eu", "long", "short"],
920
+ "description": "Output date format",
921
+ "default": "iso"
922
+ }
923
+ },
924
+ "required": ["value"]
925
+ },
926
+ handler=self._handle_timestamp
927
+ ))
928
+
929
+ def _handle_timestamp(self, args: Dict[str, Any]) -> str:
930
+ """Handle timestamp converter tool execution."""
931
+ from datetime import datetime
932
+ import time
933
+
934
+ value = args.get("value", "").strip()
935
+ operation = args.get("operation", "to_date")
936
+ date_format = args.get("format", "iso")
937
+
938
+ formats = {
939
+ "iso": "%Y-%m-%dT%H:%M:%S",
940
+ "us": "%m/%d/%Y %I:%M:%S %p",
941
+ "eu": "%d/%m/%Y %H:%M:%S",
942
+ "long": "%B %d, %Y %H:%M:%S",
943
+ "short": "%b %d, %Y %H:%M"
944
+ }
945
+
946
+ try:
947
+ if operation == "now":
948
+ now = datetime.now()
949
+ ts = int(time.time())
950
+ return (f"Current time:\n"
951
+ f" Unix timestamp: {ts}\n"
952
+ f" ISO: {now.strftime(formats['iso'])}\n"
953
+ f" US: {now.strftime(formats['us'])}\n"
954
+ f" EU: {now.strftime(formats['eu'])}")
955
+
956
+ elif operation == "to_date":
957
+ ts = float(value)
958
+ # Handle milliseconds
959
+ if ts > 1e12:
960
+ ts = ts / 1000
961
+ dt = datetime.fromtimestamp(ts)
962
+ return dt.strftime(formats.get(date_format, formats['iso']))
963
+
964
+ elif operation == "to_timestamp":
965
+ # Try common date formats
966
+ for fmt in ["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%m/%d/%Y", "%d/%m/%Y"]:
967
+ try:
968
+ dt = datetime.strptime(value, fmt)
969
+ return str(int(dt.timestamp()))
970
+ except ValueError:
971
+ continue
972
+ return "Error: Could not parse date. Try formats: YYYY-MM-DD, MM/DD/YYYY"
973
+
974
+ else:
975
+ return f"Unknown operation: {operation}"
976
+
977
+ except ValueError as e:
978
+ return f"Error: {str(e)}"
979
+
980
+ # =========================================================================
981
+ # Phase 2 Tools - Additional Pomera Tools
982
+ # =========================================================================
983
+
984
+ def _register_regex_extractor_tool(self) -> None:
985
+ """Register the Regex Extractor Tool."""
986
+ self.register(MCPToolAdapter(
987
+ name="pomera_regex_extract",
988
+ description="Extract text matches using regular expressions. Supports capture groups, "
989
+ "deduplication, and multiple match modes.",
990
+ input_schema={
991
+ "type": "object",
992
+ "properties": {
993
+ "text": {
994
+ "type": "string",
995
+ "description": "Text to search"
996
+ },
997
+ "pattern": {
998
+ "type": "string",
999
+ "description": "Regular expression pattern"
1000
+ },
1001
+ "match_mode": {
1002
+ "type": "string",
1003
+ "enum": ["all_per_line", "first_per_line"],
1004
+ "description": "Match all occurrences or first per line",
1005
+ "default": "all_per_line"
1006
+ },
1007
+ "omit_duplicates": {
1008
+ "type": "boolean",
1009
+ "description": "Remove duplicate matches",
1010
+ "default": False
1011
+ },
1012
+ "sort_results": {
1013
+ "type": "boolean",
1014
+ "description": "Sort results alphabetically",
1015
+ "default": False
1016
+ },
1017
+ "case_sensitive": {
1018
+ "type": "boolean",
1019
+ "description": "Case-sensitive matching",
1020
+ "default": False
1021
+ }
1022
+ },
1023
+ "required": ["text", "pattern"]
1024
+ },
1025
+ handler=self._handle_regex_extract
1026
+ ))
1027
+
1028
+ def _handle_regex_extract(self, args: Dict[str, Any]) -> str:
1029
+ """Handle regex extractor tool execution."""
1030
+ from tools.regex_extractor import RegexExtractorProcessor
1031
+
1032
+ text = args.get("text", "")
1033
+ pattern = args.get("pattern", "")
1034
+ match_mode = args.get("match_mode", "all_per_line")
1035
+ omit_duplicates = args.get("omit_duplicates", False)
1036
+ sort_results = args.get("sort_results", False)
1037
+ case_sensitive = args.get("case_sensitive", False)
1038
+
1039
+ return RegexExtractorProcessor.extract_matches(
1040
+ text, pattern, match_mode, omit_duplicates,
1041
+ hide_counts=True, sort_results=sort_results,
1042
+ case_sensitive=case_sensitive
1043
+ )
1044
+
1045
+ def _register_markdown_tools(self) -> None:
1046
+ """Register the Markdown Tools."""
1047
+ self.register(MCPToolAdapter(
1048
+ name="pomera_markdown",
1049
+ description="Markdown processing: strip formatting, extract links, extract headers, "
1050
+ "convert tables to CSV, format tables.",
1051
+ input_schema={
1052
+ "type": "object",
1053
+ "properties": {
1054
+ "text": {
1055
+ "type": "string",
1056
+ "description": "Markdown text to process"
1057
+ },
1058
+ "operation": {
1059
+ "type": "string",
1060
+ "enum": ["strip", "extract_links", "extract_headers",
1061
+ "table_to_csv", "format_table"],
1062
+ "description": "Operation to perform"
1063
+ },
1064
+ "preserve_links_text": {
1065
+ "type": "boolean",
1066
+ "description": "For strip: keep link text",
1067
+ "default": True
1068
+ },
1069
+ "include_images": {
1070
+ "type": "boolean",
1071
+ "description": "For extract_links: include image links",
1072
+ "default": False
1073
+ },
1074
+ "header_format": {
1075
+ "type": "string",
1076
+ "enum": ["indented", "flat", "numbered"],
1077
+ "description": "For extract_headers: output format",
1078
+ "default": "indented"
1079
+ }
1080
+ },
1081
+ "required": ["text", "operation"]
1082
+ },
1083
+ handler=self._handle_markdown_tools
1084
+ ))
1085
+
1086
+ def _handle_markdown_tools(self, args: Dict[str, Any]) -> str:
1087
+ """Handle markdown tools execution."""
1088
+ from tools.markdown_tools import MarkdownToolsProcessor
1089
+
1090
+ text = args.get("text", "")
1091
+ operation = args.get("operation", "strip")
1092
+
1093
+ if operation == "strip":
1094
+ preserve_links_text = args.get("preserve_links_text", True)
1095
+ return MarkdownToolsProcessor.strip_markdown(text, preserve_links_text)
1096
+ elif operation == "extract_links":
1097
+ include_images = args.get("include_images", False)
1098
+ return MarkdownToolsProcessor.extract_links(text, include_images)
1099
+ elif operation == "extract_headers":
1100
+ header_format = args.get("header_format", "indented")
1101
+ return MarkdownToolsProcessor.extract_headers(text, header_format)
1102
+ elif operation == "table_to_csv":
1103
+ return MarkdownToolsProcessor.table_to_csv(text)
1104
+ elif operation == "format_table":
1105
+ return MarkdownToolsProcessor.format_table(text)
1106
+ else:
1107
+ return f"Unknown operation: {operation}"
1108
+
1109
+ def _register_translator_tools(self) -> None:
1110
+ """Register the Translator Tools (Morse/Binary)."""
1111
+ self.register(MCPToolAdapter(
1112
+ name="pomera_translator",
1113
+ description="Translate text to/from Morse code or binary.",
1114
+ input_schema={
1115
+ "type": "object",
1116
+ "properties": {
1117
+ "text": {
1118
+ "type": "string",
1119
+ "description": "Text to translate"
1120
+ },
1121
+ "format": {
1122
+ "type": "string",
1123
+ "enum": ["morse", "binary"],
1124
+ "description": "Translation format"
1125
+ },
1126
+ "direction": {
1127
+ "type": "string",
1128
+ "enum": ["encode", "decode", "auto"],
1129
+ "description": "Translation direction (auto-detects for binary)",
1130
+ "default": "encode"
1131
+ }
1132
+ },
1133
+ "required": ["text", "format"]
1134
+ },
1135
+ handler=self._handle_translator
1136
+ ))
1137
+
1138
+ def _handle_translator(self, args: Dict[str, Any]) -> str:
1139
+ """Handle translator tools execution."""
1140
+ from tools.translator_tools import TranslatorToolsProcessor
1141
+
1142
+ text = args.get("text", "")
1143
+ fmt = args.get("format", "morse")
1144
+ direction = args.get("direction", "encode")
1145
+
1146
+ if fmt == "morse":
1147
+ mode = "morse" if direction == "encode" else "text"
1148
+ return TranslatorToolsProcessor.morse_translator(text, mode)
1149
+ elif fmt == "binary":
1150
+ # Binary translator auto-detects direction
1151
+ return TranslatorToolsProcessor.binary_translator(text)
1152
+ else:
1153
+ return f"Unknown format: {fmt}"
1154
+
1155
+ def _register_cron_tool(self) -> None:
1156
+ """Register the Cron Expression Tool."""
1157
+ self.register(MCPToolAdapter(
1158
+ name="pomera_cron",
1159
+ description="Parse and explain cron expressions, validate syntax, calculate next run times.",
1160
+ input_schema={
1161
+ "type": "object",
1162
+ "properties": {
1163
+ "expression": {
1164
+ "type": "string",
1165
+ "description": "Cron expression (5 fields: minute hour day month weekday)"
1166
+ },
1167
+ "operation": {
1168
+ "type": "string",
1169
+ "enum": ["explain", "validate", "next_runs"],
1170
+ "description": "Operation to perform"
1171
+ },
1172
+ "count": {
1173
+ "type": "integer",
1174
+ "description": "For next_runs: number of runs to calculate",
1175
+ "default": 5
1176
+ }
1177
+ },
1178
+ "required": ["expression", "operation"]
1179
+ },
1180
+ handler=self._handle_cron
1181
+ ))
1182
+
1183
+ def _handle_cron(self, args: Dict[str, Any]) -> str:
1184
+ """Handle cron tool execution."""
1185
+ from datetime import datetime, timedelta
1186
+
1187
+ expression = args.get("expression", "").strip()
1188
+ operation = args.get("operation", "explain")
1189
+ count = args.get("count", 5)
1190
+
1191
+ parts = expression.split()
1192
+ if len(parts) != 5:
1193
+ return f"Error: Invalid cron expression. Expected 5 fields, got {len(parts)}.\nFormat: minute hour day month weekday"
1194
+
1195
+ minute, hour, day, month, weekday = parts
1196
+
1197
+ if operation == "explain":
1198
+ return self._explain_cron(minute, hour, day, month, weekday)
1199
+ elif operation == "validate":
1200
+ return self._validate_cron(minute, hour, day, month, weekday)
1201
+ elif operation == "next_runs":
1202
+ return self._calculate_cron_runs(expression, count)
1203
+ else:
1204
+ return f"Unknown operation: {operation}"
1205
+
1206
+ def _explain_cron(self, minute: str, hour: str, day: str, month: str, weekday: str) -> str:
1207
+ """Generate human-readable explanation of cron expression."""
1208
+ def explain_field(value: str, field_type: str) -> str:
1209
+ ranges = {
1210
+ "minute": (0, 59), "hour": (0, 23),
1211
+ "day": (1, 31), "month": (1, 12), "weekday": (0, 6)
1212
+ }
1213
+ min_val, max_val = ranges[field_type]
1214
+
1215
+ if value == "*":
1216
+ return f"every {field_type}"
1217
+ elif value.startswith("*/"):
1218
+ step = value[2:]
1219
+ return f"every {step} {field_type}s"
1220
+ elif "-" in value:
1221
+ return f"{field_type}s {value}"
1222
+ elif "," in value:
1223
+ return f"{field_type}s {value}"
1224
+ else:
1225
+ return f"{field_type} {value}"
1226
+
1227
+ lines = [
1228
+ f"Cron Expression: {minute} {hour} {day} {month} {weekday}",
1229
+ "=" * 50,
1230
+ "",
1231
+ "Field Breakdown:",
1232
+ f" Minute: {minute:10} - {explain_field(minute, 'minute')}",
1233
+ f" Hour: {hour:10} - {explain_field(hour, 'hour')}",
1234
+ f" Day: {day:10} - {explain_field(day, 'day')}",
1235
+ f" Month: {month:10} - {explain_field(month, 'month')}",
1236
+ f" Weekday: {weekday:10} - {explain_field(weekday, 'weekday')} (0=Sun, 6=Sat)"
1237
+ ]
1238
+ return "\n".join(lines)
1239
+
1240
+ def _validate_cron(self, minute: str, hour: str, day: str, month: str, weekday: str) -> str:
1241
+ """Validate cron expression fields."""
1242
+ import re
1243
+
1244
+ def validate_field(value: str, min_val: int, max_val: int, name: str) -> List[str]:
1245
+ errors = []
1246
+ cron_pattern = r'^(\*|(\d+(-\d+)?)(,\d+(-\d+)?)*|(\*/\d+))$'
1247
+
1248
+ if not re.match(cron_pattern, value):
1249
+ errors.append(f"{name}: Invalid format '{value}'")
1250
+ else:
1251
+ # Check numeric ranges
1252
+ nums = re.findall(r'\d+', value)
1253
+ for n in nums:
1254
+ if int(n) < min_val or int(n) > max_val:
1255
+ errors.append(f"{name}: Value {n} out of range ({min_val}-{max_val})")
1256
+ return errors
1257
+
1258
+ all_errors = []
1259
+ all_errors.extend(validate_field(minute, 0, 59, "Minute"))
1260
+ all_errors.extend(validate_field(hour, 0, 23, "Hour"))
1261
+ all_errors.extend(validate_field(day, 1, 31, "Day"))
1262
+ all_errors.extend(validate_field(month, 1, 12, "Month"))
1263
+ all_errors.extend(validate_field(weekday, 0, 6, "Weekday"))
1264
+
1265
+ if all_errors:
1266
+ return "❌ INVALID\n" + "\n".join(all_errors)
1267
+ return "✓ Valid cron expression"
1268
+
1269
+ def _calculate_cron_runs(self, expression: str, count: int) -> str:
1270
+ """Calculate next scheduled runs for a cron expression."""
1271
+ from datetime import datetime, timedelta
1272
+ import re
1273
+
1274
+ parts = expression.split()
1275
+ minute, hour, day, month, weekday = parts
1276
+
1277
+ def matches_field(value: int, field: str) -> bool:
1278
+ if field == "*":
1279
+ return True
1280
+ if field.startswith("*/"):
1281
+ step = int(field[2:])
1282
+ return value % step == 0
1283
+ if "-" in field:
1284
+ start, end = map(int, field.split("-"))
1285
+ return start <= value <= end
1286
+ if "," in field:
1287
+ return value in [int(x) for x in field.split(",")]
1288
+ return value == int(field)
1289
+
1290
+ runs = []
1291
+ current = datetime.now().replace(second=0, microsecond=0) + timedelta(minutes=1)
1292
+ max_iterations = 525600 # One year of minutes
1293
+
1294
+ for _ in range(max_iterations):
1295
+ if (matches_field(current.minute, minute) and
1296
+ matches_field(current.hour, hour) and
1297
+ matches_field(current.day, day) and
1298
+ matches_field(current.month, month) and
1299
+ matches_field(current.weekday(), weekday.replace("7", "0"))):
1300
+ runs.append(current)
1301
+ if len(runs) >= count:
1302
+ break
1303
+ current += timedelta(minutes=1)
1304
+
1305
+ if not runs:
1306
+ return "Could not calculate next runs (expression may never match)"
1307
+
1308
+ lines = [f"Next {len(runs)} scheduled runs:", ""]
1309
+ for i, run in enumerate(runs, 1):
1310
+ lines.append(f" {i}. {run.strftime('%Y-%m-%d %H:%M')} ({run.strftime('%A')})")
1311
+ return "\n".join(lines)
1312
+
1313
+ def _register_email_extraction_tool(self) -> None:
1314
+ """Register the Email Extraction Tool."""
1315
+ self.register(MCPToolAdapter(
1316
+ name="pomera_extract_emails",
1317
+ description="Extract email addresses from text with options for deduplication and sorting.",
1318
+ input_schema={
1319
+ "type": "object",
1320
+ "properties": {
1321
+ "text": {
1322
+ "type": "string",
1323
+ "description": "Text to extract emails from"
1324
+ },
1325
+ "omit_duplicates": {
1326
+ "type": "boolean",
1327
+ "description": "Remove duplicate emails",
1328
+ "default": True
1329
+ },
1330
+ "sort_emails": {
1331
+ "type": "boolean",
1332
+ "description": "Sort emails alphabetically",
1333
+ "default": False
1334
+ },
1335
+ "only_domain": {
1336
+ "type": "boolean",
1337
+ "description": "Extract only domains, not full addresses",
1338
+ "default": False
1339
+ }
1340
+ },
1341
+ "required": ["text"]
1342
+ },
1343
+ handler=self._handle_email_extraction
1344
+ ))
1345
+
1346
+ def _handle_email_extraction(self, args: Dict[str, Any]) -> str:
1347
+ """Handle email extraction tool execution."""
1348
+ from tools.email_extraction_tool import EmailExtractionProcessor
1349
+
1350
+ text = args.get("text", "")
1351
+ omit_duplicates = args.get("omit_duplicates", True)
1352
+ sort_emails = args.get("sort_emails", False)
1353
+ only_domain = args.get("only_domain", False)
1354
+
1355
+ return EmailExtractionProcessor.extract_emails_advanced(
1356
+ text, omit_duplicates, hide_counts=True,
1357
+ sort_emails=sort_emails, only_domain=only_domain
1358
+ )
1359
+
1360
+ def _register_url_extractor_tool(self) -> None:
1361
+ """Register the URL Extractor Tool."""
1362
+ self.register(MCPToolAdapter(
1363
+ name="pomera_extract_urls",
1364
+ description="Extract URLs from text with options for different URL types.",
1365
+ input_schema={
1366
+ "type": "object",
1367
+ "properties": {
1368
+ "text": {
1369
+ "type": "string",
1370
+ "description": "Text to extract URLs from"
1371
+ },
1372
+ "extract_href": {
1373
+ "type": "boolean",
1374
+ "description": "Extract from HTML href attributes",
1375
+ "default": False
1376
+ },
1377
+ "extract_https": {
1378
+ "type": "boolean",
1379
+ "description": "Extract http/https URLs",
1380
+ "default": True
1381
+ },
1382
+ "extract_any_protocol": {
1383
+ "type": "boolean",
1384
+ "description": "Extract URLs with any protocol",
1385
+ "default": False
1386
+ },
1387
+ "extract_markdown": {
1388
+ "type": "boolean",
1389
+ "description": "Extract markdown links",
1390
+ "default": False
1391
+ },
1392
+ "filter_text": {
1393
+ "type": "string",
1394
+ "description": "Filter URLs containing this text",
1395
+ "default": ""
1396
+ }
1397
+ },
1398
+ "required": ["text"]
1399
+ },
1400
+ handler=self._handle_url_extraction
1401
+ ))
1402
+
1403
+ def _handle_url_extraction(self, args: Dict[str, Any]) -> str:
1404
+ """Handle URL extraction tool execution."""
1405
+ from tools.url_link_extractor import URLLinkExtractorProcessor
1406
+
1407
+ text = args.get("text", "")
1408
+ extract_href = args.get("extract_href", False)
1409
+ extract_https = args.get("extract_https", True)
1410
+ extract_any_protocol = args.get("extract_any_protocol", False)
1411
+ extract_markdown = args.get("extract_markdown", False)
1412
+ filter_text = args.get("filter_text", "")
1413
+
1414
+ return URLLinkExtractorProcessor.extract_urls(
1415
+ text, extract_href, extract_https,
1416
+ extract_any_protocol, extract_markdown, filter_text
1417
+ )
1418
+
1419
+ def _register_word_frequency_tool(self) -> None:
1420
+ """Register the Word Frequency Counter Tool."""
1421
+ self.register(MCPToolAdapter(
1422
+ name="pomera_word_frequency",
1423
+ description="Count word frequencies in text, showing count and percentage for each word.",
1424
+ input_schema={
1425
+ "type": "object",
1426
+ "properties": {
1427
+ "text": {
1428
+ "type": "string",
1429
+ "description": "Text to analyze"
1430
+ }
1431
+ },
1432
+ "required": ["text"]
1433
+ },
1434
+ handler=self._handle_word_frequency
1435
+ ))
1436
+
1437
+ def _handle_word_frequency(self, args: Dict[str, Any]) -> str:
1438
+ """Handle word frequency counter tool execution."""
1439
+ from tools.word_frequency_counter import WordFrequencyCounterProcessor
1440
+
1441
+ text = args.get("text", "")
1442
+ return WordFrequencyCounterProcessor.word_frequency(text)
1443
+
1444
+ def _register_column_tools(self) -> None:
1445
+ """Register the Column/CSV Tools."""
1446
+ self.register(MCPToolAdapter(
1447
+ name="pomera_column_tools",
1448
+ description="CSV/column manipulation: extract column, reorder columns, delete column, "
1449
+ "transpose, convert to fixed width.",
1450
+ input_schema={
1451
+ "type": "object",
1452
+ "properties": {
1453
+ "text": {
1454
+ "type": "string",
1455
+ "description": "CSV or delimited text"
1456
+ },
1457
+ "operation": {
1458
+ "type": "string",
1459
+ "enum": ["extract", "reorder", "delete", "transpose", "to_fixed_width"],
1460
+ "description": "Operation to perform"
1461
+ },
1462
+ "column_index": {
1463
+ "type": "integer",
1464
+ "description": "For extract/delete: column index (0-based)",
1465
+ "default": 0
1466
+ },
1467
+ "column_order": {
1468
+ "type": "string",
1469
+ "description": "For reorder: comma-separated indices (e.g., '2,0,1')"
1470
+ },
1471
+ "delimiter": {
1472
+ "type": "string",
1473
+ "description": "Column delimiter",
1474
+ "default": ","
1475
+ }
1476
+ },
1477
+ "required": ["text", "operation"]
1478
+ },
1479
+ handler=self._handle_column_tools
1480
+ ))
1481
+
1482
+ def _handle_column_tools(self, args: Dict[str, Any]) -> str:
1483
+ """Handle column tools execution."""
1484
+ from tools.column_tools import ColumnToolsProcessor
1485
+
1486
+ text = args.get("text", "")
1487
+ operation = args.get("operation", "extract")
1488
+ delimiter = args.get("delimiter", ",")
1489
+ column_index = args.get("column_index", 0)
1490
+ column_order = args.get("column_order", "")
1491
+
1492
+ if operation == "extract":
1493
+ return ColumnToolsProcessor.extract_column(text, column_index, delimiter)
1494
+ elif operation == "reorder":
1495
+ if not column_order:
1496
+ return "Error: column_order is required for reorder operation"
1497
+ return ColumnToolsProcessor.reorder_columns(text, column_order, delimiter)
1498
+ elif operation == "delete":
1499
+ return ColumnToolsProcessor.delete_column(text, column_index, delimiter)
1500
+ elif operation == "transpose":
1501
+ return ColumnToolsProcessor.transpose(text, delimiter)
1502
+ elif operation == "to_fixed_width":
1503
+ return ColumnToolsProcessor.to_fixed_width(text, delimiter)
1504
+ else:
1505
+ return f"Unknown operation: {operation}"
1506
+
1507
+ def _register_generator_tools(self) -> None:
1508
+ """Register the Generator Tools."""
1509
+ self.register(MCPToolAdapter(
1510
+ name="pomera_generators",
1511
+ description="Generate passwords, UUIDs, Lorem Ipsum text, or random emails.",
1512
+ input_schema={
1513
+ "type": "object",
1514
+ "properties": {
1515
+ "generator": {
1516
+ "type": "string",
1517
+ "enum": ["password", "uuid", "lorem_ipsum", "random_email"],
1518
+ "description": "Generator type"
1519
+ },
1520
+ "length": {
1521
+ "type": "integer",
1522
+ "description": "For password: length in characters",
1523
+ "default": 20
1524
+ },
1525
+ "count": {
1526
+ "type": "integer",
1527
+ "description": "Number of items to generate",
1528
+ "default": 1
1529
+ },
1530
+ "uuid_version": {
1531
+ "type": "integer",
1532
+ "enum": [1, 4],
1533
+ "description": "UUID version (1=time-based, 4=random)",
1534
+ "default": 4
1535
+ },
1536
+ "lorem_type": {
1537
+ "type": "string",
1538
+ "enum": ["words", "sentences", "paragraphs"],
1539
+ "description": "For lorem_ipsum: unit type",
1540
+ "default": "paragraphs"
1541
+ }
1542
+ },
1543
+ "required": ["generator"]
1544
+ },
1545
+ handler=self._handle_generators
1546
+ ))
1547
+
1548
+ def _handle_generators(self, args: Dict[str, Any]) -> str:
1549
+ """Handle generator tools execution."""
1550
+ import uuid
1551
+ import string
1552
+ import random
1553
+
1554
+ generator = args.get("generator", "uuid")
1555
+ count = args.get("count", 1)
1556
+
1557
+ if generator == "password":
1558
+ length = args.get("length", 20)
1559
+ results = []
1560
+ chars = string.ascii_letters + string.digits + string.punctuation
1561
+ for _ in range(count):
1562
+ results.append(''.join(random.choices(chars, k=length)))
1563
+ return "\n".join(results)
1564
+
1565
+ elif generator == "uuid":
1566
+ version = args.get("uuid_version", 4)
1567
+ results = []
1568
+ for _ in range(count):
1569
+ if version == 1:
1570
+ results.append(str(uuid.uuid1()))
1571
+ else:
1572
+ results.append(str(uuid.uuid4()))
1573
+ return "\n".join(results)
1574
+
1575
+ elif generator == "lorem_ipsum":
1576
+ lorem_type = args.get("lorem_type", "paragraphs")
1577
+ lorem_words = [
1578
+ "lorem", "ipsum", "dolor", "sit", "amet", "consectetur", "adipiscing",
1579
+ "elit", "sed", "do", "eiusmod", "tempor", "incididunt", "ut", "labore",
1580
+ "et", "dolore", "magna", "aliqua", "enim", "ad", "minim", "veniam",
1581
+ "quis", "nostrud", "exercitation", "ullamco", "laboris", "nisi", "aliquip",
1582
+ "ex", "ea", "commodo", "consequat", "duis", "aute", "irure", "in",
1583
+ "reprehenderit", "voluptate", "velit", "esse", "cillum", "fugiat", "nulla"
1584
+ ]
1585
+
1586
+ if lorem_type == "words":
1587
+ return " ".join(random.choices(lorem_words, k=count))
1588
+ elif lorem_type == "sentences":
1589
+ sentences = []
1590
+ for _ in range(count):
1591
+ words = random.choices(lorem_words, k=random.randint(8, 15))
1592
+ words[0] = words[0].capitalize()
1593
+ sentences.append(" ".join(words) + ".")
1594
+ return " ".join(sentences)
1595
+ else: # paragraphs
1596
+ paragraphs = []
1597
+ for _ in range(count):
1598
+ sentences = []
1599
+ for _ in range(random.randint(3, 6)):
1600
+ words = random.choices(lorem_words, k=random.randint(8, 15))
1601
+ words[0] = words[0].capitalize()
1602
+ sentences.append(" ".join(words) + ".")
1603
+ paragraphs.append(" ".join(sentences))
1604
+ return "\n\n".join(paragraphs)
1605
+
1606
+ elif generator == "random_email":
1607
+ domains = ["example.com", "test.org", "sample.net", "demo.io"]
1608
+ results = []
1609
+ for _ in range(count):
1610
+ name = ''.join(random.choices(string.ascii_lowercase, k=8))
1611
+ domain = random.choice(domains)
1612
+ results.append(f"{name}@{domain}")
1613
+ return "\n".join(results)
1614
+
1615
+ else:
1616
+ return f"Unknown generator: {generator}"
1617
+
1618
    def _register_slug_generator_tool(self) -> None:
        """Register the Slug Generator Tool.

        Exposes ``pomera_slug``; execution is dispatched to
        :meth:`_handle_slug_generator`.
        """
        self.register(MCPToolAdapter(
            name="pomera_slug",
            description="Generate URL-friendly slugs from text with transliteration and customization options.",
            input_schema={
                "type": "object",
                "properties": {
                    "text": {
                        "type": "string",
                        "description": "Text to convert to slug"
                    },
                    "separator": {
                        "type": "string",
                        "description": "Word separator character",
                        "default": "-"
                    },
                    "lowercase": {
                        "type": "boolean",
                        "description": "Convert to lowercase",
                        "default": True
                    },
                    "transliterate": {
                        "type": "boolean",
                        "description": "Convert accented characters to ASCII",
                        "default": True
                    },
                    "max_length": {
                        "type": "integer",
                        "description": "Maximum slug length (0 = unlimited)",
                        "default": 0
                    },
                    "remove_stopwords": {
                        "type": "boolean",
                        "description": "Remove common stop words",
                        "default": False
                    }
                },
                "required": ["text"]
            },
            handler=self._handle_slug_generator
        ))
1661
+ def _handle_slug_generator(self, args: Dict[str, Any]) -> str:
1662
+ """Handle slug generator tool execution."""
1663
+ from tools.slug_generator import SlugGeneratorProcessor
1664
+
1665
+ text = args.get("text", "")
1666
+ separator = args.get("separator", "-")
1667
+ lowercase = args.get("lowercase", True)
1668
+ transliterate = args.get("transliterate", True)
1669
+ max_length = args.get("max_length", 0)
1670
+ remove_stopwords = args.get("remove_stopwords", False)
1671
+
1672
+ return SlugGeneratorProcessor.generate_slug(
1673
+ text, separator, lowercase, transliterate,
1674
+ max_length, remove_stopwords
1675
+ )
1676
+
1677
+ # =========================================================================
1678
+ # Phase 3 Tools - Notes Widget Integration
1679
+ # =========================================================================
1680
+
1681
    def _register_notes_tools(self) -> None:
        """Register Notes widget tools for MCP access.

        Registers six CRUD/search tools backed by the notes SQLite database
        (see :meth:`_get_notes_connection`): ``pomera_notes_save``,
        ``pomera_notes_get``, ``pomera_notes_list``, ``pomera_notes_search``,
        ``pomera_notes_update`` and ``pomera_notes_delete``.  Each tool is
        dispatched to the matching ``_handle_notes_*`` method.
        """
        # Save note tool
        self.register(MCPToolAdapter(
            name="pomera_notes_save",
            description="Save a new note with title, input content, and output content to Pomera's notes database.",
            input_schema={
                "type": "object",
                "properties": {
                    "title": {
                        "type": "string",
                        "description": "Title of the note"
                    },
                    "input_content": {
                        "type": "string",
                        "description": "Input/source content to save",
                        "default": ""
                    },
                    "output_content": {
                        "type": "string",
                        "description": "Output/result content to save",
                        "default": ""
                    }
                },
                "required": ["title"]
            },
            handler=self._handle_notes_save
        ))

        # Get note by ID tool
        self.register(MCPToolAdapter(
            name="pomera_notes_get",
            description="Get a note by its ID from Pomera's notes database.",
            input_schema={
                "type": "object",
                "properties": {
                    "note_id": {
                        "type": "integer",
                        "description": "ID of the note to retrieve"
                    }
                },
                "required": ["note_id"]
            },
            handler=self._handle_notes_get
        ))

        # List notes tool (metadata only; no note bodies)
        self.register(MCPToolAdapter(
            name="pomera_notes_list",
            description="List all notes or search notes in Pomera's database. Returns ID, title, and timestamps.",
            input_schema={
                "type": "object",
                "properties": {
                    "search_term": {
                        "type": "string",
                        "description": "Optional FTS5 search term to filter notes. Use * for wildcards.",
                        "default": ""
                    },
                    "limit": {
                        "type": "integer",
                        "description": "Maximum number of notes to return",
                        "default": 50
                    }
                },
                "required": []
            },
            handler=self._handle_notes_list
        ))

        # Search notes (full content) tool
        self.register(MCPToolAdapter(
            name="pomera_notes_search",
            description="Search notes with full content. Returns matching notes with their complete input/output content.",
            input_schema={
                "type": "object",
                "properties": {
                    "search_term": {
                        "type": "string",
                        "description": "FTS5 search term. Examples: 'python', 'python AND tutorial', 'title:refactor'"
                    },
                    "limit": {
                        "type": "integer",
                        "description": "Maximum number of notes to return",
                        "default": 10
                    }
                },
                "required": ["search_term"]
            },
            handler=self._handle_notes_search
        ))

        # Update note tool (partial updates: only supplied fields change)
        self.register(MCPToolAdapter(
            name="pomera_notes_update",
            description="Update an existing note by ID.",
            input_schema={
                "type": "object",
                "properties": {
                    "note_id": {
                        "type": "integer",
                        "description": "ID of the note to update"
                    },
                    "title": {
                        "type": "string",
                        "description": "New title (optional)"
                    },
                    "input_content": {
                        "type": "string",
                        "description": "New input content (optional)"
                    },
                    "output_content": {
                        "type": "string",
                        "description": "New output content (optional)"
                    }
                },
                "required": ["note_id"]
            },
            handler=self._handle_notes_update
        ))

        # Delete note tool
        self.register(MCPToolAdapter(
            name="pomera_notes_delete",
            description="Delete a note by ID from Pomera's database.",
            input_schema={
                "type": "object",
                "properties": {
                    "note_id": {
                        "type": "integer",
                        "description": "ID of the note to delete"
                    }
                },
                "required": ["note_id"]
            },
            handler=self._handle_notes_delete
        ))
1818
+ def _get_notes_db_path(self) -> str:
1819
+ """Get the path to the notes database."""
1820
+ import os
1821
+ project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
1822
+ return os.path.join(project_root, 'notes.db')
1823
+
1824
+ def _get_notes_connection(self):
1825
+ """Get a connection to the notes database."""
1826
+ import sqlite3
1827
+ db_path = self._get_notes_db_path()
1828
+ conn = sqlite3.connect(db_path, timeout=10.0)
1829
+ conn.row_factory = sqlite3.Row
1830
+ return conn
1831
+
1832
+ def _handle_notes_save(self, args: Dict[str, Any]) -> str:
1833
+ """Handle saving a new note."""
1834
+ from datetime import datetime
1835
+
1836
+ title = args.get("title", "")
1837
+ input_content = args.get("input_content", "")
1838
+ output_content = args.get("output_content", "")
1839
+
1840
+ if not title:
1841
+ return "Error: Title is required"
1842
+
1843
+ try:
1844
+ conn = self._get_notes_connection()
1845
+ now = datetime.now().isoformat()
1846
+ cursor = conn.execute('''
1847
+ INSERT INTO notes (Created, Modified, Title, Input, Output)
1848
+ VALUES (?, ?, ?, ?, ?)
1849
+ ''', (now, now, title, input_content, output_content))
1850
+ note_id = cursor.lastrowid
1851
+ conn.commit()
1852
+ conn.close()
1853
+
1854
+ return f"Note saved successfully with ID: {note_id}"
1855
+ except Exception as e:
1856
+ return f"Error saving note: {str(e)}"
1857
+
1858
+ def _handle_notes_get(self, args: Dict[str, Any]) -> str:
1859
+ """Handle getting a note by ID."""
1860
+ note_id = args.get("note_id")
1861
+
1862
+ if note_id is None:
1863
+ return "Error: note_id is required"
1864
+
1865
+ try:
1866
+ conn = self._get_notes_connection()
1867
+ row = conn.execute('SELECT * FROM notes WHERE id = ?', (note_id,)).fetchone()
1868
+ conn.close()
1869
+
1870
+ if not row:
1871
+ return f"Note with ID {note_id} not found"
1872
+
1873
+ lines = [
1874
+ f"=== Note #{row['id']} ===",
1875
+ f"Title: {row['Title'] or '(no title)'}",
1876
+ f"Created: {row['Created']}",
1877
+ f"Modified: {row['Modified']}",
1878
+ "",
1879
+ "--- INPUT ---",
1880
+ row['Input'] or "(empty)",
1881
+ "",
1882
+ "--- OUTPUT ---",
1883
+ row['Output'] or "(empty)"
1884
+ ]
1885
+ return "\n".join(lines)
1886
+ except Exception as e:
1887
+ return f"Error retrieving note: {str(e)}"
1888
+
1889
+ def _handle_notes_list(self, args: Dict[str, Any]) -> str:
1890
+ """Handle listing notes."""
1891
+ search_term = args.get("search_term", "").strip()
1892
+ limit = args.get("limit", 50)
1893
+
1894
+ try:
1895
+ conn = self._get_notes_connection()
1896
+
1897
+ if search_term:
1898
+ cursor = conn.execute('''
1899
+ SELECT n.id, n.Created, n.Modified, n.Title
1900
+ FROM notes n JOIN notes_fts fts ON n.id = fts.rowid
1901
+ WHERE notes_fts MATCH ?
1902
+ ORDER BY rank
1903
+ LIMIT ?
1904
+ ''', (search_term + '*', limit))
1905
+ else:
1906
+ cursor = conn.execute('''
1907
+ SELECT id, Created, Modified, Title
1908
+ FROM notes
1909
+ ORDER BY Modified DESC
1910
+ LIMIT ?
1911
+ ''', (limit,))
1912
+
1913
+ rows = cursor.fetchall()
1914
+ conn.close()
1915
+
1916
+ if not rows:
1917
+ return "No notes found" + (f" matching '{search_term}'" if search_term else "")
1918
+
1919
+ lines = [f"Found {len(rows)} note(s):", ""]
1920
+ for row in rows:
1921
+ title = row['Title'][:50] + "..." if len(row['Title'] or '') > 50 else (row['Title'] or '(no title)')
1922
+ lines.append(f" [{row['id']:4}] {title}")
1923
+ lines.append(f" Modified: {row['Modified']}")
1924
+
1925
+ return "\n".join(lines)
1926
+ except Exception as e:
1927
+ return f"Error listing notes: {str(e)}"
1928
+
1929
+ def _handle_notes_search(self, args: Dict[str, Any]) -> str:
1930
+ """Handle searching notes with full content."""
1931
+ search_term = args.get("search_term", "").strip()
1932
+ limit = args.get("limit", 10)
1933
+
1934
+ if not search_term:
1935
+ return "Error: search_term is required"
1936
+
1937
+ try:
1938
+ conn = self._get_notes_connection()
1939
+ cursor = conn.execute('''
1940
+ SELECT n.id, n.Created, n.Modified, n.Title, n.Input, n.Output
1941
+ FROM notes n JOIN notes_fts fts ON n.id = fts.rowid
1942
+ WHERE notes_fts MATCH ?
1943
+ ORDER BY rank
1944
+ LIMIT ?
1945
+ ''', (search_term + '*', limit))
1946
+
1947
+ rows = cursor.fetchall()
1948
+ conn.close()
1949
+
1950
+ if not rows:
1951
+ return f"No notes found matching '{search_term}'"
1952
+
1953
+ lines = [f"Found {len(rows)} note(s) matching '{search_term}':", ""]
1954
+
1955
+ for row in rows:
1956
+ lines.append(f"=== Note #{row['id']}: {row['Title'] or '(no title)'} ===")
1957
+ lines.append(f"Modified: {row['Modified']}")
1958
+ lines.append("")
1959
+
1960
+ # Truncate long content
1961
+ input_preview = (row['Input'] or '')[:500]
1962
+ if len(row['Input'] or '') > 500:
1963
+ input_preview += "... (truncated)"
1964
+
1965
+ output_preview = (row['Output'] or '')[:500]
1966
+ if len(row['Output'] or '') > 500:
1967
+ output_preview += "... (truncated)"
1968
+
1969
+ lines.append("INPUT:")
1970
+ lines.append(input_preview or "(empty)")
1971
+ lines.append("")
1972
+ lines.append("OUTPUT:")
1973
+ lines.append(output_preview or "(empty)")
1974
+ lines.append("")
1975
+ lines.append("-" * 50)
1976
+ lines.append("")
1977
+
1978
+ return "\n".join(lines)
1979
+ except Exception as e:
1980
+ return f"Error searching notes: {str(e)}"
1981
+
1982
+ def _handle_notes_update(self, args: Dict[str, Any]) -> str:
1983
+ """Handle updating an existing note."""
1984
+ from datetime import datetime
1985
+
1986
+ note_id = args.get("note_id")
1987
+
1988
+ if note_id is None:
1989
+ return "Error: note_id is required"
1990
+
1991
+ try:
1992
+ conn = self._get_notes_connection()
1993
+
1994
+ # Check if note exists
1995
+ existing = conn.execute('SELECT * FROM notes WHERE id = ?', (note_id,)).fetchone()
1996
+ if not existing:
1997
+ conn.close()
1998
+ return f"Note with ID {note_id} not found"
1999
+
2000
+ # Build update query
2001
+ updates = []
2002
+ values = []
2003
+
2004
+ if "title" in args:
2005
+ updates.append("Title = ?")
2006
+ values.append(args["title"])
2007
+
2008
+ if "input_content" in args:
2009
+ updates.append("Input = ?")
2010
+ values.append(args["input_content"])
2011
+
2012
+ if "output_content" in args:
2013
+ updates.append("Output = ?")
2014
+ values.append(args["output_content"])
2015
+
2016
+ if not updates:
2017
+ conn.close()
2018
+ return "No fields to update"
2019
+
2020
+ # Always update Modified timestamp
2021
+ updates.append("Modified = ?")
2022
+ values.append(datetime.now().isoformat())
2023
+
2024
+ values.append(note_id)
2025
+
2026
+ conn.execute(f'''
2027
+ UPDATE notes SET {', '.join(updates)} WHERE id = ?
2028
+ ''', values)
2029
+ conn.commit()
2030
+ conn.close()
2031
+
2032
+ return f"Note {note_id} updated successfully"
2033
+ except Exception as e:
2034
+ return f"Error updating note: {str(e)}"
2035
+
2036
+ def _handle_notes_delete(self, args: Dict[str, Any]) -> str:
2037
+ """Handle deleting a note."""
2038
+ note_id = args.get("note_id")
2039
+
2040
+ if note_id is None:
2041
+ return "Error: note_id is required"
2042
+
2043
+ try:
2044
+ conn = self._get_notes_connection()
2045
+
2046
+ # Check if note exists
2047
+ existing = conn.execute('SELECT id FROM notes WHERE id = ?', (note_id,)).fetchone()
2048
+ if not existing:
2049
+ conn.close()
2050
+ return f"Note with ID {note_id} not found"
2051
+
2052
+ conn.execute('DELETE FROM notes WHERE id = ?', (note_id,))
2053
+ conn.commit()
2054
+ conn.close()
2055
+
2056
+ return f"Note {note_id} deleted successfully"
2057
+ except Exception as e:
2058
+ return f"Error deleting note: {str(e)}"
2059
+
2060
+ # =========================================================================
2061
+ # Phase 4 Tools - Additional Tools
2062
+ # =========================================================================
2063
+
2064
    def _register_email_header_analyzer_tool(self) -> None:
        """Register the Email Header Analyzer Tool.

        Exposes ``pomera_email_header_analyzer``; execution is dispatched to
        :meth:`_handle_email_header_analyzer`.
        """
        self.register(MCPToolAdapter(
            name="pomera_email_header_analyzer",
            description="Analyze email headers to extract routing information, authentication results (SPF, DKIM, DMARC), "
                        "server hops, delivery timing, and spam scores.",
            input_schema={
                "type": "object",
                "properties": {
                    "text": {
                        "type": "string",
                        "description": "Raw email headers to analyze"
                    },
                    "show_timestamps": {
                        "type": "boolean",
                        "description": "Show timestamp information for each server hop",
                        "default": True
                    },
                    "show_delays": {
                        "type": "boolean",
                        "description": "Show delay calculations between server hops",
                        "default": True
                    },
                    "show_authentication": {
                        "type": "boolean",
                        "description": "Show SPF, DKIM, DMARC authentication results",
                        "default": True
                    },
                    "show_spam_score": {
                        "type": "boolean",
                        "description": "Show spam score if available",
                        "default": True
                    }
                },
                "required": ["text"]
            },
            handler=self._handle_email_header_analyzer
        ))
2103
+ def _handle_email_header_analyzer(self, args: Dict[str, Any]) -> str:
2104
+ """Handle email header analyzer tool execution."""
2105
+ from tools.email_header_analyzer import EmailHeaderAnalyzerProcessor
2106
+
2107
+ text = args.get("text", "")
2108
+ show_timestamps = args.get("show_timestamps", True)
2109
+ show_delays = args.get("show_delays", True)
2110
+ show_authentication = args.get("show_authentication", True)
2111
+ show_spam_score = args.get("show_spam_score", True)
2112
+
2113
+ return EmailHeaderAnalyzerProcessor.analyze_email_headers(
2114
+ text, show_timestamps, show_delays, show_authentication, show_spam_score
2115
+ )
2116
+
2117
+ def _register_html_tool(self) -> None:
2118
+ """Register the HTML Extraction Tool."""
2119
+ self.register(MCPToolAdapter(
2120
+ name="pomera_html",
2121
+ description="Process HTML content: extract visible text, clean HTML, extract links, images, headings, tables, or forms.",
2122
+ input_schema={
2123
+ "type": "object",
2124
+ "properties": {
2125
+ "text": {
2126
+ "type": "string",
2127
+ "description": "HTML content to process"
2128
+ },
2129
+ "operation": {
2130
+ "type": "string",
2131
+ "enum": ["visible_text", "clean_html", "extract_links", "extract_images",
2132
+ "extract_headings", "extract_tables", "extract_forms"],
2133
+ "description": "Extraction/processing operation to perform",
2134
+ "default": "visible_text"
2135
+ },
2136
+ "preserve_links": {
2137
+ "type": "boolean",
2138
+ "description": "For visible_text: add link references at the end",
2139
+ "default": False
2140
+ },
2141
+ "remove_scripts": {
2142
+ "type": "boolean",
2143
+ "description": "For clean_html: remove script and style tags",
2144
+ "default": True
2145
+ },
2146
+ "remove_comments": {
2147
+ "type": "boolean",
2148
+ "description": "For clean_html: remove HTML comments",
2149
+ "default": True
2150
+ },
2151
+ "remove_style_attrs": {
2152
+ "type": "boolean",
2153
+ "description": "For clean_html: remove style attributes",
2154
+ "default": True
2155
+ },
2156
+ "remove_class_attrs": {
2157
+ "type": "boolean",
2158
+ "description": "For clean_html: remove class attributes",
2159
+ "default": False
2160
+ },
2161
+ "remove_empty_tags": {
2162
+ "type": "boolean",
2163
+ "description": "For clean_html: remove empty tags",
2164
+ "default": True
2165
+ },
2166
+ "include_link_text": {
2167
+ "type": "boolean",
2168
+ "description": "For extract_links: include the link text",
2169
+ "default": True
2170
+ },
2171
+ "absolute_links_only": {
2172
+ "type": "boolean",
2173
+ "description": "For extract_links: only extract http/https links",
2174
+ "default": False
2175
+ },
2176
+ "include_alt_text": {
2177
+ "type": "boolean",
2178
+ "description": "For extract_images: include alt text",
2179
+ "default": True
2180
+ },
2181
+ "include_heading_level": {
2182
+ "type": "boolean",
2183
+ "description": "For extract_headings: include heading level (H1, H2, etc.)",
2184
+ "default": True
2185
+ },
2186
+ "column_separator": {
2187
+ "type": "string",
2188
+ "description": "For extract_tables: column separator character",
2189
+ "default": "\t"
2190
+ }
2191
+ },
2192
+ "required": ["text"]
2193
+ },
2194
+ handler=self._handle_html_tool
2195
+ ))
2196
+
2197
+ def _handle_html_tool(self, args: Dict[str, Any]) -> str:
2198
+ """Handle HTML tool execution."""
2199
+ from tools.html_tool import HTMLExtractionTool
2200
+
2201
+ text = args.get("text", "")
2202
+ operation = args.get("operation", "visible_text")
2203
+
2204
+ # Build settings dict from args
2205
+ settings = {
2206
+ "extraction_method": operation,
2207
+ "preserve_links": args.get("preserve_links", False),
2208
+ "remove_scripts": args.get("remove_scripts", True),
2209
+ "remove_comments": args.get("remove_comments", True),
2210
+ "remove_style_attrs": args.get("remove_style_attrs", True),
2211
+ "remove_class_attrs": args.get("remove_class_attrs", False),
2212
+ "remove_id_attrs": args.get("remove_id_attrs", False),
2213
+ "remove_empty_tags": args.get("remove_empty_tags", True),
2214
+ "include_link_text": args.get("include_link_text", True),
2215
+ "absolute_links_only": args.get("absolute_links_only", False),
2216
+ "include_alt_text": args.get("include_alt_text", True),
2217
+ "include_title": args.get("include_title", False),
2218
+ "include_heading_level": args.get("include_heading_level", True),
2219
+ "column_separator": args.get("column_separator", "\t")
2220
+ }
2221
+
2222
+ tool = HTMLExtractionTool()
2223
+ return tool.process_text(text, settings)
2224
+
2225
    def _register_list_comparator_tool(self) -> None:
        """Register the List Comparator Tool.

        Exposes ``pomera_list_compare``; execution is dispatched to
        :meth:`_handle_list_comparator`.
        """
        self.register(MCPToolAdapter(
            name="pomera_list_compare",
            description="Compare two lists and find items unique to each list or common to both. "
                        "Useful for finding differences between datasets, configurations, or any line-based content.",
            input_schema={
                "type": "object",
                "properties": {
                    "list_a": {
                        "type": "string",
                        "description": "First list (one item per line)"
                    },
                    "list_b": {
                        "type": "string",
                        "description": "Second list (one item per line)"
                    },
                    "case_insensitive": {
                        "type": "boolean",
                        "description": "Perform case-insensitive comparison",
                        "default": False
                    },
                    "output_format": {
                        "type": "string",
                        "enum": ["all", "only_a", "only_b", "in_both"],
                        "description": "What to return: all results, only items unique to A, only items unique to B, or only common items",
                        "default": "all"
                    }
                },
                "required": ["list_a", "list_b"]
            },
            handler=self._handle_list_comparator
        ))
2259
+ def _handle_list_comparator(self, args: Dict[str, Any]) -> str:
2260
+ """Handle list comparator tool execution."""
2261
+ list_a_text = args.get("list_a", "")
2262
+ list_b_text = args.get("list_b", "")
2263
+ case_insensitive = args.get("case_insensitive", False)
2264
+ output_format = args.get("output_format", "all")
2265
+
2266
+ # Parse lists
2267
+ list_a = [line.strip() for line in list_a_text.strip().splitlines() if line.strip()]
2268
+ list_b = [line.strip() for line in list_b_text.strip().splitlines() if line.strip()]
2269
+
2270
+ if not list_a and not list_b:
2271
+ return "Both lists are empty."
2272
+
2273
+ # Perform comparison
2274
+ if case_insensitive:
2275
+ set_a_lower = {item.lower() for item in list_a}
2276
+ set_b_lower = {item.lower() for item in list_b}
2277
+
2278
+ map_a = {item.lower(): item for item in reversed(list_a)}
2279
+ map_b = {item.lower(): item for item in reversed(list_b)}
2280
+
2281
+ unique_a_lower = set_a_lower - set_b_lower
2282
+ unique_b_lower = set_b_lower - set_a_lower
2283
+ in_both_lower = set_a_lower & set_b_lower
2284
+
2285
+ unique_a = sorted([map_a[item] for item in unique_a_lower])
2286
+ unique_b = sorted([map_b[item] for item in unique_b_lower])
2287
+ in_both = sorted([map_a.get(item, map_b.get(item)) for item in in_both_lower])
2288
+ else:
2289
+ set_a = set(list_a)
2290
+ set_b = set(list_b)
2291
+ unique_a = sorted(list(set_a - set_b))
2292
+ unique_b = sorted(list(set_b - set_a))
2293
+ in_both = sorted(list(set_a & set_b))
2294
+
2295
+ # Build output based on format
2296
+ result_lines = []
2297
+
2298
+ if output_format == "only_a":
2299
+ result_lines.append(f"=== Items only in List A ({len(unique_a)}) ===")
2300
+ result_lines.extend(unique_a if unique_a else ["(none)"])
2301
+ elif output_format == "only_b":
2302
+ result_lines.append(f"=== Items only in List B ({len(unique_b)}) ===")
2303
+ result_lines.extend(unique_b if unique_b else ["(none)"])
2304
+ elif output_format == "in_both":
2305
+ result_lines.append(f"=== Items in both lists ({len(in_both)}) ===")
2306
+ result_lines.extend(in_both if in_both else ["(none)"])
2307
+ else: # "all"
2308
+ result_lines.append(f"=== Comparison Summary ===")
2309
+ result_lines.append(f"List A: {len(list_a)} items")
2310
+ result_lines.append(f"List B: {len(list_b)} items")
2311
+ result_lines.append(f"Only in A: {len(unique_a)}")
2312
+ result_lines.append(f"Only in B: {len(unique_b)}")
2313
+ result_lines.append(f"In both: {len(in_both)}")
2314
+ result_lines.append("")
2315
+
2316
+ result_lines.append(f"=== Only in List A ({len(unique_a)}) ===")
2317
+ result_lines.extend(unique_a if unique_a else ["(none)"])
2318
+ result_lines.append("")
2319
+
2320
+ result_lines.append(f"=== Only in List B ({len(unique_b)}) ===")
2321
+ result_lines.extend(unique_b if unique_b else ["(none)"])
2322
+ result_lines.append("")
2323
+
2324
+ result_lines.append(f"=== In Both Lists ({len(in_both)}) ===")
2325
+ result_lines.extend(in_both if in_both else ["(none)"])
2326
+
2327
+ return "\n".join(result_lines)
2328
+
2329
+
2330
+ # Singleton instance for convenience
2331
+ _default_registry: Optional[ToolRegistry] = None
2332
+
2333
+
2334
def get_registry() -> ToolRegistry:
    """
    Get the default tool registry instance.

    Lazily instantiates the module-level singleton on first call; subsequent
    calls return the same object.

    Returns:
        ToolRegistry singleton
    """
    global _default_registry
    if _default_registry is None:
        _default_registry = ToolRegistry()
    return _default_registry