pomera-ai-commander 1.1.1 → 1.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (213)
  1. package/LICENSE +21 -21
  2. package/README.md +105 -680
  3. package/bin/pomera-ai-commander.js +62 -62
  4. package/core/__init__.py +65 -65
  5. package/core/app_context.py +482 -482
  6. package/core/async_text_processor.py +421 -421
  7. package/core/backup_manager.py +655 -655
  8. package/core/backup_recovery_manager.py +1199 -1033
  9. package/core/content_hash_cache.py +508 -508
  10. package/core/context_menu.py +313 -313
  11. package/core/data_directory.py +549 -0
  12. package/core/data_validator.py +1066 -1066
  13. package/core/database_connection_manager.py +744 -744
  14. package/core/database_curl_settings_manager.py +608 -608
  15. package/core/database_promera_ai_settings_manager.py +446 -446
  16. package/core/database_schema.py +411 -411
  17. package/core/database_schema_manager.py +395 -395
  18. package/core/database_settings_manager.py +1507 -1507
  19. package/core/database_settings_manager_interface.py +456 -456
  20. package/core/dialog_manager.py +734 -734
  21. package/core/diff_utils.py +239 -0
  22. package/core/efficient_line_numbers.py +540 -510
  23. package/core/error_handler.py +746 -746
  24. package/core/error_service.py +431 -431
  25. package/core/event_consolidator.py +511 -511
  26. package/core/mcp/__init__.py +43 -43
  27. package/core/mcp/find_replace_diff.py +334 -0
  28. package/core/mcp/protocol.py +288 -288
  29. package/core/mcp/schema.py +251 -251
  30. package/core/mcp/server_stdio.py +299 -299
  31. package/core/mcp/tool_registry.py +2699 -2345
  32. package/core/memento.py +275 -0
  33. package/core/memory_efficient_text_widget.py +711 -711
  34. package/core/migration_manager.py +914 -914
  35. package/core/migration_test_suite.py +1085 -1085
  36. package/core/migration_validator.py +1143 -1143
  37. package/core/optimized_find_replace.py +714 -714
  38. package/core/optimized_pattern_engine.py +424 -424
  39. package/core/optimized_search_highlighter.py +552 -552
  40. package/core/performance_monitor.py +674 -674
  41. package/core/persistence_manager.py +712 -712
  42. package/core/progressive_stats_calculator.py +632 -632
  43. package/core/regex_pattern_cache.py +529 -529
  44. package/core/regex_pattern_library.py +350 -350
  45. package/core/search_operation_manager.py +434 -434
  46. package/core/settings_defaults_registry.py +1087 -1087
  47. package/core/settings_integrity_validator.py +1111 -1111
  48. package/core/settings_serializer.py +557 -557
  49. package/core/settings_validator.py +1823 -1823
  50. package/core/smart_stats_calculator.py +709 -709
  51. package/core/statistics_update_manager.py +619 -619
  52. package/core/stats_config_manager.py +858 -858
  53. package/core/streaming_text_handler.py +723 -723
  54. package/core/task_scheduler.py +596 -596
  55. package/core/update_pattern_library.py +168 -168
  56. package/core/visibility_monitor.py +596 -596
  57. package/core/widget_cache.py +498 -498
  58. package/mcp.json +51 -61
  59. package/migrate_data.py +127 -0
  60. package/package.json +64 -57
  61. package/pomera.py +7883 -7482
  62. package/pomera_mcp_server.py +183 -144
  63. package/requirements.txt +33 -0
  64. package/scripts/Dockerfile.alpine +43 -0
  65. package/scripts/Dockerfile.gui-test +54 -0
  66. package/scripts/Dockerfile.linux +43 -0
  67. package/scripts/Dockerfile.test-linux +80 -0
  68. package/scripts/Dockerfile.ubuntu +39 -0
  69. package/scripts/README.md +53 -0
  70. package/scripts/build-all.bat +113 -0
  71. package/scripts/build-docker.bat +53 -0
  72. package/scripts/build-docker.sh +55 -0
  73. package/scripts/build-optimized.bat +101 -0
  74. package/scripts/build.sh +78 -0
  75. package/scripts/docker-compose.test.yml +27 -0
  76. package/scripts/docker-compose.yml +32 -0
  77. package/scripts/postinstall.js +62 -0
  78. package/scripts/requirements-minimal.txt +33 -0
  79. package/scripts/test-linux-simple.bat +28 -0
  80. package/scripts/validate-release-workflow.py +450 -0
  81. package/tools/__init__.py +4 -4
  82. package/tools/ai_tools.py +2891 -2891
  83. package/tools/ascii_art_generator.py +352 -352
  84. package/tools/base64_tools.py +183 -183
  85. package/tools/base_tool.py +511 -511
  86. package/tools/case_tool.py +308 -308
  87. package/tools/column_tools.py +395 -395
  88. package/tools/cron_tool.py +884 -884
  89. package/tools/curl_history.py +600 -600
  90. package/tools/curl_processor.py +1207 -1207
  91. package/tools/curl_settings.py +502 -502
  92. package/tools/curl_tool.py +5467 -5467
  93. package/tools/diff_viewer.py +1817 -1072
  94. package/tools/email_extraction_tool.py +248 -248
  95. package/tools/email_header_analyzer.py +425 -425
  96. package/tools/extraction_tools.py +250 -250
  97. package/tools/find_replace.py +2289 -1750
  98. package/tools/folder_file_reporter.py +1463 -1463
  99. package/tools/folder_file_reporter_adapter.py +480 -480
  100. package/tools/generator_tools.py +1216 -1216
  101. package/tools/hash_generator.py +255 -255
  102. package/tools/html_tool.py +656 -656
  103. package/tools/jsonxml_tool.py +729 -729
  104. package/tools/line_tools.py +419 -419
  105. package/tools/markdown_tools.py +561 -561
  106. package/tools/mcp_widget.py +1417 -1417
  107. package/tools/notes_widget.py +978 -973
  108. package/tools/number_base_converter.py +372 -372
  109. package/tools/regex_extractor.py +571 -571
  110. package/tools/slug_generator.py +310 -310
  111. package/tools/sorter_tools.py +458 -458
  112. package/tools/string_escape_tool.py +392 -392
  113. package/tools/text_statistics_tool.py +365 -365
  114. package/tools/text_wrapper.py +430 -430
  115. package/tools/timestamp_converter.py +421 -421
  116. package/tools/tool_loader.py +710 -710
  117. package/tools/translator_tools.py +522 -522
  118. package/tools/url_link_extractor.py +261 -261
  119. package/tools/url_parser.py +204 -204
  120. package/tools/whitespace_tools.py +355 -355
  121. package/tools/word_frequency_counter.py +146 -146
  122. package/core/__pycache__/__init__.cpython-313.pyc +0 -0
  123. package/core/__pycache__/app_context.cpython-313.pyc +0 -0
  124. package/core/__pycache__/async_text_processor.cpython-313.pyc +0 -0
  125. package/core/__pycache__/backup_manager.cpython-313.pyc +0 -0
  126. package/core/__pycache__/backup_recovery_manager.cpython-313.pyc +0 -0
  127. package/core/__pycache__/content_hash_cache.cpython-313.pyc +0 -0
  128. package/core/__pycache__/context_menu.cpython-313.pyc +0 -0
  129. package/core/__pycache__/data_validator.cpython-313.pyc +0 -0
  130. package/core/__pycache__/database_connection_manager.cpython-313.pyc +0 -0
  131. package/core/__pycache__/database_curl_settings_manager.cpython-313.pyc +0 -0
  132. package/core/__pycache__/database_promera_ai_settings_manager.cpython-313.pyc +0 -0
  133. package/core/__pycache__/database_schema.cpython-313.pyc +0 -0
  134. package/core/__pycache__/database_schema_manager.cpython-313.pyc +0 -0
  135. package/core/__pycache__/database_settings_manager.cpython-313.pyc +0 -0
  136. package/core/__pycache__/database_settings_manager_interface.cpython-313.pyc +0 -0
  137. package/core/__pycache__/dialog_manager.cpython-313.pyc +0 -0
  138. package/core/__pycache__/efficient_line_numbers.cpython-313.pyc +0 -0
  139. package/core/__pycache__/error_handler.cpython-313.pyc +0 -0
  140. package/core/__pycache__/error_service.cpython-313.pyc +0 -0
  141. package/core/__pycache__/event_consolidator.cpython-313.pyc +0 -0
  142. package/core/__pycache__/memory_efficient_text_widget.cpython-313.pyc +0 -0
  143. package/core/__pycache__/migration_manager.cpython-313.pyc +0 -0
  144. package/core/__pycache__/migration_test_suite.cpython-313.pyc +0 -0
  145. package/core/__pycache__/migration_validator.cpython-313.pyc +0 -0
  146. package/core/__pycache__/optimized_find_replace.cpython-313.pyc +0 -0
  147. package/core/__pycache__/optimized_pattern_engine.cpython-313.pyc +0 -0
  148. package/core/__pycache__/optimized_search_highlighter.cpython-313.pyc +0 -0
  149. package/core/__pycache__/performance_monitor.cpython-313.pyc +0 -0
  150. package/core/__pycache__/persistence_manager.cpython-313.pyc +0 -0
  151. package/core/__pycache__/progressive_stats_calculator.cpython-313.pyc +0 -0
  152. package/core/__pycache__/regex_pattern_cache.cpython-313.pyc +0 -0
  153. package/core/__pycache__/regex_pattern_library.cpython-313.pyc +0 -0
  154. package/core/__pycache__/search_operation_manager.cpython-313.pyc +0 -0
  155. package/core/__pycache__/settings_defaults_registry.cpython-313.pyc +0 -0
  156. package/core/__pycache__/settings_integrity_validator.cpython-313.pyc +0 -0
  157. package/core/__pycache__/settings_serializer.cpython-313.pyc +0 -0
  158. package/core/__pycache__/settings_validator.cpython-313.pyc +0 -0
  159. package/core/__pycache__/smart_stats_calculator.cpython-313.pyc +0 -0
  160. package/core/__pycache__/statistics_update_manager.cpython-313.pyc +0 -0
  161. package/core/__pycache__/stats_config_manager.cpython-313.pyc +0 -0
  162. package/core/__pycache__/streaming_text_handler.cpython-313.pyc +0 -0
  163. package/core/__pycache__/task_scheduler.cpython-313.pyc +0 -0
  164. package/core/__pycache__/visibility_monitor.cpython-313.pyc +0 -0
  165. package/core/__pycache__/widget_cache.cpython-313.pyc +0 -0
  166. package/core/mcp/__pycache__/__init__.cpython-313.pyc +0 -0
  167. package/core/mcp/__pycache__/protocol.cpython-313.pyc +0 -0
  168. package/core/mcp/__pycache__/schema.cpython-313.pyc +0 -0
  169. package/core/mcp/__pycache__/server_stdio.cpython-313.pyc +0 -0
  170. package/core/mcp/__pycache__/tool_registry.cpython-313.pyc +0 -0
  171. package/tools/__pycache__/__init__.cpython-313.pyc +0 -0
  172. package/tools/__pycache__/ai_tools.cpython-313.pyc +0 -0
  173. package/tools/__pycache__/ascii_art_generator.cpython-313.pyc +0 -0
  174. package/tools/__pycache__/base64_tools.cpython-313.pyc +0 -0
  175. package/tools/__pycache__/base_tool.cpython-313.pyc +0 -0
  176. package/tools/__pycache__/case_tool.cpython-313.pyc +0 -0
  177. package/tools/__pycache__/column_tools.cpython-313.pyc +0 -0
  178. package/tools/__pycache__/cron_tool.cpython-313.pyc +0 -0
  179. package/tools/__pycache__/curl_history.cpython-313.pyc +0 -0
  180. package/tools/__pycache__/curl_processor.cpython-313.pyc +0 -0
  181. package/tools/__pycache__/curl_settings.cpython-313.pyc +0 -0
  182. package/tools/__pycache__/curl_tool.cpython-313.pyc +0 -0
  183. package/tools/__pycache__/diff_viewer.cpython-313.pyc +0 -0
  184. package/tools/__pycache__/email_extraction_tool.cpython-313.pyc +0 -0
  185. package/tools/__pycache__/email_header_analyzer.cpython-313.pyc +0 -0
  186. package/tools/__pycache__/extraction_tools.cpython-313.pyc +0 -0
  187. package/tools/__pycache__/find_replace.cpython-313.pyc +0 -0
  188. package/tools/__pycache__/folder_file_reporter.cpython-313.pyc +0 -0
  189. package/tools/__pycache__/folder_file_reporter_adapter.cpython-313.pyc +0 -0
  190. package/tools/__pycache__/generator_tools.cpython-313.pyc +0 -0
  191. package/tools/__pycache__/hash_generator.cpython-313.pyc +0 -0
  192. package/tools/__pycache__/html_tool.cpython-313.pyc +0 -0
  193. package/tools/__pycache__/huggingface_helper.cpython-313.pyc +0 -0
  194. package/tools/__pycache__/jsonxml_tool.cpython-313.pyc +0 -0
  195. package/tools/__pycache__/line_tools.cpython-313.pyc +0 -0
  196. package/tools/__pycache__/list_comparator.cpython-313.pyc +0 -0
  197. package/tools/__pycache__/markdown_tools.cpython-313.pyc +0 -0
  198. package/tools/__pycache__/mcp_widget.cpython-313.pyc +0 -0
  199. package/tools/__pycache__/notes_widget.cpython-313.pyc +0 -0
  200. package/tools/__pycache__/number_base_converter.cpython-313.pyc +0 -0
  201. package/tools/__pycache__/regex_extractor.cpython-313.pyc +0 -0
  202. package/tools/__pycache__/slug_generator.cpython-313.pyc +0 -0
  203. package/tools/__pycache__/sorter_tools.cpython-313.pyc +0 -0
  204. package/tools/__pycache__/string_escape_tool.cpython-313.pyc +0 -0
  205. package/tools/__pycache__/text_statistics_tool.cpython-313.pyc +0 -0
  206. package/tools/__pycache__/text_wrapper.cpython-313.pyc +0 -0
  207. package/tools/__pycache__/timestamp_converter.cpython-313.pyc +0 -0
  208. package/tools/__pycache__/tool_loader.cpython-313.pyc +0 -0
  209. package/tools/__pycache__/translator_tools.cpython-313.pyc +0 -0
  210. package/tools/__pycache__/url_link_extractor.cpython-313.pyc +0 -0
  211. package/tools/__pycache__/url_parser.cpython-313.pyc +0 -0
  212. package/tools/__pycache__/whitespace_tools.cpython-313.pyc +0 -0
  213. package/tools/__pycache__/word_frequency_counter.cpython-313.pyc +0 -0
package/core/migration_validator.py
@@ -1,1144 +1,1144 @@
- """
- Migration Validation System for Settings Database Migration
-
- This module provides comprehensive validation and testing capabilities for the
- migration system, including data integrity checks, edge case testing, and
- rollback procedures for all tool configurations.
-
- Designed to validate all 15+ tool configurations and complex data structures
- identified in the production codebase analysis.
- """
-
- import json
- import sqlite3
- import os
- import shutil
- import logging
- import tempfile
- import hashlib
- from typing import Dict, List, Tuple, Any, Optional, Union, Set
- from datetime import datetime
- from pathlib import Path
- from dataclasses import dataclass
-
- from .migration_manager import MigrationManager
- from .database_connection_manager import DatabaseConnectionManager
- from .database_schema_manager import DatabaseSchemaManager
-
-
- @dataclass
- class ValidationResult:
-     """Result of a validation operation."""
-     success: bool
-     errors: List[str]
-     warnings: List[str]
-     details: Dict[str, Any]
-
-     def add_error(self, error: str) -> None:
-         """Add an error to the result."""
-         self.errors.append(error)
-         self.success = False
-
-     def add_warning(self, warning: str) -> None:
-         """Add a warning to the result."""
-         self.warnings.append(warning)
-
-     def merge(self, other: 'ValidationResult') -> None:
-         """Merge another validation result into this one."""
-         self.errors.extend(other.errors)
-         self.warnings.extend(other.warnings)
-         self.details.update(other.details)
-         if not other.success:
-             self.success = False
-
-
- class MigrationValidator:
-     """
-     Comprehensive migration validation and testing system.
-
-     Features:
-     - Data integrity validation for all tool configurations
-     - Edge case testing (corrupted JSON, missing fields, invalid data)
-     - Automatic backup creation and rollback procedures
-     - Performance testing for large settings files
-     - Comprehensive test suite for all 15+ tool types
-     """
-
-     def __init__(self, migration_manager: MigrationManager):
-         """
-         Initialize the migration validator.
-
-         Args:
-             migration_manager: MigrationManager instance to validate
-         """
-         self.migration_manager = migration_manager
-         self.logger = logging.getLogger(__name__)
-
-         # Test configuration
-         self._test_data_dir = None
-         self._backup_dir = None
-         self._temp_files = []
-
-         # Validation settings
-         self._strict_validation = True
-         self._validate_types = True
-         self._validate_structure = True
-         self._validate_content = True
-
-         # Known tool configurations from production analysis
-         self._known_tools = {
-             'AI Tools', 'Base64 Encoder/Decoder', 'Case Tool', 'Cron Tool', 'Diff Viewer',
-             'Email Extraction Tool', 'Email Header Analyzer', 'Find & Replace Text',
-             'Folder File Reporter', 'Generator Tools', 'HTML Extraction Tool',
-             'JSON/XML Tool', 'Sorter Tools', 'Translator Tools', 'URL Parser',
-             'URL and Link Extractor', 'Word Frequency Counter',
-             # AI Provider configurations (stored as separate tool settings)
-             'Google AI', 'Anthropic AI', 'OpenAI', 'AWS Bedrock', 'Cohere AI',
-             'HuggingFace AI', 'Groq AI', 'OpenRouterAI', 'LM Studio'
-         }
-
-         # Critical settings that must be preserved
-         self._critical_settings = {
-             'export_path', 'debug_level', 'selected_tool',
-             'active_input_tab', 'active_output_tab',
-             'input_tabs', 'output_tabs', 'tool_settings'
-         }
-
-     def validate_complete_migration(self, json_filepath: str) -> ValidationResult:
-         """
-         Perform comprehensive validation of a complete migration cycle.
-
-         Args:
-             json_filepath: Path to source JSON settings file
-
-         Returns:
-             ValidationResult with comprehensive validation details
-         """
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             self.logger.info(f"Starting comprehensive migration validation: {json_filepath}")
-
-             # Step 1: Validate source JSON file
-             source_validation = self._validate_source_json(json_filepath)
-             result.merge(source_validation)
-
-             if not source_validation.success and self._strict_validation:
-                 return result
-
-             # Step 2: Create backup and test environment
-             backup_result = self._setup_test_environment(json_filepath)
-             result.merge(backup_result)
-
-             # Step 3: Perform migration to database
-             migration_result = self._test_json_to_database_migration(json_filepath)
-             result.merge(migration_result)
-
-             # Step 4: Validate database content
-             if migration_result.success:
-                 db_validation = self._validate_database_content()
-                 result.merge(db_validation)
-
-             # Step 5: Test reverse migration
-             if migration_result.success:
-                 reverse_result = self._test_database_to_json_migration()
-                 result.merge(reverse_result)
-
-             # Step 6: Validate round-trip accuracy
-             if reverse_result.success:
-                 roundtrip_result = self._validate_roundtrip_accuracy(json_filepath)
-                 result.merge(roundtrip_result)
-
-             # Step 7: Test edge cases
-             edge_case_result = self._test_edge_cases()
-             result.merge(edge_case_result)
-
-             # Step 8: Performance testing
-             performance_result = self._test_performance()
-             result.merge(performance_result)
-
-             result.details['validation_summary'] = {
-                 'total_tests': 8,
-                 'passed_tests': sum(1 for r in [source_validation, backup_result, migration_result,
-                                                 db_validation, reverse_result, roundtrip_result,
-                                                 edge_case_result, performance_result] if r.success),
-                 'total_errors': len(result.errors),
-                 'total_warnings': len(result.warnings)
-             }
-
-             self.logger.info("Comprehensive migration validation completed")
-             return result
-
-         except Exception as e:
-             result.add_error(f"Validation failed with exception: {e}")
-             self.logger.error(f"Migration validation error: {e}")
-             return result
-
-         finally:
-             self._cleanup_test_environment()
-
-     def validate_tool_configurations(self) -> ValidationResult:
-         """
-         Validate all known tool configurations for migration compatibility.
-
-         Returns:
-             ValidationResult with tool-specific validation details
-         """
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             self.logger.info("Validating all tool configurations")
-
-             # Create test data for each known tool
-             test_configs = self._generate_test_tool_configurations()
-
-             for tool_name, tool_config in test_configs.items():
-                 tool_result = self._validate_single_tool_configuration(tool_name, tool_config)
-                 result.merge(tool_result)
-
-                 result.details[f'tool_{tool_name.replace(" ", "_")}'] = {
-                     'success': tool_result.success,
-                     'errors': tool_result.errors,
-                     'warnings': tool_result.warnings
-                 }
-
-             result.details['tool_validation_summary'] = {
-                 'total_tools': len(test_configs),
-                 'successful_tools': sum(1 for tool in result.details.values()
-                                         if isinstance(tool, dict) and tool.get('success', False)),
-                 'failed_tools': sum(1 for tool in result.details.values()
-                                     if isinstance(tool, dict) and not tool.get('success', True))
-             }
-
-             self.logger.info("Tool configuration validation completed")
-             return result
-
-         except Exception as e:
-             result.add_error(f"Tool validation failed: {e}")
-             return result
-
-     def test_edge_cases(self) -> ValidationResult:
-         """
-         Test migration with various edge cases and malformed data.
-
-         Returns:
-             ValidationResult with edge case test results
-         """
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             self.logger.info("Testing migration edge cases")
-
-             # Test cases for edge scenarios
-             edge_cases = [
-                 ('empty_json', {}),
-                 ('missing_tool_settings', {'export_path': 'test'}),
-                 ('corrupted_nested_structure', {'tool_settings': {'invalid': None}}),
-                 ('large_data_structure', self._generate_large_test_data()),
-                 ('unicode_content', self._generate_unicode_test_data()),
-                 ('encrypted_keys', self._generate_encrypted_test_data()),
-                 ('invalid_types', self._generate_invalid_type_data()),
-                 ('missing_tabs', {'tool_settings': {}}),
-                 ('extra_fields', self._generate_extra_fields_data())
-             ]
-
-             for case_name, test_data in edge_cases:
-                 case_result = self._test_single_edge_case(case_name, test_data)
-                 result.merge(case_result)
-
-                 result.details[f'edge_case_{case_name}'] = {
-                     'success': case_result.success,
-                     'errors': case_result.errors,
-                     'warnings': case_result.warnings
-                 }
-
-             result.details['edge_case_summary'] = {
-                 'total_cases': len(edge_cases),
-                 'passed_cases': sum(1 for case in result.details.values()
-                                     if isinstance(case, dict) and case.get('success', False)),
-                 'failed_cases': sum(1 for case in result.details.values()
-                                     if isinstance(case, dict) and not case.get('success', True))
-             }
-
-             self.logger.info("Edge case testing completed")
-             return result
-
-         except Exception as e:
-             result.add_error(f"Edge case testing failed: {e}")
-             return result
-
-     def create_automatic_backup(self, json_filepath: str) -> Tuple[bool, Optional[str]]:
-         """
-         Create automatic backup before migration with validation.
-
-         Args:
-             json_filepath: Path to JSON file to backup
-
-         Returns:
-             Tuple of (success, backup_path)
-         """
-         try:
-             if not os.path.exists(json_filepath):
-                 self.logger.error(f"Source file not found: {json_filepath}")
-                 return False, None
-
-             # Generate backup path with timestamp
-             timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-             backup_path = f"{json_filepath}.backup_{timestamp}"
-
-             # Create backup
-             shutil.copy2(json_filepath, backup_path)
-
-             # Validate backup integrity
-             if self._validate_backup_integrity(json_filepath, backup_path):
-                 self.logger.info(f"Automatic backup created: {backup_path}")
-                 return True, backup_path
-             else:
-                 self.logger.error("Backup integrity validation failed")
-                 return False, None
-
-         except Exception as e:
-             self.logger.error(f"Automatic backup failed: {e}")
-             return False, None
-
-     def test_rollback_procedures(self, backup_filepath: str) -> ValidationResult:
-         """
-         Test rollback procedures for failed migrations.
-
-         Args:
-             backup_filepath: Path to backup file for rollback testing
-
-         Returns:
-             ValidationResult with rollback test results
-         """
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             self.logger.info(f"Testing rollback procedures: {backup_filepath}")
-
-             if not os.path.exists(backup_filepath):
-                 result.add_error(f"Backup file not found: {backup_filepath}")
-                 return result
-
-             # Test rollback functionality
-             rollback_success = self.migration_manager.rollback_migration(backup_filepath)
-
-             if rollback_success:
-                 result.details['rollback_test'] = {
-                     'success': True,
-                     'backup_file': backup_filepath,
-                     'rollback_time': datetime.now().isoformat()
-                 }
-             else:
-                 result.add_error("Rollback procedure failed")
-                 result.details['rollback_test'] = {
-                     'success': False,
-                     'backup_file': backup_filepath,
-                     'error': 'Rollback operation failed'
-                 }
-
-             # Validate restored file integrity
-             if rollback_success:
-                 integrity_result = self._validate_rollback_integrity(backup_filepath)
-                 result.merge(integrity_result)
-
-             self.logger.info("Rollback procedure testing completed")
-             return result
-
-         except Exception as e:
-             result.add_error(f"Rollback testing failed: {e}")
-             return result
-
-     # Private implementation methods
-
-     def _validate_source_json(self, json_filepath: str) -> ValidationResult:
-         """Validate source JSON file structure and content."""
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             if not os.path.exists(json_filepath):
-                 result.add_error(f"JSON file not found: {json_filepath}")
-                 return result
-
-             # Load and parse JSON
-             with open(json_filepath, 'r', encoding='utf-8') as f:
-                 data = json.load(f)
-
-             # Validate critical settings exist
-             for setting in self._critical_settings:
-                 if setting not in data:
-                     if setting in ['input_tabs', 'output_tabs', 'tool_settings']:
-                         result.add_warning(f"Missing critical setting: {setting}")
-                     else:
-                         result.add_error(f"Missing critical setting: {setting}")
-
-             # Validate tool settings structure
-             if 'tool_settings' in data:
-                 tool_validation = self._validate_tool_settings_structure(data['tool_settings'])
-                 result.merge(tool_validation)
-
-             # Validate tab arrays
-             if 'input_tabs' in data:
-                 tab_validation = self._validate_tab_arrays(data['input_tabs'], 'input_tabs')
-                 result.merge(tab_validation)
-
-             if 'output_tabs' in data:
-                 tab_validation = self._validate_tab_arrays(data['output_tabs'], 'output_tabs')
-                 result.merge(tab_validation)
-
-             result.details['source_validation'] = {
-                 'file_size': os.path.getsize(json_filepath),
-                 'tool_count': len(data.get('tool_settings', {})),
-                 'has_critical_settings': all(s in data for s in self._critical_settings)
-             }
-
-             return result
-
-         except json.JSONDecodeError as e:
-             result.add_error(f"Invalid JSON format: {e}")
-             return result
-         except Exception as e:
-             result.add_error(f"Source validation failed: {e}")
-             return result
-
-     def _setup_test_environment(self, json_filepath: str) -> ValidationResult:
-         """Setup test environment with temporary directories and backups."""
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             # Create temporary test directory
-             self._test_data_dir = tempfile.mkdtemp(prefix="migration_test_")
-             self._backup_dir = os.path.join(self._test_data_dir, "backups")
-             os.makedirs(self._backup_dir, exist_ok=True)
-
-             # Create backup
-             backup_success, backup_path = self.create_automatic_backup(json_filepath)
-             if backup_success:
-                 result.details['backup_created'] = backup_path
-             else:
-                 result.add_error("Failed to create test backup")
-
-             result.details['test_environment'] = {
-                 'test_dir': self._test_data_dir,
-                 'backup_dir': self._backup_dir,
-                 'setup_time': datetime.now().isoformat()
-             }
-
-             return result
-
-         except Exception as e:
-             result.add_error(f"Test environment setup failed: {e}")
-             return result
-
-     def _test_json_to_database_migration(self, json_filepath: str) -> ValidationResult:
-         """Test JSON to database migration process."""
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             # Perform migration
-             migration_success = self.migration_manager.migrate_from_json(json_filepath, validate=True)
-
-             if migration_success:
-                 result.details['json_to_db_migration'] = {
-                     'success': True,
-                     'migration_time': datetime.now().isoformat()
-                 }
-             else:
-                 result.add_error("JSON to database migration failed")
-                 result.details['json_to_db_migration'] = {
-                     'success': False,
-                     'error': 'Migration operation failed'
-                 }
-
-             return result
-
-         except Exception as e:
-             result.add_error(f"JSON to database migration test failed: {e}")
-             return result
-
-     def _validate_database_content(self) -> ValidationResult:
-         """Validate database content after migration."""
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             conn = self.migration_manager.connection_manager.get_connection()
-
-             # Check all tables exist and have data
-             tables = ['core_settings', 'tool_settings', 'tab_content',
-                       'performance_settings', 'font_settings', 'dialog_settings']
-
-             for table in tables:
-                 cursor = conn.execute(f"SELECT COUNT(*) FROM {table}")
-                 count = cursor.fetchone()[0]
-
-                 result.details[f'{table}_count'] = count
-
-                 if table in ['core_settings', 'tool_settings'] and count == 0:
-                     result.add_warning(f"Table {table} is empty")
-
-             # Validate data types and structure
-             type_validation = self._validate_database_types(conn)
-             result.merge(type_validation)
-
-             return result
-
-         except Exception as e:
-             result.add_error(f"Database content validation failed: {e}")
-             return result
-
-     def _test_database_to_json_migration(self) -> ValidationResult:
-         """Test database to JSON migration process."""
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             # Create temporary JSON file for reverse migration
-             temp_json = os.path.join(self._test_data_dir, "reverse_migration.json")
-
-             # Perform reverse migration
-             migration_success = self.migration_manager.migrate_to_json(temp_json, validate=True)
-
-             if migration_success:
-                 result.details['db_to_json_migration'] = {
-                     'success': True,
-                     'output_file': temp_json,
-                     'migration_time': datetime.now().isoformat()
-                 }
-                 self._temp_files.append(temp_json)
-             else:
-                 result.add_error("Database to JSON migration failed")
-                 result.details['db_to_json_migration'] = {
-                     'success': False,
-                     'error': 'Reverse migration operation failed'
-                 }
-
-             return result
-
-         except Exception as e:
-             result.add_error(f"Database to JSON migration test failed: {e}")
-             return result
-
-     def _validate_roundtrip_accuracy(self, original_json: str) -> ValidationResult:
-         """Validate round-trip migration accuracy."""
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             # Load original data
-             with open(original_json, 'r', encoding='utf-8') as f:
-                 original_data = json.load(f)
-
-             # Load migrated data
-             migrated_json = os.path.join(self._test_data_dir, "reverse_migration.json")
-             if not os.path.exists(migrated_json):
-                 result.add_error("Migrated JSON file not found for comparison")
-                 return result
-
-             with open(migrated_json, 'r', encoding='utf-8') as f:
-                 migrated_data = json.load(f)
-
-             # Deep comparison
-             comparison_result = self._deep_compare_data(original_data, migrated_data)
-             result.merge(comparison_result)
-
-             # Calculate data integrity metrics
-             integrity_metrics = self._calculate_integrity_metrics(original_data, migrated_data)
-             result.details['integrity_metrics'] = integrity_metrics
-
-             return result
-
-         except Exception as e:
-             result.add_error(f"Round-trip accuracy validation failed: {e}")
-             return result
-
-     def _test_edge_cases(self) -> ValidationResult:
-         """Test various edge cases and error conditions."""
-         result = ValidationResult(True, [], [], {})
-
-         # This is a placeholder - the actual implementation would be in test_edge_cases()
-         # which is already implemented above
-         edge_case_result = self.test_edge_cases()
-         result.merge(edge_case_result)
-
-         return result
-
-     def _test_performance(self) -> ValidationResult:
-         """Test migration performance with various data sizes."""
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             # Test with different data sizes
-             test_sizes = [
-                 ('small', 10),     # 10 tools
-                 ('medium', 50),    # 50 tools
-                 ('large', 100),    # 100 tools
-                 ('xlarge', 500)    # 500 tools
-             ]
-
-             for size_name, tool_count in test_sizes:
-                 perf_result = self._test_migration_performance(size_name, tool_count)
-                 result.merge(perf_result)
-                 result.details[f'performance_{size_name}'] = perf_result.details
-
-             return result
-
-         except Exception as e:
-             result.add_error(f"Performance testing failed: {e}")
-             return result
-
-     def _cleanup_test_environment(self) -> None:
-         """Clean up temporary test files and directories."""
-         try:
-             # Remove temporary files
-             for temp_file in self._temp_files:
-                 if os.path.exists(temp_file):
-                     os.remove(temp_file)
-
-             # Remove test directory
-             if self._test_data_dir and os.path.exists(self._test_data_dir):
-                 shutil.rmtree(self._test_data_dir)
-
-             self.logger.debug("Test environment cleaned up")
-
-         except Exception as e:
-             self.logger.warning(f"Test cleanup failed: {e}")
-
-     def _validate_tool_settings_structure(self, tool_settings: Dict[str, Any]) -> ValidationResult:
-         """Validate tool settings structure and known tools."""
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             # Check for known tools
-             found_tools = set(tool_settings.keys())
-             unknown_tools = found_tools - self._known_tools
-
-             if unknown_tools:
-                 result.add_warning(f"Unknown tools found: {unknown_tools}")
-
-             # Validate each tool configuration
-             for tool_name, tool_config in tool_settings.items():
-                 if not isinstance(tool_config, dict):
-                     result.add_error(f"Tool {tool_name} has invalid configuration type: {type(tool_config)}")
-                     continue
-
-                 # Tool-specific validation
-                 tool_result = self._validate_specific_tool(tool_name, tool_config)
-                 result.merge(tool_result)
-
-             result.details['tool_structure_validation'] = {
-                 'total_tools': len(tool_settings),
-                 'known_tools': len(found_tools & self._known_tools),
-                 'unknown_tools': len(unknown_tools)
-             }
-
-             return result
-
-         except Exception as e:
-             result.add_error(f"Tool settings validation failed: {e}")
-             return result
-
-     def _validate_tab_arrays(self, tab_data: List[str], tab_type: str) -> ValidationResult:
-         """Validate tab array structure and content."""
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             if not isinstance(tab_data, list):
-                 result.add_error(f"{tab_type} is not a list: {type(tab_data)}")
-                 return result
-
-             # Check tab count (should be 7)
-             if len(tab_data) != 7:
-                 result.add_warning(f"{tab_type} has {len(tab_data)} tabs, expected 7")
-
-             # Check tab content types
-             for i, content in enumerate(tab_data):
-                 if not isinstance(content, str):
-                     result.add_error(f"{tab_type}[{i}] is not a string: {type(content)}")
-
-             result.details[f'{tab_type}_validation'] = {
-                 'tab_count': len(tab_data),
-                 'non_empty_tabs': sum(1 for tab in tab_data if tab.strip()),
-                 'total_content_length': sum(len(tab) for tab in tab_data)
-             }
-
-             return result
-
-         except Exception as e:
-             result.add_error(f"Tab array validation failed: {e}")
-             return result
-
-     def _validate_specific_tool(self, tool_name: str, tool_config: Dict[str, Any]) -> ValidationResult:
-         """Validate specific tool configuration based on known patterns."""
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             # AI tools validation
-             if any(ai_name in tool_name for ai_name in ['AI', 'Google', 'Anthropic', 'OpenAI', 'Cohere', 'HuggingFace', 'Groq', 'OpenRouter', 'AWS Bedrock', 'LM Studio']):
-                 ai_result = self._validate_ai_tool_config(tool_name, tool_config)
-                 result.merge(ai_result)
-
-             # cURL tool validation
-             elif tool_name == 'cURL Tool':
-                 curl_result = self._validate_curl_tool_config(tool_config)
-                 result.merge(curl_result)
-
-             # Generator tools validation
-             elif tool_name == 'Generator Tools':
-                 gen_result = self._validate_generator_tools_config(tool_config)
-                 result.merge(gen_result)
-
-             # Other tool validations can be added here
-
-             return result
-
-         except Exception as e:
-             result.add_error(f"Specific tool validation failed for {tool_name}: {e}")
-             return result
-
-     def _validate_ai_tool_config(self, tool_name: str, config: Dict[str, Any]) -> ValidationResult:
-         """Validate AI tool configuration."""
-         result = ValidationResult(True, [], [], {})
-
-         # Check for required AI tool fields
-         required_fields = ['API_KEY', 'MODEL']
-         for field in required_fields:
-             if field not in config:
-                 result.add_error(f"AI tool {tool_name} missing required field: {field}")
-
-         # Check for encrypted API keys
-         if 'API_KEY' in config and config['API_KEY'].startswith('ENC:'):
-             result.details[f'{tool_name}_encrypted'] = True
-
-         # Check model list
-         if 'MODELS_LIST' in config and isinstance(config['MODELS_LIST'], list):
-             result.details[f'{tool_name}_model_count'] = len(config['MODELS_LIST'])
-
-         return result
-
-     def _validate_curl_tool_config(self, config: Dict[str, Any]) -> ValidationResult:
-         """Validate cURL tool configuration."""
-         result = ValidationResult(True, [], [], {})
-
-         # Check for history array
-         if 'history' in config:
-             if isinstance(config['history'], list):
-                 result.details['curl_history_count'] = len(config['history'])
-
-                 # Validate history entries
-                 for i, entry in enumerate(config['history']):
-                     if not isinstance(entry, dict):
-                         result.add_error(f"cURL history entry {i} is not a dict")
-                         continue
-
-                     required_history_fields = ['timestamp', 'method', 'url', 'status_code']
-                     for field in required_history_fields:
-                         if field not in entry:
-                             result.add_warning(f"cURL history entry {i} missing field: {field}")
-             else:
-                 result.add_error("cURL history is not a list")
-
-         return result
-
-     def _validate_generator_tools_config(self, config: Dict[str, Any]) -> ValidationResult:
-         """Validate Generator Tools nested configuration."""
-         result = ValidationResult(True, [], [], {})
-
-         # Check for nested tool configurations
-         expected_generators = [
-             'Strong Password Generator', 'Repeating Text Generator',
-             'Lorem Ipsum Generator', 'UUID/GUID Generator'
-         ]
-
-         for generator in expected_generators:
-             if generator in config:
-                 if not isinstance(config[generator], dict):
-                     result.add_error(f"Generator {generator} config is not a dict")
-             else:
-                 result.add_warning(f"Generator {generator} not found in config")
-
-         result.details['generator_tools_count'] = len(config)
-
-         return result
-
-     def _generate_test_tool_configurations(self) -> Dict[str, Dict[str, Any]]:
-         """Generate test configurations for all known tools."""
-         return {
-             'Test AI Tool': {
-                 'API_KEY': 'test_key',
-                 'MODEL': 'test_model',
-                 'MODELS_LIST': ['model1', 'model2'],
-                 'temperature': 0.7
-             },
-             'Test cURL Tool': {
-                 'default_timeout': 30,
-                 'history': [
-                     {
-                         'timestamp': '2025-01-01T00:00:00',
-                         'method': 'GET',
-                         'url': 'https://test.com',
-                         'status_code': 200
-                     }
-                 ]
-             },
-             'Test Generator Tools': {
-                 'Password Generator': {
-                     'length': 12,
-                     'symbols': True
-                 }
-             }
-         }
-
-     def _validate_single_tool_configuration(self, tool_name: str, tool_config: Dict[str, Any]) -> ValidationResult:
-         """Validate a single tool configuration through migration."""
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             # Create test JSON with just this tool
-             test_data = {
-                 'export_path': 'test',
-                 'tool_settings': {tool_name: tool_config}
-             }
-
-             # Create temporary test file
-             test_file = os.path.join(self._test_data_dir or tempfile.gettempdir(), f"test_{tool_name.replace(' ', '_')}.json")
-             with open(test_file, 'w', encoding='utf-8') as f:
-                 json.dump(test_data, f, indent=2)
-
-             self._temp_files.append(test_file)
-
-             # Test migration
-             migration_success = self.migration_manager.migrate_from_json(test_file, validate=False)
-
-             if migration_success:
-                 result.details['migration_success'] = True
-             else:
-                 result.add_error(f"Migration failed for tool: {tool_name}")
-
-             return result
-
-         except Exception as e:
-             result.add_error(f"Tool configuration test failed for {tool_name}: {e}")
-             return result
-
-     def _test_single_edge_case(self, case_name: str, test_data: Dict[str, Any]) -> ValidationResult:
-         """Test a single edge case scenario."""
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             # Create test file
-             test_file = os.path.join(self._test_data_dir or tempfile.gettempdir(), f"edge_case_{case_name}.json")
-
-             with open(test_file, 'w', encoding='utf-8') as f:
-                 json.dump(test_data, f, indent=2)
-
-             self._temp_files.append(test_file)
-
-             # Test migration (expect some to fail gracefully)
-             migration_success = self.migration_manager.migrate_from_json(test_file, validate=False)
-
-             # For edge cases, we expect some to fail - that's okay
-             result.details['migration_attempted'] = True
-             result.details['migration_success'] = migration_success
-             result.details['case_data_size'] = len(json.dumps(test_data))
-
-             return result
-
-         except Exception as e:
-             # Edge cases are expected to sometimes cause exceptions
-             result.add_warning(f"Edge case {case_name} caused exception (expected): {e}")
-             result.details['exception_occurred'] = True
-             return result
-
-     def _generate_large_test_data(self) -> Dict[str, Any]:
-         """Generate large test data for performance testing."""
-         large_data = {
-             'export_path': 'test',
-             'tool_settings': {}
-         }
-
-         # Generate many tool configurations
-         for i in range(100):
-             large_data['tool_settings'][f'Test Tool {i}'] = {
-                 'setting1': f'value_{i}',
-                 'setting2': i * 10,
-                 'setting3': [f'item_{j}' for j in range(10)],
-                 'nested': {
-                     'deep': {
-                         'value': f'deep_value_{i}'
-                     }
-                 }
-             }
-
-         return large_data
-
-     def _generate_unicode_test_data(self) -> Dict[str, Any]:
-         """Generate test data with Unicode content."""
-         return {
-             'export_path': 'тест',
-             'tool_settings': {
-                 'Unicode Tool': {
-                     'name': '测试工具',
-                     'description': 'Тестовое описание',
-                     'emoji': '🚀🔧⚙️',
-                     'special_chars': '©®™€£¥'
-                 }
-             }
-         }
-
-     def _generate_encrypted_test_data(self) -> Dict[str, Any]:
-         """Generate test data with encrypted keys."""
-         return {
-             'export_path': 'test',
-             'tool_settings': {
-                 'Encrypted Tool': {
-                     'API_KEY': 'ENC:dGVzdF9lbmNyeXB0ZWRfa2V5',
-                     'SECRET': 'ENC:YW5vdGhlcl9lbmNyeXB0ZWRfc2VjcmV0',
-                     'normal_setting': 'plain_value'
-                 }
-             }
-         }
-
-     def _generate_invalid_type_data(self) -> Dict[str, Any]:
-         """Generate test data with invalid types."""
-         return {
-             'export_path': 123,  # Should be string
-             'tool_settings': {
-                 'Invalid Tool': {
-                     'setting': float('inf'),  # Invalid JSON value
-                     'another': complex(1, 2)  # Invalid JSON type
-                 }
-             }
-         }
-
-     def _generate_extra_fields_data(self) -> Dict[str, Any]:
-         """Generate test data with extra unknown fields."""
-         return {
-             'export_path': 'test',
-             'unknown_field': 'should_be_preserved',
-             'tool_settings': {
-                 'Extra Fields Tool': {
-                     'standard_setting': 'value',
-                     'custom_field': 'custom_value',
-                     'metadata': {
-                         'version': '1.0',
-                         'author': 'test'
-                     }
-                 }
-             },
-             'experimental_feature': True
-         }
-
-     def _test_migration_performance(self, size_name: str, tool_count: int) -> ValidationResult:
-         """Test migration performance with specified data size."""
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             # Generate test data
-             test_data = {
-                 'export_path': 'test',
-                 'tool_settings': {}
-             }
-
-             for i in range(tool_count):
-                 test_data['tool_settings'][f'Tool_{i}'] = {
-                     'setting1': f'value_{i}',
-                     'setting2': i,
-                     'nested': {'deep': f'deep_{i}'}
-                 }
-
-             # Create test file
-             test_file = os.path.join(self._test_data_dir or tempfile.gettempdir(), f"perf_test_{size_name}.json")
-
-             start_time = datetime.now()
-             with open(test_file, 'w', encoding='utf-8') as f:
-                 json.dump(test_data, f)
-             write_time = (datetime.now() - start_time).total_seconds()
-
-             self._temp_files.append(test_file)
-
-             # Test migration performance
-             start_time = datetime.now()
-             migration_success = self.migration_manager.migrate_from_json(test_file, validate=False)
-             migration_time = (datetime.now() - start_time).total_seconds()
-
-             result.details = {
-                 'tool_count': tool_count,
-                 'file_size': os.path.getsize(test_file),
-                 'write_time': write_time,
-                 'migration_time': migration_time,
-                 'migration_success': migration_success,
-                 'tools_per_second': tool_count / migration_time if migration_time > 0 else 0
-             }
-
-             if not migration_success:
-                 result.add_error(f"Performance test failed for {size_name} ({tool_count} tools)")
-
-             return result
-
-         except Exception as e:
-             result.add_error(f"Performance test failed for {size_name}: {e}")
-             return result
-
-     def _validate_backup_integrity(self, original_file: str, backup_file: str) -> bool:
-         """Validate backup file integrity using checksums."""
-         try:
-             def get_file_hash(filepath: str) -> str:
-                 with open(filepath, 'rb') as f:
-                     return hashlib.sha256(f.read()).hexdigest()
-
-             original_hash = get_file_hash(original_file)
-             backup_hash = get_file_hash(backup_file)
-
-             return original_hash == backup_hash
-
-         except Exception as e:
-             self.logger.error(f"Backup integrity validation failed: {e}")
-             return False
-
-     def _validate_rollback_integrity(self, backup_filepath: str) -> ValidationResult:
-         """Validate rollback operation integrity."""
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             # Determine original file path
-             original_path = backup_filepath.split('.backup_')[0]
-
-             if os.path.exists(original_path):
-                 integrity_valid = self._validate_backup_integrity(backup_filepath, original_path)
-
-                 if integrity_valid:
-                     result.details['rollback_integrity'] = True
-                 else:
-                     result.add_error("Rollback integrity check failed")
-                     result.details['rollback_integrity'] = False
-             else:
-                 result.add_warning("Original file not found for integrity check")
-
-             return result
-
-         except Exception as e:
-             result.add_error(f"Rollback integrity validation failed: {e}")
-             return result
-
-     def _validate_database_types(self, conn: sqlite3.Connection) -> ValidationResult:
-         """Validate database data types and serialization."""
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             # Check data types in each table
-             tables_to_check = [
-                 ('core_settings', ['key', 'value', 'data_type']),
-                 ('tool_settings', ['tool_name', 'setting_path', 'setting_value', 'data_type']),
-                 ('tab_content', ['tab_type', 'tab_index', 'content'])
-             ]
-
-             for table_name, columns in tables_to_check:
-                 cursor = conn.execute(f"SELECT * FROM {table_name} LIMIT 5")
-                 rows = cursor.fetchall()
-
-                 result.details[f'{table_name}_sample_count'] = len(rows)
-
-                 # Check for data type consistency
-                 if 'data_type' in columns:
-                     cursor = conn.execute(f"SELECT DISTINCT data_type FROM {table_name}")
-                     data_types = [row[0] for row in cursor.fetchall()]
-                     result.details[f'{table_name}_data_types'] = data_types
-
-             return result
-
-         except Exception as e:
-             result.add_error(f"Database type validation failed: {e}")
-             return result
-
-     def _deep_compare_data(self, original: Dict[str, Any], migrated: Dict[str, Any]) -> ValidationResult:
-         """Perform deep comparison of original and migrated data."""
-         result = ValidationResult(True, [], [], {})
-
-         try:
-             # Compare keys
-             original_keys = set(original.keys())
-             migrated_keys = set(migrated.keys())
-
-             missing_keys = original_keys - migrated_keys
-             extra_keys = migrated_keys - original_keys
-
-             if missing_keys:
-                 result.add_error(f"Missing keys in migrated data: {missing_keys}")
-
-             if extra_keys:
-                 result.add_warning(f"Extra keys in migrated data: {extra_keys}")
-
-             # Compare values for common keys
-             common_keys = original_keys & migrated_keys
-             value_differences = []
-
-             for key in common_keys:
-                 if not self._deep_equal(original[key], migrated[key]):
-                     value_differences.append(key)
-
-             if value_differences:
-                 result.add_error(f"Value differences found in keys: {value_differences}")
-
-             result.details['comparison_summary'] = {
-                 'total_original_keys': len(original_keys),
-                 'total_migrated_keys': len(migrated_keys),
-                 'common_keys': len(common_keys),
-                 'missing_keys': len(missing_keys),
-                 'extra_keys': len(extra_keys),
-                 'value_differences': len(value_differences)
-             }
-
-             return result
-
-         except Exception as e:
-             result.add_error(f"Deep comparison failed: {e}")
-             return result
-
-     def _deep_equal(self, obj1: Any, obj2: Any) -> bool:
-         """Deep equality comparison with type checking."""
-         if type(obj1) != type(obj2):
-             return False
-
-         if isinstance(obj1, dict):
-             if set(obj1.keys()) != set(obj2.keys()):
-                 return False
-             return all(self._deep_equal(obj1[k], obj2[k]) for k in obj1.keys())
-
-         elif isinstance(obj1, list):
-             if len(obj1) != len(obj2):
-                 return False
-             return all(self._deep_equal(obj1[i], obj2[i]) for i in range(len(obj1)))
-
-         else:
-             return obj1 == obj2
-
-     def _calculate_integrity_metrics(self, original: Dict[str, Any], migrated: Dict[str, Any]) -> Dict[str, Any]:
-         """Calculate data integrity metrics."""
-         try:
-             original_str = json.dumps(original, sort_keys=True)
-             migrated_str = json.dumps(migrated, sort_keys=True)
-
-             # Calculate similarity metrics
-             original_size = len(original_str)
-             migrated_size = len(migrated_str)
-
-             # Simple character-level similarity
-             min_len = min(original_size, migrated_size)
-             max_len = max(original_size, migrated_size)
-
-             matching_chars = sum(1 for i in range(min_len)
-                                  if original_str[i] == migrated_str[i])
-
-             similarity = matching_chars / max_len if max_len > 0 else 1.0
-
-             return {
-                 'original_size': original_size,
-                 'migrated_size': migrated_size,
-                 'size_difference': abs(original_size - migrated_size),
-                 'character_similarity': similarity,
-                 'exact_match': original_str == migrated_str
-             }
-
-         except Exception as e:
-             return {
-                 'error': str(e),
-                 'exact_match': False
1
+ """
2
+ Migration Validation System for Settings Database Migration
3
+
4
+ This module provides comprehensive validation and testing capabilities for the
5
+ migration system, including data integrity checks, edge case testing, and
6
+ rollback procedures for all tool configurations.
7
+
8
+ Designed to validate all 15+ tool configurations and complex data structures
9
+ identified in the production codebase analysis.
10
+ """
11
+
12
+ import json
13
+ import sqlite3
14
+ import os
15
+ import shutil
16
+ import logging
17
+ import tempfile
18
+ import hashlib
19
+ from typing import Dict, List, Tuple, Any, Optional, Union, Set
20
+ from datetime import datetime
21
+ from pathlib import Path
22
+ from dataclasses import dataclass
23
+
24
+ from .migration_manager import MigrationManager
25
+ from .database_connection_manager import DatabaseConnectionManager
26
+ from .database_schema_manager import DatabaseSchemaManager
27
+
28
+
29
+ @dataclass
30
+ class ValidationResult:
31
+ """Result of a validation operation."""
32
+ success: bool
33
+ errors: List[str]
34
+ warnings: List[str]
35
+ details: Dict[str, Any]
36
+
37
+ def add_error(self, error: str) -> None:
38
+ """Add an error to the result."""
39
+ self.errors.append(error)
40
+ self.success = False
41
+
42
+ def add_warning(self, warning: str) -> None:
43
+ """Add a warning to the result."""
44
+ self.warnings.append(warning)
45
+
46
+ def merge(self, other: 'ValidationResult') -> None:
47
+ """Merge another validation result into this one."""
48
+ self.errors.extend(other.errors)
49
+ self.warnings.extend(other.warnings)
50
+ self.details.update(other.details)
51
+ if not other.success:
52
+ self.success = False
53
+
54
+
55
+ class MigrationValidator:
56
+ """
57
+ Comprehensive migration validation and testing system.
58
+
59
+ Features:
60
+ - Data integrity validation for all tool configurations
61
+ - Edge case testing (corrupted JSON, missing fields, invalid data)
62
+ - Automatic backup creation and rollback procedures
63
+ - Performance testing for large settings files
64
+ - Comprehensive test suite for all 15+ tool types
65
+ """
66
+
67
+ def __init__(self, migration_manager: MigrationManager):
68
+ """
69
+ Initialize the migration validator.
70
+
71
+ Args:
72
+ migration_manager: MigrationManager instance to validate
73
+ """
74
+ self.migration_manager = migration_manager
75
+ self.logger = logging.getLogger(__name__)
76
+
77
+ # Test configuration
78
+ self._test_data_dir = None
79
+ self._backup_dir = None
80
+ self._temp_files = []
81
+
82
+ # Validation settings
83
+ self._strict_validation = True
84
+ self._validate_types = True
85
+ self._validate_structure = True
86
+ self._validate_content = True
87
+
88
+ # Known tool configurations from production analysis
89
+ self._known_tools = {
90
+ 'AI Tools', 'Base64 Encoder/Decoder', 'Case Tool', 'Cron Tool', 'Diff Viewer',
91
+ 'Email Extraction Tool', 'Email Header Analyzer', 'Find & Replace Text',
92
+ 'Folder File Reporter', 'Generator Tools', 'HTML Extraction Tool',
93
+ 'JSON/XML Tool', 'Sorter Tools', 'Translator Tools', 'URL Parser',
94
+ 'URL and Link Extractor', 'Word Frequency Counter',
95
+ # AI Provider configurations (stored as separate tool settings)
96
+ 'Google AI', 'Anthropic AI', 'OpenAI', 'AWS Bedrock', 'Cohere AI',
97
+ 'HuggingFace AI', 'Groq AI', 'OpenRouterAI', 'LM Studio'
98
+ }
99
+
100
+ # Critical settings that must be preserved
101
+ self._critical_settings = {
102
+ 'export_path', 'debug_level', 'selected_tool',
103
+ 'active_input_tab', 'active_output_tab',
104
+ 'input_tabs', 'output_tabs', 'tool_settings'
105
+ }
106
+
107
+     def validate_complete_migration(self, json_filepath: str) -> ValidationResult:
+         """
+         Perform comprehensive validation of a complete migration cycle.
+
+         Args:
+             json_filepath: Path to source JSON settings file
+
+         Returns:
+             ValidationResult with comprehensive validation details
+         """
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             self.logger.info(f"Starting comprehensive migration validation: {json_filepath}")
+
+             # Step 1: Validate source JSON file
+             source_validation = self._validate_source_json(json_filepath)
+             result.merge(source_validation)
+
+             if not source_validation.success and self._strict_validation:
+                 return result
+
+             # Step 2: Create backup and test environment
+             backup_result = self._setup_test_environment(json_filepath)
+             result.merge(backup_result)
+
+             # Step 3: Perform migration to database
+             migration_result = self._test_json_to_database_migration(json_filepath)
+             result.merge(migration_result)
+
+             # Steps 4-6 run conditionally; pre-seed their results so the
+             # references below never raise NameError when a step is skipped
+             db_validation = ValidationResult(False, [], [], {})
+             reverse_result = ValidationResult(False, [], [], {})
+             roundtrip_result = ValidationResult(False, [], [], {})
+
+             # Step 4: Validate database content
+             if migration_result.success:
+                 db_validation = self._validate_database_content()
+                 result.merge(db_validation)
+
+             # Step 5: Test reverse migration
+             if migration_result.success:
+                 reverse_result = self._test_database_to_json_migration()
+                 result.merge(reverse_result)
+
+             # Step 6: Validate round-trip accuracy
+             if reverse_result.success:
+                 roundtrip_result = self._validate_roundtrip_accuracy(json_filepath)
+                 result.merge(roundtrip_result)
+
+             # Step 7: Test edge cases
+             edge_case_result = self._test_edge_cases()
+             result.merge(edge_case_result)
+
+             # Step 8: Performance testing
+             performance_result = self._test_performance()
+             result.merge(performance_result)
+
+             result.details['validation_summary'] = {
+                 'total_tests': 8,
+                 'passed_tests': sum(1 for r in [source_validation, backup_result, migration_result,
+                                                 db_validation, reverse_result, roundtrip_result,
+                                                 edge_case_result, performance_result] if r.success),
+                 'total_errors': len(result.errors),
+                 'total_warnings': len(result.warnings)
+             }
+
+             self.logger.info("Comprehensive migration validation completed")
+             return result
+
+         except Exception as e:
+             result.add_error(f"Validation failed with exception: {e}")
+             self.logger.error(f"Migration validation error: {e}")
+             return result
+
+         finally:
+             self._cleanup_test_environment()
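+
+     # Usage sketch (illustrative only; assumes a MigrationManager already
+     # wired to the application's settings database, and a placeholder path):
+     #
+     #     validator = MigrationValidator(migration_manager)
+     #     report = validator.validate_complete_migration('settings.json')
+     #     summary = report.details.get('validation_summary', {})
+     #     print(f"{summary.get('passed_tests', 0)}/8 steps passed, "
+     #           f"{len(report.errors)} errors, {len(report.warnings)} warnings")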
+
+     def validate_tool_configurations(self) -> ValidationResult:
+         """
+         Validate all known tool configurations for migration compatibility.
+
+         Returns:
+             ValidationResult with tool-specific validation details
+         """
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             self.logger.info("Validating all tool configurations")
+
+             # Create test data for each known tool
+             test_configs = self._generate_test_tool_configurations()
+
+             for tool_name, tool_config in test_configs.items():
+                 tool_result = self._validate_single_tool_configuration(tool_name, tool_config)
+                 result.merge(tool_result)
+
+                 result.details[f'tool_{tool_name.replace(" ", "_")}'] = {
+                     'success': tool_result.success,
+                     'errors': tool_result.errors,
+                     'warnings': tool_result.warnings
+                 }
+
+             result.details['tool_validation_summary'] = {
+                 'total_tools': len(test_configs),
+                 'successful_tools': sum(1 for tool in result.details.values()
+                                         if isinstance(tool, dict) and tool.get('success', False)),
+                 'failed_tools': sum(1 for tool in result.details.values()
+                                     if isinstance(tool, dict) and not tool.get('success', True))
+             }
+
+             self.logger.info("Tool configuration validation completed")
+             return result
+
+         except Exception as e:
+             result.add_error(f"Tool validation failed: {e}")
+             return result
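+
+     # Reading the summary (sketch; keys match the dict built above):
+     #
+     #     report = validator.validate_tool_configurations()
+     #     s = report.details['tool_validation_summary']
+     #     print(s['successful_tools'], 'of', s['total_tools'], 'tools passed')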
+
+     def test_edge_cases(self) -> ValidationResult:
+         """
+         Test migration with various edge cases and malformed data.
+
+         Returns:
+             ValidationResult with edge case test results
+         """
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             self.logger.info("Testing migration edge cases")
+
+             # Test cases for edge scenarios
+             edge_cases = [
+                 ('empty_json', {}),
+                 ('missing_tool_settings', {'export_path': 'test'}),
+                 ('corrupted_nested_structure', {'tool_settings': {'invalid': None}}),
+                 ('large_data_structure', self._generate_large_test_data()),
+                 ('unicode_content', self._generate_unicode_test_data()),
+                 ('encrypted_keys', self._generate_encrypted_test_data()),
+                 ('invalid_types', self._generate_invalid_type_data()),
+                 ('missing_tabs', {'tool_settings': {}}),
+                 ('extra_fields', self._generate_extra_fields_data())
+             ]
+
+             for case_name, test_data in edge_cases:
+                 case_result = self._test_single_edge_case(case_name, test_data)
+                 result.merge(case_result)
+
+                 result.details[f'edge_case_{case_name}'] = {
+                     'success': case_result.success,
+                     'errors': case_result.errors,
+                     'warnings': case_result.warnings
+                 }
+
+             result.details['edge_case_summary'] = {
+                 'total_cases': len(edge_cases),
+                 'passed_cases': sum(1 for case in result.details.values()
+                                     if isinstance(case, dict) and case.get('success', False)),
+                 'failed_cases': sum(1 for case in result.details.values()
+                                     if isinstance(case, dict) and not case.get('success', True))
+             }
+
+             self.logger.info("Edge case testing completed")
+             return result
+
+         except Exception as e:
+             result.add_error(f"Edge case testing failed: {e}")
+             return result
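+
+     # Extending the suite (sketch): cases are plain (name, payload) pairs,
+     # so a new scenario is a one-line addition, e.g. tabs of the wrong shape:
+     #
+     #     edge_cases.append(('tabs_not_a_list', {'input_tabs': 'oops'}))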
+
+     def create_automatic_backup(self, json_filepath: str) -> Tuple[bool, Optional[str]]:
+         """
+         Create an automatic backup before migration, with integrity validation.
+
+         Args:
+             json_filepath: Path to the JSON file to back up
+
+         Returns:
+             Tuple of (success, backup_path)
+         """
+         try:
+             if not os.path.exists(json_filepath):
+                 self.logger.error(f"Source file not found: {json_filepath}")
+                 return False, None
+
+             # Generate backup path with timestamp
+             timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+             backup_path = f"{json_filepath}.backup_{timestamp}"
+
+             # Create backup
+             shutil.copy2(json_filepath, backup_path)
+
+             # Validate backup integrity
+             if self._validate_backup_integrity(json_filepath, backup_path):
+                 self.logger.info(f"Automatic backup created: {backup_path}")
+                 return True, backup_path
+             else:
+                 self.logger.error("Backup integrity validation failed")
+                 return False, None
+
+         except Exception as e:
+             self.logger.error(f"Automatic backup failed: {e}")
+             return False, None
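+
+     # Backup-then-migrate flow (sketch; the tuple contract matches above):
+     #
+     #     ok, backup_path = validator.create_automatic_backup(settings_path)
+     #     if not ok:
+     #         raise RuntimeError("refusing to migrate without a verified backup")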
+
+     def test_rollback_procedures(self, backup_filepath: str) -> ValidationResult:
+         """
+         Test rollback procedures for failed migrations.
+
+         Args:
+             backup_filepath: Path to backup file for rollback testing
+
+         Returns:
+             ValidationResult with rollback test results
+         """
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             self.logger.info(f"Testing rollback procedures: {backup_filepath}")
+
+             if not os.path.exists(backup_filepath):
+                 result.add_error(f"Backup file not found: {backup_filepath}")
+                 return result
+
+             # Test rollback functionality
+             rollback_success = self.migration_manager.rollback_migration(backup_filepath)
+
+             if rollback_success:
+                 result.details['rollback_test'] = {
+                     'success': True,
+                     'backup_file': backup_filepath,
+                     'rollback_time': datetime.now().isoformat()
+                 }
+             else:
+                 result.add_error("Rollback procedure failed")
+                 result.details['rollback_test'] = {
+                     'success': False,
+                     'backup_file': backup_filepath,
+                     'error': 'Rollback operation failed'
+                 }
+
+             # Validate restored file integrity
+             if rollback_success:
+                 integrity_result = self._validate_rollback_integrity(backup_filepath)
+                 result.merge(integrity_result)
+
+             self.logger.info("Rollback procedure testing completed")
+             return result
+
+         except Exception as e:
+             result.add_error(f"Rollback testing failed: {e}")
+             return result
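+
+     # Rollback smoke test (sketch; rollback_migration is the same
+     # MigrationManager call exercised above):
+     #
+     #     report = validator.test_rollback_procedures(backup_path)
+     #     assert report.details['rollback_test']['success']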
+
+     # Private implementation methods
+
+     def _validate_source_json(self, json_filepath: str) -> ValidationResult:
+         """Validate source JSON file structure and content."""
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             if not os.path.exists(json_filepath):
+                 result.add_error(f"JSON file not found: {json_filepath}")
+                 return result
+
+             # Load and parse JSON
+             with open(json_filepath, 'r', encoding='utf-8') as f:
+                 data = json.load(f)
+
+             # Validate critical settings exist
+             for setting in self._critical_settings:
+                 if setting not in data:
+                     if setting in ['input_tabs', 'output_tabs', 'tool_settings']:
+                         result.add_warning(f"Missing critical setting: {setting}")
+                     else:
+                         result.add_error(f"Missing critical setting: {setting}")
+
+             # Validate tool settings structure
+             if 'tool_settings' in data:
+                 tool_validation = self._validate_tool_settings_structure(data['tool_settings'])
+                 result.merge(tool_validation)
+
+             # Validate tab arrays
+             if 'input_tabs' in data:
+                 tab_validation = self._validate_tab_arrays(data['input_tabs'], 'input_tabs')
+                 result.merge(tab_validation)
+
+             if 'output_tabs' in data:
+                 tab_validation = self._validate_tab_arrays(data['output_tabs'], 'output_tabs')
+                 result.merge(tab_validation)
+
+             result.details['source_validation'] = {
+                 'file_size': os.path.getsize(json_filepath),
+                 'tool_count': len(data.get('tool_settings', {})),
+                 'has_critical_settings': all(s in data for s in self._critical_settings)
+             }
+
+             return result
+
+         except json.JSONDecodeError as e:
+             result.add_error(f"Invalid JSON format: {e}")
+             return result
+         except Exception as e:
+             result.add_error(f"Source validation failed: {e}")
+             return result
+
+     def _setup_test_environment(self, json_filepath: str) -> ValidationResult:
+         """Set up the test environment with temporary directories and backups."""
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             # Create temporary test directory
+             self._test_data_dir = tempfile.mkdtemp(prefix="migration_test_")
+             self._backup_dir = os.path.join(self._test_data_dir, "backups")
+             os.makedirs(self._backup_dir, exist_ok=True)
+
+             # Create backup
+             backup_success, backup_path = self.create_automatic_backup(json_filepath)
+             if backup_success:
+                 result.details['backup_created'] = backup_path
+             else:
+                 result.add_error("Failed to create test backup")
+
+             result.details['test_environment'] = {
+                 'test_dir': self._test_data_dir,
+                 'backup_dir': self._backup_dir,
+                 'setup_time': datetime.now().isoformat()
+             }
+
+             return result
+
+         except Exception as e:
+             result.add_error(f"Test environment setup failed: {e}")
+             return result
+
+     def _test_json_to_database_migration(self, json_filepath: str) -> ValidationResult:
+         """Test JSON to database migration process."""
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             # Perform migration
+             migration_success = self.migration_manager.migrate_from_json(json_filepath, validate=True)
+
+             if migration_success:
+                 result.details['json_to_db_migration'] = {
+                     'success': True,
+                     'migration_time': datetime.now().isoformat()
+                 }
+             else:
+                 result.add_error("JSON to database migration failed")
+                 result.details['json_to_db_migration'] = {
+                     'success': False,
+                     'error': 'Migration operation failed'
+                 }
+
+             return result
+
+         except Exception as e:
+             result.add_error(f"JSON to database migration test failed: {e}")
+             return result
+
+     def _validate_database_content(self) -> ValidationResult:
+         """Validate database content after migration."""
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             conn = self.migration_manager.connection_manager.get_connection()
+
+             # Check all tables exist and have data
+             tables = ['core_settings', 'tool_settings', 'tab_content',
+                       'performance_settings', 'font_settings', 'dialog_settings']
+
+             for table in tables:
+                 cursor = conn.execute(f"SELECT COUNT(*) FROM {table}")
+                 count = cursor.fetchone()[0]
+
+                 result.details[f'{table}_count'] = count
+
+                 if table in ['core_settings', 'tool_settings'] and count == 0:
+                     result.add_warning(f"Table {table} is empty")
+
+             # Validate data types and structure
+             type_validation = self._validate_database_types(conn)
+             result.merge(type_validation)
+
+             return result
+
+         except Exception as e:
+             result.add_error(f"Database content validation failed: {e}")
+             return result
+
+     def _test_database_to_json_migration(self) -> ValidationResult:
+         """Test database to JSON migration process."""
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             # Create temporary JSON file for reverse migration
+             temp_json = os.path.join(self._test_data_dir, "reverse_migration.json")
+
+             # Perform reverse migration
+             migration_success = self.migration_manager.migrate_to_json(temp_json, validate=True)
+
+             if migration_success:
+                 result.details['db_to_json_migration'] = {
+                     'success': True,
+                     'output_file': temp_json,
+                     'migration_time': datetime.now().isoformat()
+                 }
+                 self._temp_files.append(temp_json)
+             else:
+                 result.add_error("Database to JSON migration failed")
+                 result.details['db_to_json_migration'] = {
+                     'success': False,
+                     'error': 'Reverse migration operation failed'
+                 }
+
+             return result
+
+         except Exception as e:
+             result.add_error(f"Database to JSON migration test failed: {e}")
+             return result
+
+     def _validate_roundtrip_accuracy(self, original_json: str) -> ValidationResult:
+         """Validate round-trip migration accuracy."""
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             # Load original data
+             with open(original_json, 'r', encoding='utf-8') as f:
+                 original_data = json.load(f)
+
+             # Load migrated data
+             migrated_json = os.path.join(self._test_data_dir, "reverse_migration.json")
+             if not os.path.exists(migrated_json):
+                 result.add_error("Migrated JSON file not found for comparison")
+                 return result
+
+             with open(migrated_json, 'r', encoding='utf-8') as f:
+                 migrated_data = json.load(f)
+
+             # Deep comparison
+             comparison_result = self._deep_compare_data(original_data, migrated_data)
+             result.merge(comparison_result)
+
+             # Calculate data integrity metrics
+             integrity_metrics = self._calculate_integrity_metrics(original_data, migrated_data)
+             result.details['integrity_metrics'] = integrity_metrics
+
+             return result
+
+         except Exception as e:
+             result.add_error(f"Round-trip accuracy validation failed: {e}")
+             return result
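+
+     # The integrity_metrics stored above take the shape produced by
+     # _calculate_integrity_metrics at the end of this module:
+     #
+     #     {'original_size': int, 'migrated_size': int, 'size_difference': int,
+     #      'character_similarity': float, 'exact_match': bool}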
+
+     def _test_edge_cases(self) -> ValidationResult:
+         """Test various edge cases and error conditions."""
+         result = ValidationResult(True, [], [], {})
+
+         # Delegate to the public test_edge_cases() implementation above
+         edge_case_result = self.test_edge_cases()
+         result.merge(edge_case_result)
+
+         return result
+
+     def _test_performance(self) -> ValidationResult:
+         """Test migration performance with various data sizes."""
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             # Test with different data sizes
+             test_sizes = [
+                 ('small', 10),    # 10 tools
+                 ('medium', 50),   # 50 tools
+                 ('large', 100),   # 100 tools
+                 ('xlarge', 500)   # 500 tools
+             ]
+
+             for size_name, tool_count in test_sizes:
+                 perf_result = self._test_migration_performance(size_name, tool_count)
+                 result.merge(perf_result)
+                 result.details[f'performance_{size_name}'] = perf_result.details
+
+             return result
+
+         except Exception as e:
+             result.add_error(f"Performance testing failed: {e}")
+             return result
+
+     def _cleanup_test_environment(self) -> None:
+         """Clean up temporary test files and directories."""
+         try:
+             # Remove temporary files
+             for temp_file in self._temp_files:
+                 if os.path.exists(temp_file):
+                     os.remove(temp_file)
+             self._temp_files.clear()  # avoid re-deleting on repeated runs
+
+             # Remove test directory
+             if self._test_data_dir and os.path.exists(self._test_data_dir):
+                 shutil.rmtree(self._test_data_dir)
+
+             self.logger.debug("Test environment cleaned up")
+
+         except Exception as e:
+             self.logger.warning(f"Test cleanup failed: {e}")
+
+     def _validate_tool_settings_structure(self, tool_settings: Dict[str, Any]) -> ValidationResult:
+         """Validate tool settings structure and known tools."""
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             # Check for known tools
+             found_tools = set(tool_settings.keys())
+             unknown_tools = found_tools - self._known_tools
+
+             if unknown_tools:
+                 result.add_warning(f"Unknown tools found: {unknown_tools}")
+
+             # Validate each tool configuration
+             for tool_name, tool_config in tool_settings.items():
+                 if not isinstance(tool_config, dict):
+                     result.add_error(f"Tool {tool_name} has invalid configuration type: {type(tool_config)}")
+                     continue
+
+                 # Tool-specific validation
+                 tool_result = self._validate_specific_tool(tool_name, tool_config)
+                 result.merge(tool_result)
+
+             result.details['tool_structure_validation'] = {
+                 'total_tools': len(tool_settings),
+                 'known_tools': len(found_tools & self._known_tools),
+                 'unknown_tools': len(unknown_tools)
+             }
+
+             return result
+
+         except Exception as e:
+             result.add_error(f"Tool settings validation failed: {e}")
+             return result
+
+     def _validate_tab_arrays(self, tab_data: List[str], tab_type: str) -> ValidationResult:
+         """Validate tab array structure and content."""
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             if not isinstance(tab_data, list):
+                 result.add_error(f"{tab_type} is not a list: {type(tab_data)}")
+                 return result
+
+             # Check tab count (should be 7)
+             if len(tab_data) != 7:
+                 result.add_warning(f"{tab_type} has {len(tab_data)} tabs, expected 7")
+
+             # Check tab content types
+             for i, content in enumerate(tab_data):
+                 if not isinstance(content, str):
+                     result.add_error(f"{tab_type}[{i}] is not a string: {type(content)}")
+
+             # Compute stats over string tabs only; non-string tabs were
+             # already reported as errors above
+             string_tabs = [tab for tab in tab_data if isinstance(tab, str)]
+             result.details[f'{tab_type}_validation'] = {
+                 'tab_count': len(tab_data),
+                 'non_empty_tabs': sum(1 for tab in string_tabs if tab.strip()),
+                 'total_content_length': sum(len(tab) for tab in string_tabs)
+             }
+
+             return result
+
+         except Exception as e:
+             result.add_error(f"Tab array validation failed: {e}")
+             return result
+
+     def _validate_specific_tool(self, tool_name: str, tool_config: Dict[str, Any]) -> ValidationResult:
+         """Validate specific tool configuration based on known patterns."""
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             # AI tools validation
+             ai_markers = ['AI', 'Google', 'Anthropic', 'OpenAI', 'Cohere',
+                           'HuggingFace', 'Groq', 'OpenRouter', 'AWS Bedrock', 'LM Studio']
+             if any(ai_name in tool_name for ai_name in ai_markers):
+                 ai_result = self._validate_ai_tool_config(tool_name, tool_config)
+                 result.merge(ai_result)
+
+             # cURL tool validation
+             elif tool_name == 'cURL Tool':
+                 curl_result = self._validate_curl_tool_config(tool_config)
+                 result.merge(curl_result)
+
+             # Generator tools validation
+             elif tool_name == 'Generator Tools':
+                 gen_result = self._validate_generator_tools_config(tool_config)
+                 result.merge(gen_result)
+
+             # Other tool validations can be added here
+
+             return result
+
+         except Exception as e:
+             result.add_error(f"Specific tool validation failed for {tool_name}: {e}")
+             return result
+
+     def _validate_ai_tool_config(self, tool_name: str, config: Dict[str, Any]) -> ValidationResult:
+         """Validate AI tool configuration."""
+         result = ValidationResult(True, [], [], {})
+
+         # Check for required AI tool fields
+         required_fields = ['API_KEY', 'MODEL']
+         for field in required_fields:
+             if field not in config:
+                 result.add_error(f"AI tool {tool_name} missing required field: {field}")
+
+         # Check for encrypted API keys (guard the type before startswith)
+         if isinstance(config.get('API_KEY'), str) and config['API_KEY'].startswith('ENC:'):
+             result.details[f'{tool_name}_encrypted'] = True
+
+         # Check model list
+         if 'MODELS_LIST' in config and isinstance(config['MODELS_LIST'], list):
+             result.details[f'{tool_name}_model_count'] = len(config['MODELS_LIST'])
+
+         return result
+
+     def _validate_curl_tool_config(self, config: Dict[str, Any]) -> ValidationResult:
+         """Validate cURL tool configuration."""
+         result = ValidationResult(True, [], [], {})
+
+         # Check for history array
+         if 'history' in config:
+             if isinstance(config['history'], list):
+                 result.details['curl_history_count'] = len(config['history'])
+
+                 # Validate history entries
+                 for i, entry in enumerate(config['history']):
+                     if not isinstance(entry, dict):
+                         result.add_error(f"cURL history entry {i} is not a dict")
+                         continue
+
+                     required_history_fields = ['timestamp', 'method', 'url', 'status_code']
+                     for field in required_history_fields:
+                         if field not in entry:
+                             result.add_warning(f"cURL history entry {i} missing field: {field}")
+             else:
+                 result.add_error("cURL history is not a list")
+
+         return result
+
+     def _validate_generator_tools_config(self, config: Dict[str, Any]) -> ValidationResult:
+         """Validate Generator Tools nested configuration."""
+         result = ValidationResult(True, [], [], {})
+
+         # Check for nested tool configurations
+         expected_generators = [
+             'Strong Password Generator', 'Repeating Text Generator',
+             'Lorem Ipsum Generator', 'UUID/GUID Generator'
+         ]
+
+         for generator in expected_generators:
+             if generator in config:
+                 if not isinstance(config[generator], dict):
+                     result.add_error(f"Generator {generator} config is not a dict")
+             else:
+                 result.add_warning(f"Generator {generator} not found in config")
+
+         result.details['generator_tools_count'] = len(config)
+
+         return result
+
+     def _generate_test_tool_configurations(self) -> Dict[str, Dict[str, Any]]:
+         """Generate representative test configurations for the main tool families."""
+         return {
+             'Test AI Tool': {
+                 'API_KEY': 'test_key',
+                 'MODEL': 'test_model',
+                 'MODELS_LIST': ['model1', 'model2'],
+                 'temperature': 0.7
+             },
+             'Test cURL Tool': {
+                 'default_timeout': 30,
+                 'history': [
+                     {
+                         'timestamp': '2025-01-01T00:00:00',
+                         'method': 'GET',
+                         'url': 'https://test.com',
+                         'status_code': 200
+                     }
+                 ]
+             },
+             'Test Generator Tools': {
+                 'Password Generator': {
+                     'length': 12,
+                     'symbols': True
+                 }
+             }
+         }
+
+     def _validate_single_tool_configuration(self, tool_name: str, tool_config: Dict[str, Any]) -> ValidationResult:
+         """Validate a single tool configuration through migration."""
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             # Create test JSON with just this tool
+             test_data = {
+                 'export_path': 'test',
+                 'tool_settings': {tool_name: tool_config}
+             }
+
+             # Create temporary test file
+             test_file = os.path.join(self._test_data_dir or tempfile.gettempdir(),
+                                      f"test_{tool_name.replace(' ', '_')}.json")
+             with open(test_file, 'w', encoding='utf-8') as f:
+                 json.dump(test_data, f, indent=2)
+
+             self._temp_files.append(test_file)
+
+             # Test migration
+             migration_success = self.migration_manager.migrate_from_json(test_file, validate=False)
+
+             if migration_success:
+                 result.details['migration_success'] = True
+             else:
+                 result.add_error(f"Migration failed for tool: {tool_name}")
+
+             return result
+
+         except Exception as e:
+             result.add_error(f"Tool configuration test failed for {tool_name}: {e}")
+             return result
+
+     def _test_single_edge_case(self, case_name: str, test_data: Dict[str, Any]) -> ValidationResult:
+         """Test a single edge case scenario."""
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             # Create test file
+             test_file = os.path.join(self._test_data_dir or tempfile.gettempdir(),
+                                      f"edge_case_{case_name}.json")
+
+             with open(test_file, 'w', encoding='utf-8') as f:
+                 json.dump(test_data, f, indent=2)
+
+             self._temp_files.append(test_file)
+
+             # Test migration (expect some to fail gracefully)
+             migration_success = self.migration_manager.migrate_from_json(test_file, validate=False)
+
+             # For edge cases, we expect some to fail - that's okay
+             result.details['migration_attempted'] = True
+             result.details['migration_success'] = migration_success
+             result.details['case_data_size'] = len(json.dumps(test_data))
+
+             return result
+
+         except Exception as e:
+             # Edge cases are expected to sometimes cause exceptions
+             result.add_warning(f"Edge case {case_name} caused exception (expected): {e}")
+             result.details['exception_occurred'] = True
+             return result
+
+     def _generate_large_test_data(self) -> Dict[str, Any]:
+         """Generate large test data for performance testing."""
+         large_data = {
+             'export_path': 'test',
+             'tool_settings': {}
+         }
+
+         # Generate many tool configurations
+         for i in range(100):
+             large_data['tool_settings'][f'Test Tool {i}'] = {
+                 'setting1': f'value_{i}',
+                 'setting2': i * 10,
+                 'setting3': [f'item_{j}' for j in range(10)],
+                 'nested': {
+                     'deep': {
+                         'value': f'deep_value_{i}'
+                     }
+                 }
+             }
+
+         return large_data
+
+     def _generate_unicode_test_data(self) -> Dict[str, Any]:
+         """Generate test data with Unicode content."""
+         return {
+             'export_path': 'тест',
+             'tool_settings': {
+                 'Unicode Tool': {
+                     'name': '测试工具',
+                     'description': 'Тестовое описание',
+                     'emoji': '🚀🔧⚙️',
+                     'special_chars': '©®™€£¥'
+                 }
+             }
+         }
+
+     def _generate_encrypted_test_data(self) -> Dict[str, Any]:
+         """Generate test data with encrypted keys."""
+         return {
+             'export_path': 'test',
+             'tool_settings': {
+                 'Encrypted Tool': {
+                     'API_KEY': 'ENC:dGVzdF9lbmNyeXB0ZWRfa2V5',
+                     'SECRET': 'ENC:YW5vdGhlcl9lbmNyeXB0ZWRfc2VjcmV0',
+                     'normal_setting': 'plain_value'
+                 }
+             }
+         }
+
+     def _generate_invalid_type_data(self) -> Dict[str, Any]:
+         """Generate test data with invalid types."""
+         return {
+             'export_path': 123,  # should be a string
+             'tool_settings': {
+                 'Invalid Tool': {
+                     'setting': float('inf'),   # json.dump emits non-standard 'Infinity'
+                     'another': complex(1, 2)   # not JSON-serializable; json.dump raises TypeError
+                 }
+             }
+         }
+
+     def _generate_extra_fields_data(self) -> Dict[str, Any]:
+         """Generate test data with extra unknown fields."""
+         return {
+             'export_path': 'test',
+             'unknown_field': 'should_be_preserved',
+             'tool_settings': {
+                 'Extra Fields Tool': {
+                     'standard_setting': 'value',
+                     'custom_field': 'custom_value',
+                     'metadata': {
+                         'version': '1.0',
+                         'author': 'test'
+                     }
+                 }
+             },
+             'experimental_feature': True
+         }
+
+     def _test_migration_performance(self, size_name: str, tool_count: int) -> ValidationResult:
+         """Test migration performance with specified data size."""
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             # Generate test data
+             test_data = {
+                 'export_path': 'test',
+                 'tool_settings': {}
+             }
+
+             for i in range(tool_count):
+                 test_data['tool_settings'][f'Tool_{i}'] = {
+                     'setting1': f'value_{i}',
+                     'setting2': i,
+                     'nested': {'deep': f'deep_{i}'}
+                 }
+
+             # Create test file
+             test_file = os.path.join(self._test_data_dir or tempfile.gettempdir(),
+                                      f"perf_test_{size_name}.json")
+
+             start_time = datetime.now()
+             with open(test_file, 'w', encoding='utf-8') as f:
+                 json.dump(test_data, f)
+             write_time = (datetime.now() - start_time).total_seconds()
+
+             self._temp_files.append(test_file)
+
+             # Test migration performance
+             start_time = datetime.now()
+             migration_success = self.migration_manager.migrate_from_json(test_file, validate=False)
+             migration_time = (datetime.now() - start_time).total_seconds()
+
+             result.details = {
+                 'tool_count': tool_count,
+                 'file_size': os.path.getsize(test_file),
+                 'write_time': write_time,
+                 'migration_time': migration_time,
+                 'migration_success': migration_success,
+                 'tools_per_second': tool_count / migration_time if migration_time > 0 else 0
+             }
+
+             if not migration_success:
+                 result.add_error(f"Performance test failed for {size_name} ({tool_count} tools)")
+
+             return result
+
+         except Exception as e:
+             result.add_error(f"Performance test failed for {size_name}: {e}")
+             return result
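+
+     # Timing note (sketch): for sub-second migrations, time.perf_counter()
+     # is a steadier clock than subtracting datetime.now() values:
+     #
+     #     import time
+     #     t0 = time.perf_counter()
+     #     self.migration_manager.migrate_from_json(test_file, validate=False)
+     #     migration_time = time.perf_counter() - t0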
+
+     def _validate_backup_integrity(self, original_file: str, backup_file: str) -> bool:
+         """Validate backup file integrity using checksums."""
+         try:
+             def get_file_hash(filepath: str) -> str:
+                 with open(filepath, 'rb') as f:
+                     return hashlib.sha256(f.read()).hexdigest()
+
+             original_hash = get_file_hash(original_file)
+             backup_hash = get_file_hash(backup_file)
+
+             return original_hash == backup_hash
+
+         except Exception as e:
+             self.logger.error(f"Backup integrity validation failed: {e}")
+             return False
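+
+     # Memory note (sketch): f.read() loads the whole file at once; a chunked
+     # digest keeps memory flat for very large settings files:
+     #
+     #     def get_file_hash(filepath: str) -> str:
+     #         sha = hashlib.sha256()
+     #         with open(filepath, 'rb') as f:
+     #             for chunk in iter(lambda: f.read(65536), b''):
+     #                 sha.update(chunk)
+     #         return sha.hexdigest()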
+
+     def _validate_rollback_integrity(self, backup_filepath: str) -> ValidationResult:
+         """Validate rollback operation integrity."""
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             # Determine original file path
+             original_path = backup_filepath.split('.backup_')[0]
+
+             if os.path.exists(original_path):
+                 integrity_valid = self._validate_backup_integrity(backup_filepath, original_path)
+
+                 if integrity_valid:
+                     result.details['rollback_integrity'] = True
+                 else:
+                     result.add_error("Rollback integrity check failed")
+                     result.details['rollback_integrity'] = False
+             else:
+                 result.add_warning("Original file not found for integrity check")
+
+             return result
+
+         except Exception as e:
+             result.add_error(f"Rollback integrity validation failed: {e}")
+             return result
+
+     def _validate_database_types(self, conn: sqlite3.Connection) -> ValidationResult:
+         """Validate database data types and serialization."""
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             # Check data types in each table; table names come from this
+             # fixed list, so f-string interpolation into SQL is safe here
+             tables_to_check = [
+                 ('core_settings', ['key', 'value', 'data_type']),
+                 ('tool_settings', ['tool_name', 'setting_path', 'setting_value', 'data_type']),
+                 ('tab_content', ['tab_type', 'tab_index', 'content'])
+             ]
+
+             for table_name, columns in tables_to_check:
+                 cursor = conn.execute(f"SELECT * FROM {table_name} LIMIT 5")
+                 rows = cursor.fetchall()
+
+                 result.details[f'{table_name}_sample_count'] = len(rows)
+
+                 # Check for data type consistency
+                 if 'data_type' in columns:
+                     cursor = conn.execute(f"SELECT DISTINCT data_type FROM {table_name}")
+                     data_types = [row[0] for row in cursor.fetchall()]
+                     result.details[f'{table_name}_data_types'] = data_types
+
+             return result
+
+         except Exception as e:
+             result.add_error(f"Database type validation failed: {e}")
+             return result
+
+     def _deep_compare_data(self, original: Dict[str, Any], migrated: Dict[str, Any]) -> ValidationResult:
+         """Perform deep comparison of original and migrated data."""
+         result = ValidationResult(True, [], [], {})
+
+         try:
+             # Compare keys
+             original_keys = set(original.keys())
+             migrated_keys = set(migrated.keys())
+
+             missing_keys = original_keys - migrated_keys
+             extra_keys = migrated_keys - original_keys
+
+             if missing_keys:
+                 result.add_error(f"Missing keys in migrated data: {missing_keys}")
+
+             if extra_keys:
+                 result.add_warning(f"Extra keys in migrated data: {extra_keys}")
+
+             # Compare values for common keys
+             common_keys = original_keys & migrated_keys
+             value_differences = []
+
+             for key in common_keys:
+                 if not self._deep_equal(original[key], migrated[key]):
+                     value_differences.append(key)
+
+             if value_differences:
+                 result.add_error(f"Value differences found in keys: {value_differences}")
+
+             result.details['comparison_summary'] = {
+                 'total_original_keys': len(original_keys),
+                 'total_migrated_keys': len(migrated_keys),
+                 'common_keys': len(common_keys),
+                 'missing_keys': len(missing_keys),
+                 'extra_keys': len(extra_keys),
+                 'value_differences': len(value_differences)
+             }
+
+             return result
+
+         except Exception as e:
+             result.add_error(f"Deep comparison failed: {e}")
+             return result
+
+     def _deep_equal(self, obj1: Any, obj2: Any) -> bool:
+         """Deep equality comparison with type checking."""
+         if type(obj1) is not type(obj2):
+             return False
+
+         if isinstance(obj1, dict):
+             if set(obj1.keys()) != set(obj2.keys()):
+                 return False
+             return all(self._deep_equal(obj1[k], obj2[k]) for k in obj1.keys())
+
+         elif isinstance(obj1, list):
+             if len(obj1) != len(obj2):
+                 return False
+             return all(self._deep_equal(obj1[i], obj2[i]) for i in range(len(obj1)))
+
+         else:
+             return obj1 == obj2
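+
+     # Strictness example: the type check above distinguishes values that a
+     # plain == comparison would treat as equal:
+     #
+     #     self._deep_equal({'a': 1}, {'a': 1})    # True
+     #     self._deep_equal({'a': 1}, {'a': 1.0})  # False (int vs float)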
+
+     def _calculate_integrity_metrics(self, original: Dict[str, Any], migrated: Dict[str, Any]) -> Dict[str, Any]:
+         """Calculate data integrity metrics."""
+         try:
+             original_str = json.dumps(original, sort_keys=True)
+             migrated_str = json.dumps(migrated, sort_keys=True)
+
+             # Calculate similarity metrics
+             original_size = len(original_str)
+             migrated_size = len(migrated_str)
+
+             # Simple character-level similarity
+             min_len = min(original_size, migrated_size)
+             max_len = max(original_size, migrated_size)
+
+             matching_chars = sum(1 for i in range(min_len)
+                                  if original_str[i] == migrated_str[i])
+
+             similarity = matching_chars / max_len if max_len > 0 else 1.0
+
+             return {
+                 'original_size': original_size,
+                 'migrated_size': migrated_size,
+                 'size_difference': abs(original_size - migrated_size),
+                 'character_similarity': similarity,
+                 'exact_match': original_str == migrated_str
+             }
+
+         except Exception as e:
+             return {
+                 'error': str(e),
+                 'exact_match': False
+             }