pomera-ai-commander 1.1.1 → 1.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -21
- package/README.md +105 -680
- package/bin/pomera-ai-commander.js +62 -62
- package/core/__init__.py +65 -65
- package/core/app_context.py +482 -482
- package/core/async_text_processor.py +421 -421
- package/core/backup_manager.py +655 -655
- package/core/backup_recovery_manager.py +1199 -1033
- package/core/content_hash_cache.py +508 -508
- package/core/context_menu.py +313 -313
- package/core/data_directory.py +549 -0
- package/core/data_validator.py +1066 -1066
- package/core/database_connection_manager.py +744 -744
- package/core/database_curl_settings_manager.py +608 -608
- package/core/database_promera_ai_settings_manager.py +446 -446
- package/core/database_schema.py +411 -411
- package/core/database_schema_manager.py +395 -395
- package/core/database_settings_manager.py +1507 -1507
- package/core/database_settings_manager_interface.py +456 -456
- package/core/dialog_manager.py +734 -734
- package/core/diff_utils.py +239 -0
- package/core/efficient_line_numbers.py +540 -510
- package/core/error_handler.py +746 -746
- package/core/error_service.py +431 -431
- package/core/event_consolidator.py +511 -511
- package/core/mcp/__init__.py +43 -43
- package/core/mcp/find_replace_diff.py +334 -0
- package/core/mcp/protocol.py +288 -288
- package/core/mcp/schema.py +251 -251
- package/core/mcp/server_stdio.py +299 -299
- package/core/mcp/tool_registry.py +2699 -2345
- package/core/memento.py +275 -0
- package/core/memory_efficient_text_widget.py +711 -711
- package/core/migration_manager.py +914 -914
- package/core/migration_test_suite.py +1085 -1085
- package/core/migration_validator.py +1143 -1143
- package/core/optimized_find_replace.py +714 -714
- package/core/optimized_pattern_engine.py +424 -424
- package/core/optimized_search_highlighter.py +552 -552
- package/core/performance_monitor.py +674 -674
- package/core/persistence_manager.py +712 -712
- package/core/progressive_stats_calculator.py +632 -632
- package/core/regex_pattern_cache.py +529 -529
- package/core/regex_pattern_library.py +350 -350
- package/core/search_operation_manager.py +434 -434
- package/core/settings_defaults_registry.py +1087 -1087
- package/core/settings_integrity_validator.py +1111 -1111
- package/core/settings_serializer.py +557 -557
- package/core/settings_validator.py +1823 -1823
- package/core/smart_stats_calculator.py +709 -709
- package/core/statistics_update_manager.py +619 -619
- package/core/stats_config_manager.py +858 -858
- package/core/streaming_text_handler.py +723 -723
- package/core/task_scheduler.py +596 -596
- package/core/update_pattern_library.py +168 -168
- package/core/visibility_monitor.py +596 -596
- package/core/widget_cache.py +498 -498
- package/mcp.json +51 -61
- package/migrate_data.py +127 -0
- package/package.json +64 -57
- package/pomera.py +7883 -7482
- package/pomera_mcp_server.py +183 -144
- package/requirements.txt +33 -0
- package/scripts/Dockerfile.alpine +43 -0
- package/scripts/Dockerfile.gui-test +54 -0
- package/scripts/Dockerfile.linux +43 -0
- package/scripts/Dockerfile.test-linux +80 -0
- package/scripts/Dockerfile.ubuntu +39 -0
- package/scripts/README.md +53 -0
- package/scripts/build-all.bat +113 -0
- package/scripts/build-docker.bat +53 -0
- package/scripts/build-docker.sh +55 -0
- package/scripts/build-optimized.bat +101 -0
- package/scripts/build.sh +78 -0
- package/scripts/docker-compose.test.yml +27 -0
- package/scripts/docker-compose.yml +32 -0
- package/scripts/postinstall.js +62 -0
- package/scripts/requirements-minimal.txt +33 -0
- package/scripts/test-linux-simple.bat +28 -0
- package/scripts/validate-release-workflow.py +450 -0
- package/tools/__init__.py +4 -4
- package/tools/ai_tools.py +2891 -2891
- package/tools/ascii_art_generator.py +352 -352
- package/tools/base64_tools.py +183 -183
- package/tools/base_tool.py +511 -511
- package/tools/case_tool.py +308 -308
- package/tools/column_tools.py +395 -395
- package/tools/cron_tool.py +884 -884
- package/tools/curl_history.py +600 -600
- package/tools/curl_processor.py +1207 -1207
- package/tools/curl_settings.py +502 -502
- package/tools/curl_tool.py +5467 -5467
- package/tools/diff_viewer.py +1817 -1072
- package/tools/email_extraction_tool.py +248 -248
- package/tools/email_header_analyzer.py +425 -425
- package/tools/extraction_tools.py +250 -250
- package/tools/find_replace.py +2289 -1750
- package/tools/folder_file_reporter.py +1463 -1463
- package/tools/folder_file_reporter_adapter.py +480 -480
- package/tools/generator_tools.py +1216 -1216
- package/tools/hash_generator.py +255 -255
- package/tools/html_tool.py +656 -656
- package/tools/jsonxml_tool.py +729 -729
- package/tools/line_tools.py +419 -419
- package/tools/markdown_tools.py +561 -561
- package/tools/mcp_widget.py +1417 -1417
- package/tools/notes_widget.py +978 -973
- package/tools/number_base_converter.py +372 -372
- package/tools/regex_extractor.py +571 -571
- package/tools/slug_generator.py +310 -310
- package/tools/sorter_tools.py +458 -458
- package/tools/string_escape_tool.py +392 -392
- package/tools/text_statistics_tool.py +365 -365
- package/tools/text_wrapper.py +430 -430
- package/tools/timestamp_converter.py +421 -421
- package/tools/tool_loader.py +710 -710
- package/tools/translator_tools.py +522 -522
- package/tools/url_link_extractor.py +261 -261
- package/tools/url_parser.py +204 -204
- package/tools/whitespace_tools.py +355 -355
- package/tools/word_frequency_counter.py +146 -146
- package/core/__pycache__/__init__.cpython-313.pyc +0 -0
- package/core/__pycache__/app_context.cpython-313.pyc +0 -0
- package/core/__pycache__/async_text_processor.cpython-313.pyc +0 -0
- package/core/__pycache__/backup_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/backup_recovery_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/content_hash_cache.cpython-313.pyc +0 -0
- package/core/__pycache__/context_menu.cpython-313.pyc +0 -0
- package/core/__pycache__/data_validator.cpython-313.pyc +0 -0
- package/core/__pycache__/database_connection_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_curl_settings_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_promera_ai_settings_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_schema.cpython-313.pyc +0 -0
- package/core/__pycache__/database_schema_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_settings_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_settings_manager_interface.cpython-313.pyc +0 -0
- package/core/__pycache__/dialog_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/efficient_line_numbers.cpython-313.pyc +0 -0
- package/core/__pycache__/error_handler.cpython-313.pyc +0 -0
- package/core/__pycache__/error_service.cpython-313.pyc +0 -0
- package/core/__pycache__/event_consolidator.cpython-313.pyc +0 -0
- package/core/__pycache__/memory_efficient_text_widget.cpython-313.pyc +0 -0
- package/core/__pycache__/migration_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/migration_test_suite.cpython-313.pyc +0 -0
- package/core/__pycache__/migration_validator.cpython-313.pyc +0 -0
- package/core/__pycache__/optimized_find_replace.cpython-313.pyc +0 -0
- package/core/__pycache__/optimized_pattern_engine.cpython-313.pyc +0 -0
- package/core/__pycache__/optimized_search_highlighter.cpython-313.pyc +0 -0
- package/core/__pycache__/performance_monitor.cpython-313.pyc +0 -0
- package/core/__pycache__/persistence_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/progressive_stats_calculator.cpython-313.pyc +0 -0
- package/core/__pycache__/regex_pattern_cache.cpython-313.pyc +0 -0
- package/core/__pycache__/regex_pattern_library.cpython-313.pyc +0 -0
- package/core/__pycache__/search_operation_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/settings_defaults_registry.cpython-313.pyc +0 -0
- package/core/__pycache__/settings_integrity_validator.cpython-313.pyc +0 -0
- package/core/__pycache__/settings_serializer.cpython-313.pyc +0 -0
- package/core/__pycache__/settings_validator.cpython-313.pyc +0 -0
- package/core/__pycache__/smart_stats_calculator.cpython-313.pyc +0 -0
- package/core/__pycache__/statistics_update_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/stats_config_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/streaming_text_handler.cpython-313.pyc +0 -0
- package/core/__pycache__/task_scheduler.cpython-313.pyc +0 -0
- package/core/__pycache__/visibility_monitor.cpython-313.pyc +0 -0
- package/core/__pycache__/widget_cache.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/__init__.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/protocol.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/schema.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/server_stdio.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/tool_registry.cpython-313.pyc +0 -0
- package/tools/__pycache__/__init__.cpython-313.pyc +0 -0
- package/tools/__pycache__/ai_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/ascii_art_generator.cpython-313.pyc +0 -0
- package/tools/__pycache__/base64_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/base_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/case_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/column_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/cron_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/curl_history.cpython-313.pyc +0 -0
- package/tools/__pycache__/curl_processor.cpython-313.pyc +0 -0
- package/tools/__pycache__/curl_settings.cpython-313.pyc +0 -0
- package/tools/__pycache__/curl_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/diff_viewer.cpython-313.pyc +0 -0
- package/tools/__pycache__/email_extraction_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/email_header_analyzer.cpython-313.pyc +0 -0
- package/tools/__pycache__/extraction_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/find_replace.cpython-313.pyc +0 -0
- package/tools/__pycache__/folder_file_reporter.cpython-313.pyc +0 -0
- package/tools/__pycache__/folder_file_reporter_adapter.cpython-313.pyc +0 -0
- package/tools/__pycache__/generator_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/hash_generator.cpython-313.pyc +0 -0
- package/tools/__pycache__/html_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/huggingface_helper.cpython-313.pyc +0 -0
- package/tools/__pycache__/jsonxml_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/line_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/list_comparator.cpython-313.pyc +0 -0
- package/tools/__pycache__/markdown_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/mcp_widget.cpython-313.pyc +0 -0
- package/tools/__pycache__/notes_widget.cpython-313.pyc +0 -0
- package/tools/__pycache__/number_base_converter.cpython-313.pyc +0 -0
- package/tools/__pycache__/regex_extractor.cpython-313.pyc +0 -0
- package/tools/__pycache__/slug_generator.cpython-313.pyc +0 -0
- package/tools/__pycache__/sorter_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/string_escape_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/text_statistics_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/text_wrapper.cpython-313.pyc +0 -0
- package/tools/__pycache__/timestamp_converter.cpython-313.pyc +0 -0
- package/tools/__pycache__/tool_loader.cpython-313.pyc +0 -0
- package/tools/__pycache__/translator_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/url_link_extractor.cpython-313.pyc +0 -0
- package/tools/__pycache__/url_parser.cpython-313.pyc +0 -0
- package/tools/__pycache__/whitespace_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/word_frequency_counter.cpython-313.pyc +0 -0
|
@@ -1,1086 +1,1086 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Comprehensive Migration Test Suite
|
|
3
|
-
|
|
4
|
-
This module provides comprehensive test cases for all 15+ tool configurations
|
|
5
|
-
identified in the production codebase analysis. It includes automated testing
|
|
6
|
-
for data integrity, edge cases, and migration validation.
|
|
7
|
-
"""
|
|
8
|
-
|
|
9
|
-
import json
|
|
10
|
-
import os
|
|
11
|
-
import tempfile
|
|
12
|
-
import unittest
|
|
13
|
-
import logging
|
|
14
|
-
from typing import Dict, List, Any, Optional
|
|
15
|
-
from datetime import datetime
|
|
16
|
-
|
|
17
|
-
from .migration_manager import MigrationManager
|
|
18
|
-
from .migration_validator import MigrationValidator, ValidationResult
|
|
19
|
-
from .database_connection_manager import DatabaseConnectionManager
|
|
20
|
-
from .database_schema_manager import DatabaseSchemaManager
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
class MigrationTestSuite:
|
|
24
|
-
"""
|
|
25
|
-
Comprehensive test suite for migration system validation.
|
|
26
|
-
|
|
27
|
-
Features:
|
|
28
|
-
- Tests for all 15+ production tool configurations
|
|
29
|
-
- Edge case testing with malformed data
|
|
30
|
-
- Performance testing with large datasets
|
|
31
|
-
- Rollback and recovery testing
|
|
32
|
-
- Data integrity validation
|
|
33
|
-
"""
|
|
34
|
-
|
|
35
|
-
def __init__(self):
|
|
36
|
-
"""Initialize the test suite."""
|
|
37
|
-
self.logger = logging.getLogger(__name__)
|
|
38
|
-
self.test_results = []
|
|
39
|
-
self.temp_files = []
|
|
40
|
-
self.test_db_path = ":memory:"
|
|
41
|
-
|
|
42
|
-
# Initialize components
|
|
43
|
-
self.connection_manager = None
|
|
44
|
-
self.schema_manager = None
|
|
45
|
-
self.migration_manager = None
|
|
46
|
-
self.validator = None
|
|
47
|
-
|
|
48
|
-
def setup_test_environment(self) -> bool:
|
|
49
|
-
"""
|
|
50
|
-
Setup test environment with database and migration components.
|
|
51
|
-
|
|
52
|
-
Returns:
|
|
53
|
-
True if setup successful, False otherwise
|
|
54
|
-
"""
|
|
55
|
-
try:
|
|
56
|
-
# Initialize database components
|
|
57
|
-
self.connection_manager = DatabaseConnectionManager(self.test_db_path)
|
|
58
|
-
self.schema_manager = DatabaseSchemaManager(self.connection_manager)
|
|
59
|
-
self.migration_manager = MigrationManager(self.connection_manager)
|
|
60
|
-
self.validator = MigrationValidator(self.migration_manager)
|
|
61
|
-
|
|
62
|
-
# Initialize database schema
|
|
63
|
-
schema_success = self.schema_manager.initialize_schema()
|
|
64
|
-
if not schema_success:
|
|
65
|
-
self.logger.error("Failed to initialize test database schema")
|
|
66
|
-
return False
|
|
67
|
-
|
|
68
|
-
self.logger.info("Test environment setup completed")
|
|
69
|
-
return True
|
|
70
|
-
|
|
71
|
-
except Exception as e:
|
|
72
|
-
self.logger.error(f"Test environment setup failed: {e}")
|
|
73
|
-
return False
|
|
74
|
-
|
|
75
|
-
def run_all_tests(self) -> Dict[str, Any]:
|
|
76
|
-
"""
|
|
77
|
-
Run all migration tests and return comprehensive results.
|
|
78
|
-
|
|
79
|
-
Returns:
|
|
80
|
-
Dictionary with test results and summary
|
|
81
|
-
"""
|
|
82
|
-
if not self.setup_test_environment():
|
|
83
|
-
return {'success': False, 'error': 'Test environment setup failed'}
|
|
84
|
-
|
|
85
|
-
try:
|
|
86
|
-
self.logger.info("Starting comprehensive migration test suite")
|
|
87
|
-
|
|
88
|
-
# Test categories
|
|
89
|
-
test_categories = [
|
|
90
|
-
('tool_configurations', self.test_all_tool_configurations),
|
|
91
|
-
('edge_cases', self.test_edge_cases),
|
|
92
|
-
('performance', self.test_performance_scenarios),
|
|
93
|
-
('rollback_procedures', self.test_rollback_procedures),
|
|
94
|
-
('data_integrity', self.test_data_integrity),
|
|
95
|
-
('concurrent_access', self.test_concurrent_access),
|
|
96
|
-
('large_datasets', self.test_large_datasets),
|
|
97
|
-
('unicode_support', self.test_unicode_support),
|
|
98
|
-
('encrypted_data', self.test_encrypted_data_handling),
|
|
99
|
-
('schema_validation', self.test_schema_validation)
|
|
100
|
-
]
|
|
101
|
-
|
|
102
|
-
results = {
|
|
103
|
-
'test_summary': {
|
|
104
|
-
'total_categories': len(test_categories),
|
|
105
|
-
'start_time': datetime.now().isoformat(),
|
|
106
|
-
'passed_categories': 0,
|
|
107
|
-
'failed_categories': 0
|
|
108
|
-
},
|
|
109
|
-
'category_results': {},
|
|
110
|
-
'overall_success': True
|
|
111
|
-
}
|
|
112
|
-
|
|
113
|
-
# Run each test category
|
|
114
|
-
for category_name, test_function in test_categories:
|
|
115
|
-
try:
|
|
116
|
-
self.logger.info(f"Running test category: {category_name}")
|
|
117
|
-
category_result = test_function()
|
|
118
|
-
|
|
119
|
-
results['category_results'][category_name] = category_result
|
|
120
|
-
|
|
121
|
-
if category_result.get('success', False):
|
|
122
|
-
results['test_summary']['passed_categories'] += 1
|
|
123
|
-
else:
|
|
124
|
-
results['test_summary']['failed_categories'] += 1
|
|
125
|
-
results['overall_success'] = False
|
|
126
|
-
|
|
127
|
-
except Exception as e:
|
|
128
|
-
self.logger.error(f"Test category {category_name} failed with exception: {e}")
|
|
129
|
-
results['category_results'][category_name] = {
|
|
130
|
-
'success': False,
|
|
131
|
-
'error': str(e),
|
|
132
|
-
'exception': True
|
|
133
|
-
}
|
|
134
|
-
results['test_summary']['failed_categories'] += 1
|
|
135
|
-
results['overall_success'] = False
|
|
136
|
-
|
|
137
|
-
results['test_summary']['end_time'] = datetime.now().isoformat()
|
|
138
|
-
|
|
139
|
-
self.logger.info("Comprehensive migration test suite completed")
|
|
140
|
-
return results
|
|
141
|
-
|
|
142
|
-
except Exception as e:
|
|
143
|
-
self.logger.error(f"Test suite execution failed: {e}")
|
|
144
|
-
return {
|
|
145
|
-
'success': False,
|
|
146
|
-
'error': str(e),
|
|
147
|
-
'test_summary': {'total_categories': 0, 'passed_categories': 0, 'failed_categories': 0}
|
|
148
|
-
}
|
|
149
|
-
|
|
150
|
-
finally:
|
|
151
|
-
self.cleanup_test_environment()
|
|
152
|
-
|
|
153
|
-
def test_all_tool_configurations(self) -> Dict[str, Any]:
|
|
154
|
-
"""Test migration for all known tool configurations."""
|
|
155
|
-
try:
|
|
156
|
-
self.logger.info("Testing all tool configurations")
|
|
157
|
-
|
|
158
|
-
# Production tool configurations based on analysis
|
|
159
|
-
tool_configs = self._get_production_tool_configurations()
|
|
160
|
-
|
|
161
|
-
results = {
|
|
162
|
-
'success': True,
|
|
163
|
-
'total_tools': len(tool_configs),
|
|
164
|
-
'passed_tools': 0,
|
|
165
|
-
'failed_tools': 0,
|
|
166
|
-
'tool_results': {}
|
|
167
|
-
}
|
|
168
|
-
|
|
169
|
-
for tool_name, tool_config in tool_configs.items():
|
|
170
|
-
try:
|
|
171
|
-
tool_result = self._test_single_tool_migration(tool_name, tool_config)
|
|
172
|
-
results['tool_results'][tool_name] = tool_result
|
|
173
|
-
|
|
174
|
-
if tool_result['success']:
|
|
175
|
-
results['passed_tools'] += 1
|
|
176
|
-
else:
|
|
177
|
-
results['failed_tools'] += 1
|
|
178
|
-
results['success'] = False
|
|
179
|
-
|
|
180
|
-
except Exception as e:
|
|
181
|
-
self.logger.error(f"Tool test failed for {tool_name}: {e}")
|
|
182
|
-
results['tool_results'][tool_name] = {
|
|
183
|
-
'success': False,
|
|
184
|
-
'error': str(e)
|
|
185
|
-
}
|
|
186
|
-
results['failed_tools'] += 1
|
|
187
|
-
results['success'] = False
|
|
188
|
-
|
|
189
|
-
return results
|
|
190
|
-
|
|
191
|
-
except Exception as e:
|
|
192
|
-
return {'success': False, 'error': str(e)}
|
|
193
|
-
|
|
194
|
-
def test_edge_cases(self) -> Dict[str, Any]:
|
|
195
|
-
"""Test migration with various edge cases and malformed data."""
|
|
196
|
-
try:
|
|
197
|
-
self.logger.info("Testing edge cases")
|
|
198
|
-
|
|
199
|
-
edge_cases = [
|
|
200
|
-
('empty_settings', {}),
|
|
201
|
-
('null_values', {'tool_settings': {'Test Tool': None}}),
|
|
202
|
-
('missing_required_fields', {'tool_settings': {}}),
|
|
203
|
-
('invalid_json_structure', {'tool_settings': 'invalid'}),
|
|
204
|
-
('circular_references', self._create_circular_reference_data()),
|
|
205
|
-
('extremely_nested', self._create_deeply_nested_data()),
|
|
206
|
-
('special_characters', self._create_special_characters_data()),
|
|
207
|
-
('large_strings', self._create_large_strings_data()),
|
|
208
|
-
('invalid_unicode', self._create_invalid_unicode_data()),
|
|
209
|
-
('mixed_data_types', self._create_mixed_types_data())
|
|
210
|
-
]
|
|
211
|
-
|
|
212
|
-
results = {
|
|
213
|
-
'success': True,
|
|
214
|
-
'total_cases': len(edge_cases),
|
|
215
|
-
'passed_cases': 0,
|
|
216
|
-
'failed_cases': 0,
|
|
217
|
-
'case_results': {}
|
|
218
|
-
}
|
|
219
|
-
|
|
220
|
-
for case_name, test_data in edge_cases:
|
|
221
|
-
try:
|
|
222
|
-
case_result = self._test_edge_case_migration(case_name, test_data)
|
|
223
|
-
results['case_results'][case_name] = case_result
|
|
224
|
-
|
|
225
|
-
# For edge cases, we expect some to fail gracefully
|
|
226
|
-
if case_result.get('handled_gracefully', False):
|
|
227
|
-
results['passed_cases'] += 1
|
|
228
|
-
else:
|
|
229
|
-
results['failed_cases'] += 1
|
|
230
|
-
# Don't mark overall as failed for expected edge case failures
|
|
231
|
-
|
|
232
|
-
except Exception as e:
|
|
233
|
-
self.logger.warning(f"Edge case {case_name} caused exception (may be expected): {e}")
|
|
234
|
-
results['case_results'][case_name] = {
|
|
235
|
-
'handled_gracefully': True,
|
|
236
|
-
'exception': str(e)
|
|
237
|
-
}
|
|
238
|
-
results['passed_cases'] += 1
|
|
239
|
-
|
|
240
|
-
return results
|
|
241
|
-
|
|
242
|
-
except Exception as e:
|
|
243
|
-
return {'success': False, 'error': str(e)}
|
|
244
|
-
|
|
245
|
-
def test_performance_scenarios(self) -> Dict[str, Any]:
|
|
246
|
-
"""Test migration performance with various data sizes."""
|
|
247
|
-
try:
|
|
248
|
-
self.logger.info("Testing performance scenarios")
|
|
249
|
-
|
|
250
|
-
performance_tests = [
|
|
251
|
-
('small_dataset', 10, 100), # 10 tools, 100 settings each
|
|
252
|
-
('medium_dataset', 50, 200), # 50 tools, 200 settings each
|
|
253
|
-
('large_dataset', 100, 500), # 100 tools, 500 settings each
|
|
254
|
-
('xlarge_dataset', 200, 1000), # 200 tools, 1000 settings each
|
|
255
|
-
('stress_test', 500, 2000) # 500 tools, 2000 settings each
|
|
256
|
-
]
|
|
257
|
-
|
|
258
|
-
results = {
|
|
259
|
-
'success': True,
|
|
260
|
-
'total_tests': len(performance_tests),
|
|
261
|
-
'passed_tests': 0,
|
|
262
|
-
'failed_tests': 0,
|
|
263
|
-
'performance_results': {}
|
|
264
|
-
}
|
|
265
|
-
|
|
266
|
-
for test_name, tool_count, settings_per_tool in performance_tests:
|
|
267
|
-
try:
|
|
268
|
-
perf_result = self._test_performance_scenario(test_name, tool_count, settings_per_tool)
|
|
269
|
-
results['performance_results'][test_name] = perf_result
|
|
270
|
-
|
|
271
|
-
if perf_result['success']:
|
|
272
|
-
results['passed_tests'] += 1
|
|
273
|
-
else:
|
|
274
|
-
results['failed_tests'] += 1
|
|
275
|
-
results['success'] = False
|
|
276
|
-
|
|
277
|
-
except Exception as e:
|
|
278
|
-
self.logger.error(f"Performance test {test_name} failed: {e}")
|
|
279
|
-
results['performance_results'][test_name] = {
|
|
280
|
-
'success': False,
|
|
281
|
-
'error': str(e)
|
|
282
|
-
}
|
|
283
|
-
results['failed_tests'] += 1
|
|
284
|
-
results['success'] = False
|
|
285
|
-
|
|
286
|
-
return results
|
|
287
|
-
|
|
288
|
-
except Exception as e:
|
|
289
|
-
return {'success': False, 'error': str(e)}
|
|
290
|
-
|
|
291
|
-
def test_rollback_procedures(self) -> Dict[str, Any]:
|
|
292
|
-
"""Test rollback and recovery procedures."""
|
|
293
|
-
try:
|
|
294
|
-
self.logger.info("Testing rollback procedures")
|
|
295
|
-
|
|
296
|
-
# Create test data
|
|
297
|
-
test_data = self._get_sample_settings_data()
|
|
298
|
-
test_file = self._create_temp_json_file(test_data, "rollback_test.json")
|
|
299
|
-
|
|
300
|
-
results = {
|
|
301
|
-
'success': True,
|
|
302
|
-
'tests': {}
|
|
303
|
-
}
|
|
304
|
-
|
|
305
|
-
# Test 1: Backup creation
|
|
306
|
-
backup_success, backup_path = self.validator.create_automatic_backup(test_file)
|
|
307
|
-
results['tests']['backup_creation'] = {
|
|
308
|
-
'success': backup_success,
|
|
309
|
-
'backup_path': backup_path
|
|
310
|
-
}
|
|
311
|
-
|
|
312
|
-
if not backup_success:
|
|
313
|
-
results['success'] = False
|
|
314
|
-
return results
|
|
315
|
-
|
|
316
|
-
# Test 2: Migration with backup
|
|
317
|
-
migration_success = self.migration_manager.migrate_from_json(test_file)
|
|
318
|
-
results['tests']['migration_with_backup'] = {
|
|
319
|
-
'success': migration_success
|
|
320
|
-
}
|
|
321
|
-
|
|
322
|
-
# Test 3: Rollback procedure
|
|
323
|
-
if backup_path:
|
|
324
|
-
rollback_result = self.validator.test_rollback_procedures(backup_path)
|
|
325
|
-
results['tests']['rollback_procedure'] = {
|
|
326
|
-
'success': rollback_result.success,
|
|
327
|
-
'errors': rollback_result.errors,
|
|
328
|
-
'warnings': rollback_result.warnings
|
|
329
|
-
}
|
|
330
|
-
|
|
331
|
-
if not rollback_result.success:
|
|
332
|
-
results['success'] = False
|
|
333
|
-
|
|
334
|
-
return results
|
|
335
|
-
|
|
336
|
-
except Exception as e:
|
|
337
|
-
return {'success': False, 'error': str(e)}
|
|
338
|
-
|
|
339
|
-
def test_data_integrity(self) -> Dict[str, Any]:
|
|
340
|
-
"""Test data integrity through complete migration cycles."""
|
|
341
|
-
try:
|
|
342
|
-
self.logger.info("Testing data integrity")
|
|
343
|
-
|
|
344
|
-
# Test with production-like data
|
|
345
|
-
test_data = self._get_comprehensive_test_data()
|
|
346
|
-
test_file = self._create_temp_json_file(test_data, "integrity_test.json")
|
|
347
|
-
|
|
348
|
-
# Perform comprehensive validation
|
|
349
|
-
validation_result = self.validator.validate_complete_migration(test_file)
|
|
350
|
-
|
|
351
|
-
return {
|
|
352
|
-
'success': validation_result.success,
|
|
353
|
-
'errors': validation_result.errors,
|
|
354
|
-
'warnings': validation_result.warnings,
|
|
355
|
-
'details': validation_result.details
|
|
356
|
-
}
|
|
357
|
-
|
|
358
|
-
except Exception as e:
|
|
359
|
-
return {'success': False, 'error': str(e)}
|
|
360
|
-
|
|
361
|
-
def test_concurrent_access(self) -> Dict[str, Any]:
|
|
362
|
-
"""Test concurrent access scenarios."""
|
|
363
|
-
try:
|
|
364
|
-
self.logger.info("Testing concurrent access")
|
|
365
|
-
|
|
366
|
-
# For this implementation, we'll test basic concurrent operations
|
|
367
|
-
# In a full implementation, this would use threading
|
|
368
|
-
|
|
369
|
-
results = {
|
|
370
|
-
'success': True,
|
|
371
|
-
'concurrent_operations': 0,
|
|
372
|
-
'failed_operations': 0
|
|
373
|
-
}
|
|
374
|
-
|
|
375
|
-
# Simulate multiple operations
|
|
376
|
-
test_data = self._get_sample_settings_data()
|
|
377
|
-
|
|
378
|
-
for i in range(5):
|
|
379
|
-
try:
|
|
380
|
-
# Create separate test file for each operation
|
|
381
|
-
test_file = self._create_temp_json_file(test_data, f"concurrent_test_{i}.json")
|
|
382
|
-
|
|
383
|
-
# Perform migration
|
|
384
|
-
success = self.migration_manager.migrate_from_json(test_file)
|
|
385
|
-
|
|
386
|
-
if success:
|
|
387
|
-
results['concurrent_operations'] += 1
|
|
388
|
-
else:
|
|
389
|
-
results['failed_operations'] += 1
|
|
390
|
-
results['success'] = False
|
|
391
|
-
|
|
392
|
-
except Exception as e:
|
|
393
|
-
self.logger.error(f"Concurrent operation {i} failed: {e}")
|
|
394
|
-
results['failed_operations'] += 1
|
|
395
|
-
results['success'] = False
|
|
396
|
-
|
|
397
|
-
return results
|
|
398
|
-
|
|
399
|
-
except Exception as e:
|
|
400
|
-
return {'success': False, 'error': str(e)}
|
|
401
|
-
|
|
402
|
-
def test_large_datasets(self) -> Dict[str, Any]:
|
|
403
|
-
"""Test migration with large datasets."""
|
|
404
|
-
try:
|
|
405
|
-
self.logger.info("Testing large datasets")
|
|
406
|
-
|
|
407
|
-
# Create large dataset
|
|
408
|
-
large_data = self._create_large_dataset(1000, 5000) # 1000 tools, 5000 settings each
|
|
409
|
-
test_file = self._create_temp_json_file(large_data, "large_dataset_test.json")
|
|
410
|
-
|
|
411
|
-
start_time = datetime.now()
|
|
412
|
-
|
|
413
|
-
# Test migration
|
|
414
|
-
migration_success = self.migration_manager.migrate_from_json(test_file)
|
|
415
|
-
|
|
416
|
-
migration_time = (datetime.now() - start_time).total_seconds()
|
|
417
|
-
|
|
418
|
-
return {
|
|
419
|
-
'success': migration_success,
|
|
420
|
-
'dataset_size': len(json.dumps(large_data)),
|
|
421
|
-
'tool_count': len(large_data.get('tool_settings', {})),
|
|
422
|
-
'migration_time': migration_time,
|
|
423
|
-
'performance_acceptable': migration_time < 60 # Should complete within 60 seconds
|
|
424
|
-
}
|
|
425
|
-
|
|
426
|
-
except Exception as e:
|
|
427
|
-
return {'success': False, 'error': str(e)}
|
|
428
|
-
|
|
429
|
-
def test_unicode_support(self) -> Dict[str, Any]:
|
|
430
|
-
"""Test Unicode and international character support."""
|
|
431
|
-
try:
|
|
432
|
-
self.logger.info("Testing Unicode support")
|
|
433
|
-
|
|
434
|
-
unicode_data = {
|
|
435
|
-
'export_path': 'тест/测试/テスト',
|
|
436
|
-
'tool_settings': {
|
|
437
|
-
'Unicode Tool 中文': {
|
|
438
|
-
'name': 'Тестовый инструмент',
|
|
439
|
-
'description': 'ツールの説明',
|
|
440
|
-
'emoji_settings': '🚀🔧⚙️🛠️',
|
|
441
|
-
'special_chars': '©®™€£¥§¶•‰',
|
|
442
|
-
'math_symbols': '∑∏∆∇∂∫√∞',
|
|
443
|
-
'arrows': '←→↑↓↔↕⇄⇅',
|
|
444
|
-
'multilingual': {
|
|
445
|
-
'english': 'Hello World',
|
|
446
|
-
'chinese': '你好世界',
|
|
447
|
-
'japanese': 'こんにちは世界',
|
|
448
|
-
'russian': 'Привет мир',
|
|
449
|
-
'arabic': 'مرحبا بالعالم',
|
|
450
|
-
'hebrew': 'שלום עולם'
|
|
451
|
-
}
|
|
452
|
-
}
|
|
453
|
-
}
|
|
454
|
-
}
|
|
455
|
-
|
|
456
|
-
test_file = self._create_temp_json_file(unicode_data, "unicode_test.json")
|
|
457
|
-
|
|
458
|
-
# Test migration
|
|
459
|
-
migration_success = self.migration_manager.migrate_from_json(test_file)
|
|
460
|
-
|
|
461
|
-
if not migration_success:
|
|
462
|
-
return {'success': False, 'error': 'Unicode migration failed'}
|
|
463
|
-
|
|
464
|
-
# Test reverse migration
|
|
465
|
-
reverse_file = self._create_temp_file_path("unicode_reverse.json")
|
|
466
|
-
reverse_success = self.migration_manager.migrate_to_json(reverse_file)
|
|
467
|
-
|
|
468
|
-
if not reverse_success:
|
|
469
|
-
return {'success': False, 'error': 'Unicode reverse migration failed'}
|
|
470
|
-
|
|
471
|
-
# Validate Unicode preservation
|
|
472
|
-
with open(reverse_file, 'r', encoding='utf-8') as f:
|
|
473
|
-
restored_data = json.load(f)
|
|
474
|
-
|
|
475
|
-
unicode_preserved = self._compare_unicode_data(unicode_data, restored_data)
|
|
476
|
-
|
|
477
|
-
return {
|
|
478
|
-
'success': unicode_preserved,
|
|
479
|
-
'migration_success': migration_success,
|
|
480
|
-
'reverse_migration_success': reverse_success,
|
|
481
|
-
'unicode_preserved': unicode_preserved
|
|
482
|
-
}
|
|
483
|
-
|
|
484
|
-
except Exception as e:
|
|
485
|
-
return {'success': False, 'error': str(e)}
|
|
486
|
-
|
|
487
|
-
def test_encrypted_data_handling(self) -> Dict[str, Any]:
|
|
488
|
-
"""Test handling of encrypted data (API keys with ENC: prefix)."""
|
|
489
|
-
try:
|
|
490
|
-
self.logger.info("Testing encrypted data handling")
|
|
491
|
-
|
|
492
|
-
encrypted_data = {
|
|
493
|
-
'export_path': 'test',
|
|
494
|
-
'tool_settings': {
|
|
495
|
-
'Encrypted Tool': {
|
|
496
|
-
'API_KEY': 'ENC:dGVzdF9lbmNyeXB0ZWRfa2V5X3ZhbHVl',
|
|
497
|
-
'SECRET_TOKEN': 'ENC:YW5vdGhlcl9lbmNyeXB0ZWRfc2VjcmV0',
|
|
498
|
-
'normal_setting': 'plain_text_value',
|
|
499
|
-
'nested_encrypted': {
|
|
500
|
-
'PRIVATE_KEY': 'ENC:cHJpdmF0ZV9rZXlfZW5jcnlwdGVk',
|
|
501
|
-
'public_setting': 'public_value'
|
|
502
|
-
}
|
|
503
|
-
},
|
|
504
|
-
'AWS Bedrock': {
|
|
505
|
-
'API_KEY': 'ENC:Z0FBQUFBQm81ZEI4alg1a2UzU1ZUWXc3VWVacjhxUS1IUDhvV1RyM1FGSU85ZTNZWlZQbnRLZGI0aUxxOUJKSU02aGxIbG9tNGlienFhWHE2cVdCWERkc0R1MEZLd3hGTW9Pa3oyYjBZRmNtTUJnVzdfdUNfRjlXSkI2ZFRUS1dYR3BBM0FraVJlREk3NlUtUmhQWl9Md1VQRTluNDk5dUo1NmxBX3JZSWtYWTQyQjhtQzh6NGlSdk1ZcnlIbEx1TnBLUi1Ua0R1d1hPWWo4X1V2MG92c1JRaDBoY25EcVFZRjZGV2ZGeXBObk8xQTJlVTRjUHdhbkE0Z3d0VkVIUHhJRkpfMGV1X21hWA==',
|
|
506
|
-
'MODEL': 'anthropic.claude-3-5-sonnet-20240620-v1:0'
|
|
507
|
-
}
|
|
508
|
-
}
|
|
509
|
-
}
|
|
510
|
-
|
|
511
|
-
test_file = self._create_temp_json_file(encrypted_data, "encrypted_test.json")
|
|
512
|
-
|
|
513
|
-
# Test migration
|
|
514
|
-
migration_success = self.migration_manager.migrate_from_json(test_file)
|
|
515
|
-
|
|
516
|
-
if not migration_success:
|
|
517
|
-
return {'success': False, 'error': 'Encrypted data migration failed'}
|
|
518
|
-
|
|
519
|
-
# Test reverse migration
|
|
520
|
-
reverse_file = self._create_temp_file_path("encrypted_reverse.json")
|
|
521
|
-
reverse_success = self.migration_manager.migrate_to_json(reverse_file)
|
|
522
|
-
|
|
523
|
-
if not reverse_success:
|
|
524
|
-
return {'success': False, 'error': 'Encrypted data reverse migration failed'}
|
|
525
|
-
|
|
526
|
-
# Validate encrypted data preservation
|
|
527
|
-
with open(reverse_file, 'r', encoding='utf-8') as f:
|
|
528
|
-
restored_data = json.load(f)
|
|
529
|
-
|
|
530
|
-
encrypted_preserved = self._validate_encrypted_data_preservation(encrypted_data, restored_data)
|
|
531
|
-
|
|
532
|
-
return {
|
|
533
|
-
'success': encrypted_preserved,
|
|
534
|
-
'migration_success': migration_success,
|
|
535
|
-
'reverse_migration_success': reverse_success,
|
|
536
|
-
'encrypted_data_preserved': encrypted_preserved
|
|
537
|
-
}
|
|
538
|
-
|
|
539
|
-
except Exception as e:
|
|
540
|
-
return {'success': False, 'error': str(e)}
|
|
541
|
-
|
|
542
|
-
def test_schema_validation(self) -> Dict[str, Any]:
|
|
543
|
-
"""Test database schema validation and integrity."""
|
|
544
|
-
try:
|
|
545
|
-
self.logger.info("Testing schema validation")
|
|
546
|
-
|
|
547
|
-
# Test schema validation
|
|
548
|
-
schema_valid = self.schema_manager.validate_schema()
|
|
549
|
-
|
|
550
|
-
# Get schema information
|
|
551
|
-
schema_info = self.schema_manager.get_schema_info()
|
|
552
|
-
|
|
553
|
-
# Test schema repair (if needed)
|
|
554
|
-
repair_success = True
|
|
555
|
-
if not schema_valid:
|
|
556
|
-
repair_success = self.schema_manager.repair_schema()
|
|
557
|
-
|
|
558
|
-
return {
|
|
559
|
-
'success': schema_valid and repair_success,
|
|
560
|
-
'schema_valid': schema_valid,
|
|
561
|
-
'repair_success': repair_success,
|
|
562
|
-
'schema_info': schema_info
|
|
563
|
-
}
|
|
564
|
-
|
|
565
|
-
except Exception as e:
|
|
566
|
-
return {'success': False, 'error': str(e)}
|
|
567
|
-
|
|
568
|
-
# Helper methods for test data generation
|
|
569
|
-
|
|
570
|
-
def _get_production_tool_configurations(self) -> Dict[str, Dict[str, Any]]:
|
|
571
|
-
"""Get production-like tool configurations for testing."""
|
|
572
|
-
return {
|
|
573
|
-
'Case Tool': {
|
|
574
|
-
'mode': 'Upper',
|
|
575
|
-
'exclusions': 'a\nan\nand\nas\nat\nbut\nby\nen\nfor\nif\nin\nis\nof\non\nor\nthe\nto\nvia\nvs'
|
|
576
|
-
},
|
|
577
|
-
'Base64 Encoder/Decoder': {
|
|
578
|
-
'mode': 'encode'
|
|
579
|
-
},
|
|
580
|
-
'JSON/XML Tool': {
|
|
581
|
-
'operation': 'json_to_xml',
|
|
582
|
-
'json_indent': 2,
|
|
583
|
-
'xml_indent': 2,
|
|
584
|
-
'preserve_attributes': True,
|
|
585
|
-
'sort_keys': False,
|
|
586
|
-
'array_wrapper': 'item',
|
|
587
|
-
'root_element': 'root',
|
|
588
|
-
'jsonpath_query': '$',
|
|
589
|
-
'xpath_query': '//*'
|
|
590
|
-
},
|
|
591
|
-
'cURL Tool': {
|
|
592
|
-
'default_timeout': 90,
|
|
593
|
-
'follow_redirects': True,
|
|
594
|
-
'verify_ssl': False,
|
|
595
|
-
'max_redirects': 10,
|
|
596
|
-
'user_agent': 'Test Agent',
|
|
597
|
-
'save_history': True,
|
|
598
|
-
'max_history_items': 100,
|
|
599
|
-
'history': [
|
|
600
|
-
{
|
|
601
|
-
'timestamp': '2025-10-08T21:54:15.103533',
|
|
602
|
-
'method': 'POST',
|
|
603
|
-
'url': 'https://test.api.com/data',
|
|
604
|
-
'status_code': 201,
|
|
605
|
-
'response_time': 0.8,
|
|
606
|
-
'success': True,
|
|
607
|
-
'headers': {'Content-Type': 'application/json'},
|
|
608
|
-
'body': '{"test": "data"}',
|
|
609
|
-
'auth_type': 'Bearer Token',
|
|
610
|
-
'response_preview': '{"id": 123}',
|
|
611
|
-
'response_size': 50,
|
|
612
|
-
'content_type': 'application/json'
|
|
613
|
-
}
|
|
614
|
-
],
|
|
615
|
-
'collections': {}
|
|
616
|
-
},
|
|
617
|
-
'Generator Tools': {
|
|
618
|
-
'Strong Password Generator': {
|
|
619
|
-
'length': 20,
|
|
620
|
-
'numbers': '',
|
|
621
|
-
'symbols': '',
|
|
622
|
-
'letters_percent': 70,
|
|
623
|
-
'numbers_percent': 20,
|
|
624
|
-
'symbols_percent': 10
|
|
625
|
-
},
|
|
626
|
-
'UUID/GUID Generator': {
|
|
627
|
-
'version': 4,
|
|
628
|
-
'format': 'standard',
|
|
629
|
-
'case': 'lowercase',
|
|
630
|
-
'count': 1
|
|
631
|
-
}
|
|
632
|
-
},
|
|
633
|
-
'Google AI': {
|
|
634
|
-
'API_KEY': 'test_key',
|
|
635
|
-
'MODEL': 'gemini-1.5-pro-latest',
|
|
636
|
-
'MODELS_LIST': ['gemini-1.5-pro-latest', 'gemini-1.5-flash-latest'],
|
|
637
|
-
'system_prompt': 'You are a helpful assistant.',
|
|
638
|
-
'temperature': 0.7,
|
|
639
|
-
'topK': 40,
|
|
640
|
-
'topP': 0.95,
|
|
641
|
-
'candidateCount': 1,
|
|
642
|
-
'maxOutputTokens': 8192
|
|
643
|
-
},
|
|
644
|
-
'Anthropic AI': {
|
|
645
|
-
'API_KEY': 'test_key',
|
|
646
|
-
'MODEL': 'claude-3-5-sonnet-20240620',
|
|
647
|
-
'MODELS_LIST': ['claude-3-5-sonnet-20240620', 'claude-3-opus-20240229'],
|
|
648
|
-
'system': 'You are a helpful assistant.',
|
|
649
|
-
'max_tokens': 4096,
|
|
650
|
-
'temperature': 0.7
|
|
651
|
-
},
|
|
652
|
-
'OpenAI': {
|
|
653
|
-
'API_KEY': 'test_key',
|
|
654
|
-
'MODEL': 'gpt-4o',
|
|
655
|
-
'MODELS_LIST': ['gpt-4o', 'gpt-4-turbo', 'gpt-3.5-turbo'],
|
|
656
|
-
'system_prompt': 'You are a helpful assistant.',
|
|
657
|
-
'temperature': 0.7,
|
|
658
|
-
'max_tokens': 4096
|
|
659
|
-
},
|
|
660
|
-
'Folder File Reporter': {
|
|
661
|
-
'last_input_folder': '/test/input',
|
|
662
|
-
'last_output_folder': '/test/output',
|
|
663
|
-
'field_selections': {
|
|
664
|
-
'path': False,
|
|
665
|
-
'name': True,
|
|
666
|
-
'size': True,
|
|
667
|
-
'date_modified': False
|
|
668
|
-
},
|
|
669
|
-
'separator': ' | ',
|
|
670
|
-
'folders_only': False,
|
|
671
|
-
'recursion_mode': 'full',
|
|
672
|
-
'size_format': 'human'
|
|
673
|
-
},
|
|
674
|
-
'Find & Replace Text': {
|
|
675
|
-
'find': '',
|
|
676
|
-
'replace': '',
|
|
677
|
-
'mode': 'Text',
|
|
678
|
-
'option': 'ignore_case',
|
|
679
|
-
'find_history': [],
|
|
680
|
-
'replace_history': []
|
|
681
|
-
}
|
|
682
|
-
}
|
|
683
|
-
|
|
684
|
-
def _get_sample_settings_data(self) -> Dict[str, Any]:
|
|
685
|
-
"""Get sample settings data for testing."""
|
|
686
|
-
return {
|
|
687
|
-
'export_path': 'C:\\Users\\Test\\Downloads',
|
|
688
|
-
'debug_level': 'DEBUG',
|
|
689
|
-
'selected_tool': 'Test Tool',
|
|
690
|
-
'active_input_tab': 0,
|
|
691
|
-
'active_output_tab': 0,
|
|
692
|
-
'input_tabs': ['test input'] + [''] * 6,
|
|
693
|
-
'output_tabs': ['test output'] + [''] * 6,
|
|
694
|
-
'tool_settings': {
|
|
695
|
-
'Test Tool': {
|
|
696
|
-
'setting1': 'value1',
|
|
697
|
-
'setting2': 42,
|
|
698
|
-
'setting3': True
|
|
699
|
-
}
|
|
700
|
-
}
|
|
701
|
-
}
|
|
702
|
-
|
|
703
|
-
def _get_comprehensive_test_data(self) -> Dict[str, Any]:
|
|
704
|
-
"""Get comprehensive test data including all major components."""
|
|
705
|
-
return {
|
|
706
|
-
'export_path': 'C:\\Users\\Test\\Downloads',
|
|
707
|
-
'debug_level': 'DEBUG',
|
|
708
|
-
'selected_tool': 'JSON/XML Tool',
|
|
709
|
-
'active_input_tab': 1,
|
|
710
|
-
'active_output_tab': 0,
|
|
711
|
-
'input_tabs': ['', 'test input', '', '', '', '', ''],
|
|
712
|
-
'output_tabs': ['test output', '', '', '', '', '', ''],
|
|
713
|
-
'tool_settings': self._get_production_tool_configurations(),
|
|
714
|
-
'performance_settings': {
|
|
715
|
-
'mode': 'automatic',
|
|
716
|
-
'async_processing': {
|
|
717
|
-
'enabled': True,
|
|
718
|
-
'threshold_kb': 10,
|
|
719
|
-
'max_workers': 2
|
|
720
|
-
},
|
|
721
|
-
'caching': {
|
|
722
|
-
'enabled': True,
|
|
723
|
-
'stats_cache_size': 1000
|
|
724
|
-
}
|
|
725
|
-
},
|
|
726
|
-
'font_settings': {
|
|
727
|
-
'text_font': {
|
|
728
|
-
'family': 'Source Code Pro',
|
|
729
|
-
'size': 11,
|
|
730
|
-
'fallback_family': 'Consolas'
|
|
731
|
-
}
|
|
732
|
-
},
|
|
733
|
-
'dialog_settings': {
|
|
734
|
-
'success': {
|
|
735
|
-
'enabled': False,
|
|
736
|
-
'description': 'Success notifications'
|
|
737
|
-
},
|
|
738
|
-
'error': {
|
|
739
|
-
'enabled': True,
|
|
740
|
-
'locked': True,
|
|
741
|
-
'description': 'Error messages'
|
|
742
|
-
}
|
|
743
|
-
}
|
|
744
|
-
}
|
|
745
|
-
|
|
746
|
-
def _test_single_tool_migration(self, tool_name: str, tool_config: Dict[str, Any]) -> Dict[str, Any]:
|
|
747
|
-
"""Test migration for a single tool configuration."""
|
|
748
|
-
try:
|
|
749
|
-
# Create test data with just this tool
|
|
750
|
-
test_data = {
|
|
751
|
-
'export_path': 'test',
|
|
752
|
-
'tool_settings': {tool_name: tool_config}
|
|
753
|
-
}
|
|
754
|
-
|
|
755
|
-
test_file = self._create_temp_json_file(test_data, f"tool_test_{tool_name.replace(' ', '_')}.json")
|
|
756
|
-
|
|
757
|
-
# Test migration
|
|
758
|
-
migration_success = self.migration_manager.migrate_from_json(test_file)
|
|
759
|
-
|
|
760
|
-
if not migration_success:
|
|
761
|
-
return {'success': False, 'error': 'Migration failed'}
|
|
762
|
-
|
|
763
|
-
# Test reverse migration
|
|
764
|
-
reverse_file = self._create_temp_file_path(f"tool_reverse_{tool_name.replace(' ', '_')}.json")
|
|
765
|
-
reverse_success = self.migration_manager.migrate_to_json(reverse_file)
|
|
766
|
-
|
|
767
|
-
if not reverse_success:
|
|
768
|
-
return {'success': False, 'error': 'Reverse migration failed'}
|
|
769
|
-
|
|
770
|
-
# Validate data integrity
|
|
771
|
-
with open(reverse_file, 'r', encoding='utf-8') as f:
|
|
772
|
-
restored_data = json.load(f)
|
|
773
|
-
|
|
774
|
-
tool_preserved = (tool_name in restored_data.get('tool_settings', {}) and
|
|
775
|
-
restored_data['tool_settings'][tool_name] == tool_config)
|
|
776
|
-
|
|
777
|
-
return {
|
|
778
|
-
'success': tool_preserved,
|
|
779
|
-
'migration_success': migration_success,
|
|
780
|
-
'reverse_migration_success': reverse_success,
|
|
781
|
-
'data_preserved': tool_preserved
|
|
782
|
-
}
|
|
783
|
-
|
|
784
|
-
except Exception as e:
|
|
785
|
-
return {'success': False, 'error': str(e)}
|
|
786
|
-
|
|
787
|
-
def _test_edge_case_migration(self, case_name: str, test_data: Dict[str, Any]) -> Dict[str, Any]:
|
|
788
|
-
"""Test migration for an edge case scenario."""
|
|
789
|
-
try:
|
|
790
|
-
test_file = self._create_temp_json_file(test_data, f"edge_case_{case_name}.json")
|
|
791
|
-
|
|
792
|
-
# Attempt migration (may fail gracefully)
|
|
793
|
-
try:
|
|
794
|
-
migration_success = self.migration_manager.migrate_from_json(test_file, validate=False)
|
|
795
|
-
|
|
796
|
-
return {
|
|
797
|
-
'handled_gracefully': True,
|
|
798
|
-
'migration_success': migration_success,
|
|
799
|
-
'error': None
|
|
800
|
-
}
|
|
801
|
-
|
|
802
|
-
except Exception as migration_error:
|
|
803
|
-
# Edge cases may cause exceptions - this is acceptable
|
|
804
|
-
return {
|
|
805
|
-
'handled_gracefully': True,
|
|
806
|
-
'migration_success': False,
|
|
807
|
-
'error': str(migration_error),
|
|
808
|
-
'exception_handled': True
|
|
809
|
-
}
|
|
810
|
-
|
|
811
|
-
except Exception as e:
|
|
812
|
-
return {
|
|
813
|
-
'handled_gracefully': False,
|
|
814
|
-
'error': str(e)
|
|
815
|
-
}
|
|
816
|
-
|
|
817
|
-
def _test_performance_scenario(self, test_name: str, tool_count: int, settings_per_tool: int) -> Dict[str, Any]:
|
|
818
|
-
"""Test performance for a specific scenario."""
|
|
819
|
-
try:
|
|
820
|
-
# Generate performance test data
|
|
821
|
-
test_data = self._create_large_dataset(tool_count, settings_per_tool)
|
|
822
|
-
test_file = self._create_temp_json_file(test_data, f"perf_{test_name}.json")
|
|
823
|
-
|
|
824
|
-
# Measure migration performance
|
|
825
|
-
start_time = datetime.now()
|
|
826
|
-
migration_success = self.migration_manager.migrate_from_json(test_file, validate=False)
|
|
827
|
-
migration_time = (datetime.now() - start_time).total_seconds()
|
|
828
|
-
|
|
829
|
-
# Measure reverse migration performance
|
|
830
|
-
reverse_file = self._create_temp_file_path(f"perf_reverse_{test_name}.json")
|
|
831
|
-
start_time = datetime.now()
|
|
832
|
-
reverse_success = self.migration_manager.migrate_to_json(reverse_file)
|
|
833
|
-
reverse_time = (datetime.now() - start_time).total_seconds()
|
|
834
|
-
|
|
835
|
-
return {
|
|
836
|
-
'success': migration_success and reverse_success,
|
|
837
|
-
'tool_count': tool_count,
|
|
838
|
-
'settings_per_tool': settings_per_tool,
|
|
839
|
-
'total_settings': tool_count * settings_per_tool,
|
|
840
|
-
'migration_time': migration_time,
|
|
841
|
-
'reverse_time': reverse_time,
|
|
842
|
-
'total_time': migration_time + reverse_time,
|
|
843
|
-
'settings_per_second': (tool_count * settings_per_tool) / (migration_time + reverse_time) if (migration_time + reverse_time) > 0 else 0
|
|
844
|
-
}
|
|
845
|
-
|
|
846
|
-
except Exception as e:
|
|
847
|
-
return {'success': False, 'error': str(e)}
|
|
848
|
-
|
|
849
|
-
def _create_large_dataset(self, tool_count: int, settings_per_tool: int) -> Dict[str, Any]:
|
|
850
|
-
"""Create large dataset for performance testing."""
|
|
851
|
-
data = {
|
|
852
|
-
'export_path': 'test',
|
|
853
|
-
'input_tabs': [''] * 7,
|
|
854
|
-
'output_tabs': [''] * 7,
|
|
855
|
-
'tool_settings': {}
|
|
856
|
-
}
|
|
857
|
-
|
|
858
|
-
for i in range(tool_count):
|
|
859
|
-
tool_settings = {}
|
|
860
|
-
for j in range(settings_per_tool):
|
|
861
|
-
tool_settings[f'setting_{j}'] = f'value_{i}_{j}'
|
|
862
|
-
|
|
863
|
-
# Add some complex nested structures
|
|
864
|
-
tool_settings['nested'] = {
|
|
865
|
-
'level1': {
|
|
866
|
-
'level2': {
|
|
867
|
-
'array': [f'item_{k}' for k in range(10)],
|
|
868
|
-
'number': i * j if j > 0 else i,
|
|
869
|
-
'boolean': (i + j) % 2 == 0
|
|
870
|
-
}
|
|
871
|
-
}
|
|
872
|
-
}
|
|
873
|
-
|
|
874
|
-
data['tool_settings'][f'Tool_{i}'] = tool_settings
|
|
875
|
-
|
|
876
|
-
return data
|
|
877
|
-
|
|
878
|
-
def _create_circular_reference_data(self) -> Dict[str, Any]:
|
|
879
|
-
"""Create data with circular references (should be handled gracefully)."""
|
|
880
|
-
# Note: JSON doesn't support circular references, so this creates deeply nested structure
|
|
881
|
-
data = {'export_path': 'test', 'tool_settings': {}}
|
|
882
|
-
|
|
883
|
-
# Create deeply nested structure that might cause issues
|
|
884
|
-
nested = data
|
|
885
|
-
for i in range(100):
|
|
886
|
-
nested['next'] = {'level': i}
|
|
887
|
-
nested = nested['next']
|
|
888
|
-
|
|
889
|
-
return data
|
|
890
|
-
|
|
891
|
-
def _create_deeply_nested_data(self) -> Dict[str, Any]:
|
|
892
|
-
"""Create extremely nested data structure."""
|
|
893
|
-
data = {
|
|
894
|
-
'export_path': 'test',
|
|
895
|
-
'tool_settings': {
|
|
896
|
-
'Nested Tool': {}
|
|
897
|
-
}
|
|
898
|
-
}
|
|
899
|
-
|
|
900
|
-
# Create 50 levels of nesting
|
|
901
|
-
nested = data['tool_settings']['Nested Tool']
|
|
902
|
-
for i in range(50):
|
|
903
|
-
nested[f'level_{i}'] = {}
|
|
904
|
-
nested = nested[f'level_{i}']
|
|
905
|
-
|
|
906
|
-
nested['final_value'] = 'deep_value'
|
|
907
|
-
|
|
908
|
-
return data
|
|
909
|
-
|
|
910
|
-
def _create_special_characters_data(self) -> Dict[str, Any]:
|
|
911
|
-
"""Create data with special characters and edge cases."""
|
|
912
|
-
return {
|
|
913
|
-
'export_path': 'test',
|
|
914
|
-
'tool_settings': {
|
|
915
|
-
'Special Chars Tool': {
|
|
916
|
-
'null_bytes': 'test\x00null',
|
|
917
|
-
'control_chars': 'test\x01\x02\x03control',
|
|
918
|
-
'quotes': 'test"with\'quotes',
|
|
919
|
-
'backslashes': 'test\\with\\backslashes',
|
|
920
|
-
'newlines': 'test\nwith\nnewlines',
|
|
921
|
-
'tabs': 'test\twith\ttabs',
|
|
922
|
-
'unicode_escape': 'test\\u0041unicode'
|
|
923
|
-
}
|
|
924
|
-
}
|
|
925
|
-
}
|
|
926
|
-
|
|
927
|
-
def _create_large_strings_data(self) -> Dict[str, Any]:
|
|
928
|
-
"""Create data with very large string values."""
|
|
929
|
-
large_string = 'x' * 1000000 # 1MB string
|
|
930
|
-
|
|
931
|
-
return {
|
|
932
|
-
'export_path': 'test',
|
|
933
|
-
'tool_settings': {
|
|
934
|
-
'Large String Tool': {
|
|
935
|
-
'large_value': large_string,
|
|
936
|
-
'large_array': [large_string[:1000] for _ in range(1000)]
|
|
937
|
-
}
|
|
938
|
-
}
|
|
939
|
-
}
|
|
940
|
-
|
|
941
|
-
def _create_invalid_unicode_data(self) -> Dict[str, Any]:
|
|
942
|
-
"""Create data with potentially problematic Unicode."""
|
|
943
|
-
return {
|
|
944
|
-
'export_path': 'test',
|
|
945
|
-
'tool_settings': {
|
|
946
|
-
'Unicode Edge Cases': {
|
|
947
|
-
'surrogate_pairs': '𝕳𝖊𝖑𝖑𝖔 𝖂𝖔𝖗𝖑𝖉',
|
|
948
|
-
'combining_chars': 'e\u0301\u0302\u0303', # e with multiple combining marks
|
|
949
|
-
'rtl_text': 'مرحبا بالعالم',
|
|
950
|
-
'mixed_scripts': 'Hello世界Мир',
|
|
951
|
-
'zero_width': 'test\u200bzero\u200bwidth'
|
|
952
|
-
}
|
|
953
|
-
}
|
|
954
|
-
}
|
|
955
|
-
|
|
956
|
-
def _create_mixed_types_data(self) -> Dict[str, Any]:
|
|
957
|
-
"""Create data with mixed and edge case data types."""
|
|
958
|
-
return {
|
|
959
|
-
'export_path': 'test',
|
|
960
|
-
'tool_settings': {
|
|
961
|
-
'Mixed Types Tool': {
|
|
962
|
-
'string': 'test',
|
|
963
|
-
'integer': 42,
|
|
964
|
-
'float': 3.14159,
|
|
965
|
-
'boolean_true': True,
|
|
966
|
-
'boolean_false': False,
|
|
967
|
-
'null_value': None,
|
|
968
|
-
'empty_string': '',
|
|
969
|
-
'empty_array': [],
|
|
970
|
-
'empty_object': {},
|
|
971
|
-
'large_number': 9223372036854775807, # Max int64
|
|
972
|
-
'small_number': -9223372036854775808,
|
|
973
|
-
'scientific_notation': 1.23e-10
|
|
974
|
-
}
|
|
975
|
-
}
|
|
976
|
-
}
|
|
977
|
-
|
|
978
|
-
def _create_temp_json_file(self, data: Dict[str, Any], filename: str) -> str:
|
|
979
|
-
"""Create temporary JSON file with test data."""
|
|
980
|
-
temp_path = self._create_temp_file_path(filename)
|
|
981
|
-
|
|
982
|
-
with open(temp_path, 'w', encoding='utf-8') as f:
|
|
983
|
-
json.dump(data, f, indent=2, ensure_ascii=False)
|
|
984
|
-
|
|
985
|
-
self.temp_files.append(temp_path)
|
|
986
|
-
return temp_path
|
|
987
|
-
|
|
988
|
-
def _create_temp_file_path(self, filename: str) -> str:
|
|
989
|
-
"""Create temporary file path."""
|
|
990
|
-
temp_dir = tempfile.gettempdir()
|
|
991
|
-
return os.path.join(temp_dir, f"migration_test_{filename}")
|
|
992
|
-
|
|
993
|
-
def _compare_unicode_data(self, original: Dict[str, Any], restored: Dict[str, Any]) -> bool:
|
|
994
|
-
"""Compare Unicode data preservation."""
|
|
995
|
-
try:
|
|
996
|
-
# Convert to JSON strings for comparison
|
|
997
|
-
original_str = json.dumps(original, sort_keys=True, ensure_ascii=False)
|
|
998
|
-
restored_str = json.dumps(restored, sort_keys=True, ensure_ascii=False)
|
|
999
|
-
|
|
1000
|
-
return original_str == restored_str
|
|
1001
|
-
|
|
1002
|
-
except Exception:
|
|
1003
|
-
return False
|
|
1004
|
-
|
|
1005
|
-
def _validate_encrypted_data_preservation(self, original: Dict[str, Any], restored: Dict[str, Any]) -> bool:
|
|
1006
|
-
"""Validate that encrypted data (ENC: prefixed) is preserved."""
|
|
1007
|
-
try:
|
|
1008
|
-
def find_encrypted_values(data, path=""):
|
|
1009
|
-
encrypted = {}
|
|
1010
|
-
if isinstance(data, dict):
|
|
1011
|
-
for key, value in data.items():
|
|
1012
|
-
current_path = f"{path}.{key}" if path else key
|
|
1013
|
-
if isinstance(value, str) and value.startswith('ENC:'):
|
|
1014
|
-
encrypted[current_path] = value
|
|
1015
|
-
elif isinstance(value, (dict, list)):
|
|
1016
|
-
encrypted.update(find_encrypted_values(value, current_path))
|
|
1017
|
-
elif isinstance(data, list):
|
|
1018
|
-
for i, item in enumerate(data):
|
|
1019
|
-
current_path = f"{path}[{i}]"
|
|
1020
|
-
if isinstance(item, (dict, list)):
|
|
1021
|
-
encrypted.update(find_encrypted_values(item, current_path))
|
|
1022
|
-
return encrypted
|
|
1023
|
-
|
|
1024
|
-
original_encrypted = find_encrypted_values(original)
|
|
1025
|
-
restored_encrypted = find_encrypted_values(restored)
|
|
1026
|
-
|
|
1027
|
-
return original_encrypted == restored_encrypted
|
|
1028
|
-
|
|
1029
|
-
except Exception:
|
|
1030
|
-
return False
|
|
1031
|
-
|
|
1032
|
-
def cleanup_test_environment(self) -> None:
|
|
1033
|
-
"""Clean up test environment and temporary files."""
|
|
1034
|
-
try:
|
|
1035
|
-
# Close database connections
|
|
1036
|
-
if self.connection_manager:
|
|
1037
|
-
self.connection_manager.close_all_connections()
|
|
1038
|
-
|
|
1039
|
-
# Remove temporary files
|
|
1040
|
-
for temp_file in self.temp_files:
|
|
1041
|
-
try:
|
|
1042
|
-
if os.path.exists(temp_file):
|
|
1043
|
-
os.remove(temp_file)
|
|
1044
|
-
except Exception as e:
|
|
1045
|
-
self.logger.warning(f"Failed to remove temp file {temp_file}: {e}")
|
|
1046
|
-
|
|
1047
|
-
self.logger.info("Test environment cleanup completed")
|
|
1048
|
-
|
|
1049
|
-
except Exception as e:
|
|
1050
|
-
self.logger.warning(f"Test cleanup failed: {e}")
|
|
1051
|
-
|
|
1052
|
-
|
|
1053
|
-
# Convenience function for running tests
|
|
1054
|
-
def run_comprehensive_migration_tests() -> Dict[str, Any]:
|
|
1055
|
-
"""
|
|
1056
|
-
Run comprehensive migration tests and return results.
|
|
1057
|
-
|
|
1058
|
-
Returns:
|
|
1059
|
-
Dictionary with test results and summary
|
|
1060
|
-
"""
|
|
1061
|
-
test_suite = MigrationTestSuite()
|
|
1062
|
-
return test_suite.run_all_tests()
|
|
1063
|
-
|
|
1064
|
-
|
|
1065
|
-
if __name__ == "__main__":
|
|
1066
|
-
# Run tests if executed directly
|
|
1067
|
-
logging.basicConfig(level=logging.INFO)
|
|
1068
|
-
results = run_comprehensive_migration_tests()
|
|
1069
|
-
|
|
1070
|
-
print("\n" + "="*80)
|
|
1071
|
-
print("MIGRATION TEST SUITE RESULTS")
|
|
1072
|
-
print("="*80)
|
|
1073
|
-
|
|
1074
|
-
summary = results.get('test_summary', {})
|
|
1075
|
-
print(f"Total Categories: {summary.get('total_categories', 0)}")
|
|
1076
|
-
print(f"Passed Categories: {summary.get('passed_categories', 0)}")
|
|
1077
|
-
print(f"Failed Categories: {summary.get('failed_categories', 0)}")
|
|
1078
|
-
print(f"Overall Success: {results.get('overall_success', False)}")
|
|
1079
|
-
|
|
1080
|
-
if not results.get('overall_success', False):
|
|
1081
|
-
print("\nFAILED CATEGORIES:")
|
|
1082
|
-
for category, result in results.get('category_results', {}).items():
|
|
1083
|
-
if not result.get('success', False):
|
|
1084
|
-
print(f" - {category}: {result.get('error', 'Unknown error')}")
|
|
1085
|
-
|
|
+"""
+Comprehensive Migration Test Suite
+
+This module provides comprehensive test cases for all 15+ tool configurations
+identified in the production codebase analysis. It includes automated testing
+for data integrity, edge cases, and migration validation.
+"""
+
+import json
+import os
+import tempfile
+import unittest
+import logging
+from typing import Dict, List, Any, Optional
+from datetime import datetime
+
+from .migration_manager import MigrationManager
+from .migration_validator import MigrationValidator, ValidationResult
+from .database_connection_manager import DatabaseConnectionManager
+from .database_schema_manager import DatabaseSchemaManager
+
+
+class MigrationTestSuite:
+    """
+    Comprehensive test suite for migration system validation.
+
+    Features:
+    - Tests for all 15+ production tool configurations
+    - Edge case testing with malformed data
+    - Performance testing with large datasets
+    - Rollback and recovery testing
+    - Data integrity validation
+    """
+
+    def __init__(self):
+        """Initialize the test suite."""
+        self.logger = logging.getLogger(__name__)
+        self.test_results = []
+        self.temp_files = []
+        self.test_db_path = ":memory:"
+
+        # Initialize components
+        self.connection_manager = None
+        self.schema_manager = None
+        self.migration_manager = None
+        self.validator = None
+
+    def setup_test_environment(self) -> bool:
+        """
+        Setup test environment with database and migration components.
+
+        Returns:
+            True if setup successful, False otherwise
+        """
+        try:
+            # Initialize database components
+            self.connection_manager = DatabaseConnectionManager(self.test_db_path)
+            self.schema_manager = DatabaseSchemaManager(self.connection_manager)
+            self.migration_manager = MigrationManager(self.connection_manager)
+            self.validator = MigrationValidator(self.migration_manager)
+
+            # Initialize database schema
+            schema_success = self.schema_manager.initialize_schema()
+            if not schema_success:
+                self.logger.error("Failed to initialize test database schema")
+                return False
+
+            self.logger.info("Test environment setup completed")
+            return True
+
+        except Exception as e:
+            self.logger.error(f"Test environment setup failed: {e}")
+            return False
+
+    def run_all_tests(self) -> Dict[str, Any]:
+        """
+        Run all migration tests and return comprehensive results.
+
+        Returns:
+            Dictionary with test results and summary
+        """
+        if not self.setup_test_environment():
+            return {'success': False, 'error': 'Test environment setup failed'}
+
+        try:
+            self.logger.info("Starting comprehensive migration test suite")
+
+            # Test categories
+            test_categories = [
+                ('tool_configurations', self.test_all_tool_configurations),
+                ('edge_cases', self.test_edge_cases),
+                ('performance', self.test_performance_scenarios),
+                ('rollback_procedures', self.test_rollback_procedures),
+                ('data_integrity', self.test_data_integrity),
+                ('concurrent_access', self.test_concurrent_access),
+                ('large_datasets', self.test_large_datasets),
+                ('unicode_support', self.test_unicode_support),
+                ('encrypted_data', self.test_encrypted_data_handling),
+                ('schema_validation', self.test_schema_validation)
+            ]
+
+            results = {
+                'test_summary': {
+                    'total_categories': len(test_categories),
+                    'start_time': datetime.now().isoformat(),
+                    'passed_categories': 0,
+                    'failed_categories': 0
+                },
+                'category_results': {},
+                'overall_success': True
+            }
+
+            # Run each test category
+            for category_name, test_function in test_categories:
+                try:
+                    self.logger.info(f"Running test category: {category_name}")
+                    category_result = test_function()
+
+                    results['category_results'][category_name] = category_result
+
+                    if category_result.get('success', False):
+                        results['test_summary']['passed_categories'] += 1
+                    else:
+                        results['test_summary']['failed_categories'] += 1
+                        results['overall_success'] = False
+
+                except Exception as e:
+                    self.logger.error(f"Test category {category_name} failed with exception: {e}")
+                    results['category_results'][category_name] = {
+                        'success': False,
+                        'error': str(e),
+                        'exception': True
+                    }
+                    results['test_summary']['failed_categories'] += 1
+                    results['overall_success'] = False
+
+            results['test_summary']['end_time'] = datetime.now().isoformat()
+
+            self.logger.info("Comprehensive migration test suite completed")
+            return results
+
+        except Exception as e:
+            self.logger.error(f"Test suite execution failed: {e}")
+            return {
+                'success': False,
+                'error': str(e),
+                'test_summary': {'total_categories': 0, 'passed_categories': 0, 'failed_categories': 0}
+            }
+
+        finally:
+            self.cleanup_test_environment()
+
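
Reviewer note: the results dictionary assembled above is the suite's public contract; both the `__main__` report at the bottom of this file and the `run_comprehensive_migration_tests()` convenience wrapper return it unchanged. A minimal sketch of a CI-style consumer, assuming the module stays importable as `core.migration_test_suite` (the import path is inferred from this diff's file layout, not confirmed):

# Sketch only: drive the suite programmatically and exit non-zero on failure.
import sys

from core.migration_test_suite import run_comprehensive_migration_tests

results = run_comprehensive_migration_tests()
for name, outcome in results.get('category_results', {}).items():
    status = 'PASS' if outcome.get('success', False) else 'FAIL'
    print(f"{status} {name}")
sys.exit(0 if results.get('overall_success', False) else 1)
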
+    def test_all_tool_configurations(self) -> Dict[str, Any]:
+        """Test migration for all known tool configurations."""
+        try:
+            self.logger.info("Testing all tool configurations")
+
+            # Production tool configurations based on analysis
+            tool_configs = self._get_production_tool_configurations()
+
+            results = {
+                'success': True,
+                'total_tools': len(tool_configs),
+                'passed_tools': 0,
+                'failed_tools': 0,
+                'tool_results': {}
+            }
+
+            for tool_name, tool_config in tool_configs.items():
+                try:
+                    tool_result = self._test_single_tool_migration(tool_name, tool_config)
+                    results['tool_results'][tool_name] = tool_result
+
+                    if tool_result['success']:
+                        results['passed_tools'] += 1
+                    else:
+                        results['failed_tools'] += 1
+                        results['success'] = False
+
+                except Exception as e:
+                    self.logger.error(f"Tool test failed for {tool_name}: {e}")
+                    results['tool_results'][tool_name] = {
+                        'success': False,
+                        'error': str(e)
+                    }
+                    results['failed_tools'] += 1
+                    results['success'] = False
+
+            return results
+
+        except Exception as e:
+            return {'success': False, 'error': str(e)}
+
+    def test_edge_cases(self) -> Dict[str, Any]:
+        """Test migration with various edge cases and malformed data."""
+        try:
+            self.logger.info("Testing edge cases")
+
+            edge_cases = [
+                ('empty_settings', {}),
+                ('null_values', {'tool_settings': {'Test Tool': None}}),
+                ('missing_required_fields', {'tool_settings': {}}),
+                ('invalid_json_structure', {'tool_settings': 'invalid'}),
+                ('circular_references', self._create_circular_reference_data()),
+                ('extremely_nested', self._create_deeply_nested_data()),
+                ('special_characters', self._create_special_characters_data()),
+                ('large_strings', self._create_large_strings_data()),
+                ('invalid_unicode', self._create_invalid_unicode_data()),
+                ('mixed_data_types', self._create_mixed_types_data())
+            ]
+
+            results = {
+                'success': True,
+                'total_cases': len(edge_cases),
+                'passed_cases': 0,
+                'failed_cases': 0,
+                'case_results': {}
+            }
+
+            for case_name, test_data in edge_cases:
+                try:
+                    case_result = self._test_edge_case_migration(case_name, test_data)
+                    results['case_results'][case_name] = case_result
+
+                    # For edge cases, we expect some to fail gracefully
+                    if case_result.get('handled_gracefully', False):
+                        results['passed_cases'] += 1
+                    else:
+                        results['failed_cases'] += 1
+                        # Don't mark overall as failed for expected edge case failures
+
+                except Exception as e:
+                    self.logger.warning(f"Edge case {case_name} caused exception (may be expected): {e}")
+                    results['case_results'][case_name] = {
+                        'handled_gracefully': True,
+                        'exception': str(e)
+                    }
+                    results['passed_cases'] += 1
+
+            return results
+
+        except Exception as e:
+            return {'success': False, 'error': str(e)}
+
+    def test_performance_scenarios(self) -> Dict[str, Any]:
+        """Test migration performance with various data sizes."""
+        try:
+            self.logger.info("Testing performance scenarios")
+
+            performance_tests = [
+                ('small_dataset', 10, 100),     # 10 tools, 100 settings each
+                ('medium_dataset', 50, 200),    # 50 tools, 200 settings each
+                ('large_dataset', 100, 500),    # 100 tools, 500 settings each
+                ('xlarge_dataset', 200, 1000),  # 200 tools, 1000 settings each
+                ('stress_test', 500, 2000)      # 500 tools, 2000 settings each
+            ]
+
+            results = {
+                'success': True,
+                'total_tests': len(performance_tests),
+                'passed_tests': 0,
+                'failed_tests': 0,
+                'performance_results': {}
+            }
+
+            for test_name, tool_count, settings_per_tool in performance_tests:
+                try:
+                    perf_result = self._test_performance_scenario(test_name, tool_count, settings_per_tool)
+                    results['performance_results'][test_name] = perf_result
+
+                    if perf_result['success']:
+                        results['passed_tests'] += 1
+                    else:
+                        results['failed_tests'] += 1
+                        results['success'] = False
+
+                except Exception as e:
+                    self.logger.error(f"Performance test {test_name} failed: {e}")
+                    results['performance_results'][test_name] = {
+                        'success': False,
+                        'error': str(e)
+                    }
+                    results['failed_tests'] += 1
+                    results['success'] = False
+
+            return results
+
+        except Exception as e:
+            return {'success': False, 'error': str(e)}
+
+    def test_rollback_procedures(self) -> Dict[str, Any]:
+        """Test rollback and recovery procedures."""
+        try:
+            self.logger.info("Testing rollback procedures")
+
+            # Create test data
+            test_data = self._get_sample_settings_data()
+            test_file = self._create_temp_json_file(test_data, "rollback_test.json")
+
+            results = {
+                'success': True,
+                'tests': {}
+            }
+
+            # Test 1: Backup creation
+            backup_success, backup_path = self.validator.create_automatic_backup(test_file)
+            results['tests']['backup_creation'] = {
+                'success': backup_success,
+                'backup_path': backup_path
+            }
+
+            if not backup_success:
+                results['success'] = False
+                return results
+
+            # Test 2: Migration with backup
+            migration_success = self.migration_manager.migrate_from_json(test_file)
+            results['tests']['migration_with_backup'] = {
+                'success': migration_success
+            }
+
+            # Test 3: Rollback procedure
+            if backup_path:
+                rollback_result = self.validator.test_rollback_procedures(backup_path)
+                results['tests']['rollback_procedure'] = {
+                    'success': rollback_result.success,
+                    'errors': rollback_result.errors,
+                    'warnings': rollback_result.warnings
+                }
+
+                if not rollback_result.success:
+                    results['success'] = False
+
+            return results
+
+        except Exception as e:
+            return {'success': False, 'error': str(e)}
+
+    def test_data_integrity(self) -> Dict[str, Any]:
+        """Test data integrity through complete migration cycles."""
+        try:
+            self.logger.info("Testing data integrity")
+
+            # Test with production-like data
+            test_data = self._get_comprehensive_test_data()
+            test_file = self._create_temp_json_file(test_data, "integrity_test.json")
+
+            # Perform comprehensive validation
+            validation_result = self.validator.validate_complete_migration(test_file)
+
+            return {
+                'success': validation_result.success,
+                'errors': validation_result.errors,
+                'warnings': validation_result.warnings,
+                'details': validation_result.details
+            }
+
+        except Exception as e:
+            return {'success': False, 'error': str(e)}
+
+    def test_concurrent_access(self) -> Dict[str, Any]:
+        """Test concurrent access scenarios."""
+        try:
+            self.logger.info("Testing concurrent access")
+
+            # For this implementation, we'll test basic concurrent operations
+            # In a full implementation, this would use threading
+
+            results = {
+                'success': True,
+                'concurrent_operations': 0,
+                'failed_operations': 0
+            }
+
+            # Simulate multiple operations
+            test_data = self._get_sample_settings_data()
+
+            for i in range(5):
+                try:
+                    # Create separate test file for each operation
+                    test_file = self._create_temp_json_file(test_data, f"concurrent_test_{i}.json")
+
+                    # Perform migration
+                    success = self.migration_manager.migrate_from_json(test_file)
+
+                    if success:
+                        results['concurrent_operations'] += 1
+                    else:
+                        results['failed_operations'] += 1
+                        results['success'] = False
+
+                except Exception as e:
+                    self.logger.error(f"Concurrent operation {i} failed: {e}")
+                    results['failed_operations'] += 1
+                    results['success'] = False
+
+            return results
+
+        except Exception as e:
+            return {'success': False, 'error': str(e)}
+
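
Reviewer note: despite its name, `test_concurrent_access` above runs its five migrations strictly in sequence; the inline comment concedes that a full implementation would use threading. A hedged sketch of what that could look like with `concurrent.futures`; whether `MigrationManager.migrate_from_json` is actually safe to call from several threads at once (particularly against the shared `:memory:` database) is an assumption this diff does not confirm:

# Sketch only: genuinely concurrent migrations via a thread pool.
from concurrent.futures import ThreadPoolExecutor, as_completed

from core.migration_test_suite import MigrationTestSuite  # assumed import path

def run_concurrent(workers: int = 5) -> int:
    suite = MigrationTestSuite()
    if not suite.setup_test_environment():
        return 0
    try:
        test_data = suite._get_sample_settings_data()
        files = [suite._create_temp_json_file(test_data, f"concurrent_test_{i}.json")
                 for i in range(workers)]
        with ThreadPoolExecutor(max_workers=workers) as pool:
            futures = [pool.submit(suite.migration_manager.migrate_from_json, path)
                       for path in files]
            # Count the migrations that reported success.
            return sum(1 for fut in as_completed(futures) if fut.result())
    finally:
        suite.cleanup_test_environment()
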
+    def test_large_datasets(self) -> Dict[str, Any]:
+        """Test migration with large datasets."""
+        try:
+            self.logger.info("Testing large datasets")
+
+            # Create large dataset
+            large_data = self._create_large_dataset(1000, 5000)  # 1000 tools, 5000 settings each
+            test_file = self._create_temp_json_file(large_data, "large_dataset_test.json")
+
+            start_time = datetime.now()
+
+            # Test migration
+            migration_success = self.migration_manager.migrate_from_json(test_file)
+
+            migration_time = (datetime.now() - start_time).total_seconds()
+
+            return {
+                'success': migration_success,
+                'dataset_size': len(json.dumps(large_data)),
+                'tool_count': len(large_data.get('tool_settings', {})),
+                'migration_time': migration_time,
+                'performance_acceptable': migration_time < 60  # Should complete within 60 seconds
+            }
+
+        except Exception as e:
+            return {'success': False, 'error': str(e)}
+
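
Reviewer note: `test_large_datasets` above (and `_test_performance_scenario` further down) times migrations with `datetime.now()`, which can jump if the system clock is adjusted mid-run. `time.perf_counter()` is the standard-library clock intended for interval measurement; a drop-in sketch:

# Sketch only: monotonic interval timing for the measurements above.
import time

def timed_migration(migration_manager, test_file):
    """Return (success, elapsed_seconds) using a monotonic clock."""
    start = time.perf_counter()
    success = migration_manager.migrate_from_json(test_file)
    return success, time.perf_counter() - start
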
+    def test_unicode_support(self) -> Dict[str, Any]:
+        """Test Unicode and international character support."""
+        try:
+            self.logger.info("Testing Unicode support")
+
+            unicode_data = {
+                'export_path': 'тест/测试/テスト',
+                'tool_settings': {
+                    'Unicode Tool 中文': {
+                        'name': 'Тестовый инструмент',
+                        'description': 'ツールの説明',
+                        'emoji_settings': '🚀🔧⚙️🛠️',
+                        'special_chars': '©®™€£¥§¶•‰',
+                        'math_symbols': '∑∏∆∇∂∫√∞',
+                        'arrows': '←→↑↓↔↕⇄⇅',
+                        'multilingual': {
+                            'english': 'Hello World',
+                            'chinese': '你好世界',
+                            'japanese': 'こんにちは世界',
+                            'russian': 'Привет мир',
+                            'arabic': 'مرحبا بالعالم',
+                            'hebrew': 'שלום עולם'
+                        }
+                    }
+                }
+            }
+
+            test_file = self._create_temp_json_file(unicode_data, "unicode_test.json")
+
+            # Test migration
+            migration_success = self.migration_manager.migrate_from_json(test_file)
+
+            if not migration_success:
+                return {'success': False, 'error': 'Unicode migration failed'}
+
+            # Test reverse migration
+            reverse_file = self._create_temp_file_path("unicode_reverse.json")
+            reverse_success = self.migration_manager.migrate_to_json(reverse_file)
+
+            if not reverse_success:
+                return {'success': False, 'error': 'Unicode reverse migration failed'}
+
+            # Validate Unicode preservation
+            with open(reverse_file, 'r', encoding='utf-8') as f:
+                restored_data = json.load(f)
+
+            unicode_preserved = self._compare_unicode_data(unicode_data, restored_data)
+
+            return {
+                'success': unicode_preserved,
+                'migration_success': migration_success,
+                'reverse_migration_success': reverse_success,
+                'unicode_preserved': unicode_preserved
+            }
+
+        except Exception as e:
+            return {'success': False, 'error': str(e)}
+
+    def test_encrypted_data_handling(self) -> Dict[str, Any]:
+        """Test handling of encrypted data (API keys with ENC: prefix)."""
+        try:
+            self.logger.info("Testing encrypted data handling")
+
+            encrypted_data = {
+                'export_path': 'test',
+                'tool_settings': {
+                    'Encrypted Tool': {
+                        'API_KEY': 'ENC:dGVzdF9lbmNyeXB0ZWRfa2V5X3ZhbHVl',
+                        'SECRET_TOKEN': 'ENC:YW5vdGhlcl9lbmNyeXB0ZWRfc2VjcmV0',
+                        'normal_setting': 'plain_text_value',
+                        'nested_encrypted': {
+                            'PRIVATE_KEY': 'ENC:cHJpdmF0ZV9rZXlfZW5jcnlwdGVk',
+                            'public_setting': 'public_value'
+                        }
+                    },
+                    'AWS Bedrock': {
+                        'API_KEY': 'ENC:Z0FBQUFBQm81ZEI4alg1a2UzU1ZUWXc3VWVacjhxUS1IUDhvV1RyM1FGSU85ZTNZWlZQbnRLZGI0aUxxOUJKSU02aGxIbG9tNGlienFhWHE2cVdCWERkc0R1MEZLd3hGTW9Pa3oyYjBZRmNtTUJnVzdfdUNfRjlXSkI2ZFRUS1dYR3BBM0FraVJlREk3NlUtUmhQWl9Md1VQRTluNDk5dUo1NmxBX3JZSWtYWTQyQjhtQzh6NGlSdk1ZcnlIbEx1TnBLUi1Ua0R1d1hPWWo4X1V2MG92c1JRaDBoY25EcVFZRjZGV2ZGeXBObk8xQTJlVTRjUHdhbkE0Z3d0VkVIUHhJRkpfMGV1X21hWA==',
+                        'MODEL': 'anthropic.claude-3-5-sonnet-20240620-v1:0'
+                    }
+                }
+            }
+
+            test_file = self._create_temp_json_file(encrypted_data, "encrypted_test.json")
+
+            # Test migration
+            migration_success = self.migration_manager.migrate_from_json(test_file)
+
+            if not migration_success:
+                return {'success': False, 'error': 'Encrypted data migration failed'}
+
+            # Test reverse migration
+            reverse_file = self._create_temp_file_path("encrypted_reverse.json")
+            reverse_success = self.migration_manager.migrate_to_json(reverse_file)
+
+            if not reverse_success:
+                return {'success': False, 'error': 'Encrypted data reverse migration failed'}
+
+            # Validate encrypted data preservation
+            with open(reverse_file, 'r', encoding='utf-8') as f:
+                restored_data = json.load(f)
+
+            encrypted_preserved = self._validate_encrypted_data_preservation(encrypted_data, restored_data)
+
+            return {
+                'success': encrypted_preserved,
+                'migration_success': migration_success,
+                'reverse_migration_success': reverse_success,
+                'encrypted_data_preserved': encrypted_preserved
+            }
+
+        except Exception as e:
+            return {'success': False, 'error': str(e)}
+
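
Reviewer note: the encrypted-data test above deliberately never decrypts anything; it only asserts that `ENC:`-prefixed strings survive the round trip byte-for-byte, so no key material is needed. If a cheap sanity check on the ciphertext envelope were wanted, the base64 payload can be validated without decryption. A sketch, assuming the payload after `ENC:` is base64 in either the standard or URL-safe alphabet (which holds for the fixtures above):

# Sketch only: validate the ENC: envelope without decrypting it.
import base64
import binascii

def looks_like_enc_value(value: str) -> bool:
    """True if value is 'ENC:' plus decodable base64 (standard or URL-safe)."""
    if not value.startswith('ENC:'):
        return False
    payload = value[4:]
    for altchars in (None, b'-_'):  # try standard, then URL-safe alphabet
        try:
            base64.b64decode(payload, altchars=altchars, validate=True)
            return True
        except (binascii.Error, ValueError):
            continue
    return False
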
+    def test_schema_validation(self) -> Dict[str, Any]:
+        """Test database schema validation and integrity."""
+        try:
+            self.logger.info("Testing schema validation")
+
+            # Test schema validation
+            schema_valid = self.schema_manager.validate_schema()
+
+            # Get schema information
+            schema_info = self.schema_manager.get_schema_info()
+
+            # Test schema repair (if needed)
+            repair_success = True
+            if not schema_valid:
+                repair_success = self.schema_manager.repair_schema()
+
+            return {
+                'success': schema_valid and repair_success,
+                'schema_valid': schema_valid,
+                'repair_success': repair_success,
+                'schema_info': schema_info
+            }
+
+        except Exception as e:
+            return {'success': False, 'error': str(e)}
+
+    # Helper methods for test data generation
+
+    def _get_production_tool_configurations(self) -> Dict[str, Dict[str, Any]]:
+        """Get production-like tool configurations for testing."""
+        return {
+            'Case Tool': {
+                'mode': 'Upper',
+                'exclusions': 'a\nan\nand\nas\nat\nbut\nby\nen\nfor\nif\nin\nis\nof\non\nor\nthe\nto\nvia\nvs'
+            },
+            'Base64 Encoder/Decoder': {
+                'mode': 'encode'
+            },
+            'JSON/XML Tool': {
+                'operation': 'json_to_xml',
+                'json_indent': 2,
+                'xml_indent': 2,
+                'preserve_attributes': True,
+                'sort_keys': False,
+                'array_wrapper': 'item',
+                'root_element': 'root',
+                'jsonpath_query': '$',
+                'xpath_query': '//*'
+            },
+            'cURL Tool': {
+                'default_timeout': 90,
+                'follow_redirects': True,
+                'verify_ssl': False,
+                'max_redirects': 10,
+                'user_agent': 'Test Agent',
+                'save_history': True,
+                'max_history_items': 100,
+                'history': [
+                    {
+                        'timestamp': '2025-10-08T21:54:15.103533',
+                        'method': 'POST',
+                        'url': 'https://test.api.com/data',
+                        'status_code': 201,
+                        'response_time': 0.8,
+                        'success': True,
+                        'headers': {'Content-Type': 'application/json'},
+                        'body': '{"test": "data"}',
+                        'auth_type': 'Bearer Token',
+                        'response_preview': '{"id": 123}',
+                        'response_size': 50,
+                        'content_type': 'application/json'
+                    }
+                ],
+                'collections': {}
+            },
+            'Generator Tools': {
+                'Strong Password Generator': {
+                    'length': 20,
+                    'numbers': '',
+                    'symbols': '',
+                    'letters_percent': 70,
+                    'numbers_percent': 20,
+                    'symbols_percent': 10
+                },
+                'UUID/GUID Generator': {
+                    'version': 4,
+                    'format': 'standard',
+                    'case': 'lowercase',
+                    'count': 1
+                }
+            },
+            'Google AI': {
+                'API_KEY': 'test_key',
+                'MODEL': 'gemini-1.5-pro-latest',
+                'MODELS_LIST': ['gemini-1.5-pro-latest', 'gemini-1.5-flash-latest'],
+                'system_prompt': 'You are a helpful assistant.',
+                'temperature': 0.7,
+                'topK': 40,
+                'topP': 0.95,
+                'candidateCount': 1,
+                'maxOutputTokens': 8192
+            },
+            'Anthropic AI': {
+                'API_KEY': 'test_key',
+                'MODEL': 'claude-3-5-sonnet-20240620',
+                'MODELS_LIST': ['claude-3-5-sonnet-20240620', 'claude-3-opus-20240229'],
+                'system': 'You are a helpful assistant.',
+                'max_tokens': 4096,
+                'temperature': 0.7
+            },
+            'OpenAI': {
+                'API_KEY': 'test_key',
+                'MODEL': 'gpt-4o',
+                'MODELS_LIST': ['gpt-4o', 'gpt-4-turbo', 'gpt-3.5-turbo'],
+                'system_prompt': 'You are a helpful assistant.',
+                'temperature': 0.7,
+                'max_tokens': 4096
+            },
+            'Folder File Reporter': {
+                'last_input_folder': '/test/input',
+                'last_output_folder': '/test/output',
+                'field_selections': {
+                    'path': False,
+                    'name': True,
+                    'size': True,
+                    'date_modified': False
+                },
+                'separator': ' | ',
+                'folders_only': False,
+                'recursion_mode': 'full',
+                'size_format': 'human'
+            },
+            'Find & Replace Text': {
+                'find': '',
+                'replace': '',
+                'mode': 'Text',
+                'option': 'ignore_case',
+                'find_history': [],
+                'replace_history': []
+            }
+        }
+
+    def _get_sample_settings_data(self) -> Dict[str, Any]:
+        """Get sample settings data for testing."""
+        return {
+            'export_path': 'C:\\Users\\Test\\Downloads',
+            'debug_level': 'DEBUG',
+            'selected_tool': 'Test Tool',
+            'active_input_tab': 0,
+            'active_output_tab': 0,
+            'input_tabs': ['test input'] + [''] * 6,
+            'output_tabs': ['test output'] + [''] * 6,
+            'tool_settings': {
+                'Test Tool': {
+                    'setting1': 'value1',
+                    'setting2': 42,
+                    'setting3': True
+                }
+            }
+        }
+
+    def _get_comprehensive_test_data(self) -> Dict[str, Any]:
+        """Get comprehensive test data including all major components."""
+        return {
+            'export_path': 'C:\\Users\\Test\\Downloads',
+            'debug_level': 'DEBUG',
+            'selected_tool': 'JSON/XML Tool',
+            'active_input_tab': 1,
+            'active_output_tab': 0,
+            'input_tabs': ['', 'test input', '', '', '', '', ''],
+            'output_tabs': ['test output', '', '', '', '', '', ''],
+            'tool_settings': self._get_production_tool_configurations(),
+            'performance_settings': {
+                'mode': 'automatic',
+                'async_processing': {
+                    'enabled': True,
+                    'threshold_kb': 10,
+                    'max_workers': 2
+                },
+                'caching': {
+                    'enabled': True,
+                    'stats_cache_size': 1000
+                }
+            },
+            'font_settings': {
+                'text_font': {
+                    'family': 'Source Code Pro',
+                    'size': 11,
+                    'fallback_family': 'Consolas'
+                }
+            },
+            'dialog_settings': {
+                'success': {
+                    'enabled': False,
+                    'description': 'Success notifications'
+                },
+                'error': {
+                    'enabled': True,
+                    'locked': True,
+                    'description': 'Error messages'
+                }
+            }
+        }
+
+    def _test_single_tool_migration(self, tool_name: str, tool_config: Dict[str, Any]) -> Dict[str, Any]:
+        """Test migration for a single tool configuration."""
+        try:
+            # Create test data with just this tool
+            test_data = {
+                'export_path': 'test',
+                'tool_settings': {tool_name: tool_config}
+            }
+
+            test_file = self._create_temp_json_file(test_data, f"tool_test_{tool_name.replace(' ', '_')}.json")
+
+            # Test migration
+            migration_success = self.migration_manager.migrate_from_json(test_file)
+
+            if not migration_success:
+                return {'success': False, 'error': 'Migration failed'}
+
+            # Test reverse migration
+            reverse_file = self._create_temp_file_path(f"tool_reverse_{tool_name.replace(' ', '_')}.json")
+            reverse_success = self.migration_manager.migrate_to_json(reverse_file)
+
+            if not reverse_success:
+                return {'success': False, 'error': 'Reverse migration failed'}
+
+            # Validate data integrity
+            with open(reverse_file, 'r', encoding='utf-8') as f:
+                restored_data = json.load(f)
+
+            tool_preserved = (tool_name in restored_data.get('tool_settings', {}) and
+                              restored_data['tool_settings'][tool_name] == tool_config)
+
+            return {
+                'success': tool_preserved,
+                'migration_success': migration_success,
+                'reverse_migration_success': reverse_success,
+                'data_preserved': tool_preserved
+            }
+
+        except Exception as e:
+            return {'success': False, 'error': str(e)}
+
+    def _test_edge_case_migration(self, case_name: str, test_data: Dict[str, Any]) -> Dict[str, Any]:
+        """Test migration for an edge case scenario."""
+        try:
+            test_file = self._create_temp_json_file(test_data, f"edge_case_{case_name}.json")
+
+            # Attempt migration (may fail gracefully)
+            try:
+                migration_success = self.migration_manager.migrate_from_json(test_file, validate=False)
+
+                return {
+                    'handled_gracefully': True,
+                    'migration_success': migration_success,
+                    'error': None
+                }
+
+            except Exception as migration_error:
+                # Edge cases may cause exceptions - this is acceptable
+                return {
+                    'handled_gracefully': True,
+                    'migration_success': False,
+                    'error': str(migration_error),
+                    'exception_handled': True
+                }
+
+        except Exception as e:
+            return {
+                'handled_gracefully': False,
+                'error': str(e)
+            }
+
+    def _test_performance_scenario(self, test_name: str, tool_count: int, settings_per_tool: int) -> Dict[str, Any]:
+        """Test performance for a specific scenario."""
+        try:
+            # Generate performance test data
+            test_data = self._create_large_dataset(tool_count, settings_per_tool)
+            test_file = self._create_temp_json_file(test_data, f"perf_{test_name}.json")
+
+            # Measure migration performance
+            start_time = datetime.now()
+            migration_success = self.migration_manager.migrate_from_json(test_file, validate=False)
+            migration_time = (datetime.now() - start_time).total_seconds()
+
+            # Measure reverse migration performance
+            reverse_file = self._create_temp_file_path(f"perf_reverse_{test_name}.json")
+            start_time = datetime.now()
+            reverse_success = self.migration_manager.migrate_to_json(reverse_file)
+            reverse_time = (datetime.now() - start_time).total_seconds()
+
+            return {
+                'success': migration_success and reverse_success,
+                'tool_count': tool_count,
+                'settings_per_tool': settings_per_tool,
+                'total_settings': tool_count * settings_per_tool,
+                'migration_time': migration_time,
+                'reverse_time': reverse_time,
+                'total_time': migration_time + reverse_time,
+                'settings_per_second': (tool_count * settings_per_tool) / (migration_time + reverse_time) if (migration_time + reverse_time) > 0 else 0
+            }
+
+        except Exception as e:
+            return {'success': False, 'error': str(e)}
+
+    def _create_large_dataset(self, tool_count: int, settings_per_tool: int) -> Dict[str, Any]:
+        """Create large dataset for performance testing."""
+        data = {
+            'export_path': 'test',
+            'input_tabs': [''] * 7,
+            'output_tabs': [''] * 7,
+            'tool_settings': {}
+        }
+
+        for i in range(tool_count):
+            tool_settings = {}
+            for j in range(settings_per_tool):
+                tool_settings[f'setting_{j}'] = f'value_{i}_{j}'
+
+            # Add some complex nested structures
+            tool_settings['nested'] = {
+                'level1': {
+                    'level2': {
+                        'array': [f'item_{k}' for k in range(10)],
+                        'number': i * j if j > 0 else i,
+                        'boolean': (i + j) % 2 == 0
+                    }
+                }
+            }
+
+            data['tool_settings'][f'Tool_{i}'] = tool_settings
+
+        return data
+
+    def _create_circular_reference_data(self) -> Dict[str, Any]:
+        """Create data with circular references (should be handled gracefully)."""
+        # Note: JSON doesn't support circular references, so this creates deeply nested structure
+        data = {'export_path': 'test', 'tool_settings': {}}
+
+        # Create deeply nested structure that might cause issues
+        nested = data
+        for i in range(100):
+            nested['next'] = {'level': i}
+            nested = nested['next']
+
+        return data
+
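
Reviewer note: as the comment inside `_create_circular_reference_data` says, JSON cannot represent a genuine cycle, so the helper substitutes a 100-level chain. For reference, a real cycle never even reaches the migration code, because the standard library refuses to serialize it:

# A genuine cycle fails at json.dumps time with ValueError.
import json

data = {'export_path': 'test'}
data['self'] = data  # real circular reference, unlike the chain above

try:
    json.dumps(data)
except ValueError as exc:
    print(exc)  # "Circular reference detected"
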
+    def _create_deeply_nested_data(self) -> Dict[str, Any]:
+        """Create extremely nested data structure."""
+        data = {
+            'export_path': 'test',
+            'tool_settings': {
+                'Nested Tool': {}
+            }
+        }
+
+        # Create 50 levels of nesting
+        nested = data['tool_settings']['Nested Tool']
+        for i in range(50):
+            nested[f'level_{i}'] = {}
+            nested = nested[f'level_{i}']
+
+        nested['final_value'] = 'deep_value'
+
+        return data
+
+    def _create_special_characters_data(self) -> Dict[str, Any]:
+        """Create data with special characters and edge cases."""
+        return {
+            'export_path': 'test',
+            'tool_settings': {
+                'Special Chars Tool': {
+                    'null_bytes': 'test\x00null',
+                    'control_chars': 'test\x01\x02\x03control',
+                    'quotes': 'test"with\'quotes',
+                    'backslashes': 'test\\with\\backslashes',
+                    'newlines': 'test\nwith\nnewlines',
+                    'tabs': 'test\twith\ttabs',
+                    'unicode_escape': 'test\\u0041unicode'
+                }
+            }
+        }
+
+    def _create_large_strings_data(self) -> Dict[str, Any]:
+        """Create data with very large string values."""
+        large_string = 'x' * 1000000  # 1MB string
+
+        return {
+            'export_path': 'test',
+            'tool_settings': {
+                'Large String Tool': {
+                    'large_value': large_string,
+                    'large_array': [large_string[:1000] for _ in range(1000)]
+                }
+            }
+        }
+
+    def _create_invalid_unicode_data(self) -> Dict[str, Any]:
+        """Create data with potentially problematic Unicode."""
+        return {
+            'export_path': 'test',
+            'tool_settings': {
+                'Unicode Edge Cases': {
+                    'surrogate_pairs': '𝕳𝖊𝖑𝖑𝖔 𝖂𝖔𝖗𝖑𝖉',
+                    'combining_chars': 'e\u0301\u0302\u0303',  # e with multiple combining marks
+                    'rtl_text': 'مرحبا بالعالم',
+                    'mixed_scripts': 'Hello世界Мир',
+                    'zero_width': 'test\u200bzero\u200bwidth'
+                }
+            }
+        }
+
+    def _create_mixed_types_data(self) -> Dict[str, Any]:
+        """Create data with mixed and edge case data types."""
+        return {
+            'export_path': 'test',
+            'tool_settings': {
+                'Mixed Types Tool': {
+                    'string': 'test',
+                    'integer': 42,
+                    'float': 3.14159,
+                    'boolean_true': True,
+                    'boolean_false': False,
+                    'null_value': None,
+                    'empty_string': '',
+                    'empty_array': [],
+                    'empty_object': {},
+                    'large_number': 9223372036854775807,  # Max int64
+                    'small_number': -9223372036854775808,
+                    'scientific_notation': 1.23e-10
+                }
+            }
+        }
+
+    def _create_temp_json_file(self, data: Dict[str, Any], filename: str) -> str:
+        """Create temporary JSON file with test data."""
+        temp_path = self._create_temp_file_path(filename)
+
+        with open(temp_path, 'w', encoding='utf-8') as f:
+            json.dump(data, f, indent=2, ensure_ascii=False)
+
+        self.temp_files.append(temp_path)
+        return temp_path
+
+    def _create_temp_file_path(self, filename: str) -> str:
+        """Create temporary file path."""
+        temp_dir = tempfile.gettempdir()
+        return os.path.join(temp_dir, f"migration_test_{filename}")
+
+    def _compare_unicode_data(self, original: Dict[str, Any], restored: Dict[str, Any]) -> bool:
+        """Compare Unicode data preservation."""
+        try:
+            # Convert to JSON strings for comparison
+            original_str = json.dumps(original, sort_keys=True, ensure_ascii=False)
+            restored_str = json.dumps(restored, sort_keys=True, ensure_ascii=False)
+
+            return original_str == restored_str
+
+        except Exception:
+            return False
+
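
Reviewer note: `_compare_unicode_data` compares the serialized JSON strings exactly, so canonically equivalent but differently composed text (for example the combining-marks fixture built earlier) would register as a mismatch if any storage layer normalizes Unicode. That strictness is probably intended here; if it ever produces false failures, comparing under NFC normalization is one option (a sketch, not part of this package):

# Sketch only: NFC-normalized comparison of two settings structures.
import json
import unicodedata

def compare_nfc(original, restored) -> bool:
    """Compare two structures after NFC-normalizing their JSON forms."""
    a = json.dumps(original, sort_keys=True, ensure_ascii=False)
    b = json.dumps(restored, sort_keys=True, ensure_ascii=False)
    return unicodedata.normalize('NFC', a) == unicodedata.normalize('NFC', b)
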
+    def _validate_encrypted_data_preservation(self, original: Dict[str, Any], restored: Dict[str, Any]) -> bool:
+        """Validate that encrypted data (ENC: prefixed) is preserved."""
+        try:
+            def find_encrypted_values(data, path=""):
+                encrypted = {}
+                if isinstance(data, dict):
+                    for key, value in data.items():
+                        current_path = f"{path}.{key}" if path else key
+                        if isinstance(value, str) and value.startswith('ENC:'):
+                            encrypted[current_path] = value
+                        elif isinstance(value, (dict, list)):
+                            encrypted.update(find_encrypted_values(value, current_path))
+                elif isinstance(data, list):
+                    for i, item in enumerate(data):
+                        current_path = f"{path}[{i}]"
+                        if isinstance(item, (dict, list)):
+                            encrypted.update(find_encrypted_values(item, current_path))
+                return encrypted
+
+            original_encrypted = find_encrypted_values(original)
+            restored_encrypted = find_encrypted_values(restored)
+
+            return original_encrypted == restored_encrypted
+
+        except Exception:
+            return False
+
+    def cleanup_test_environment(self) -> None:
+        """Clean up test environment and temporary files."""
+        try:
+            # Close database connections
+            if self.connection_manager:
+                self.connection_manager.close_all_connections()
+
+            # Remove temporary files
+            for temp_file in self.temp_files:
+                try:
+                    if os.path.exists(temp_file):
+                        os.remove(temp_file)
+                except Exception as e:
+                    self.logger.warning(f"Failed to remove temp file {temp_file}: {e}")
+
+            self.logger.info("Test environment cleanup completed")
+
+        except Exception as e:
+            self.logger.warning(f"Test cleanup failed: {e}")
+
+
+# Convenience function for running tests
+def run_comprehensive_migration_tests() -> Dict[str, Any]:
+    """
+    Run comprehensive migration tests and return results.
+
+    Returns:
+        Dictionary with test results and summary
+    """
+    test_suite = MigrationTestSuite()
+    return test_suite.run_all_tests()
+
+
+if __name__ == "__main__":
+    # Run tests if executed directly
+    logging.basicConfig(level=logging.INFO)
+    results = run_comprehensive_migration_tests()
+
+    print("\n" + "="*80)
+    print("MIGRATION TEST SUITE RESULTS")
+    print("="*80)
+
+    summary = results.get('test_summary', {})
+    print(f"Total Categories: {summary.get('total_categories', 0)}")
+    print(f"Passed Categories: {summary.get('passed_categories', 0)}")
+    print(f"Failed Categories: {summary.get('failed_categories', 0)}")
+    print(f"Overall Success: {results.get('overall_success', False)}")
+
+    if not results.get('overall_success', False):
+        print("\nFAILED CATEGORIES:")
+        for category, result in results.get('category_results', {}).items():
+            if not result.get('success', False):
+                print(f" - {category}: {result.get('error', 'Unknown error')}")
+
     print("\n" + "="*80)
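
Reviewer note: `unittest` is imported at the top of the new file but appears unused; either the import should go, or the suite could be wrapped so standard runners can discover it. A hypothetical wrapper sketch (not part of the package; the import path is assumed from this diff's layout):

# Sketch only: let `python -m unittest` drive the whole suite.
import unittest

from core.migration_test_suite import MigrationTestSuite

class TestMigrationSuite(unittest.TestCase):
    def test_all_categories(self):
        results = MigrationTestSuite().run_all_tests()
        self.assertTrue(
            results.get('overall_success', False),
            msg=f"failed categories: {results.get('category_results', {})}",
        )

if __name__ == "__main__":
    unittest.main()
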