pomera-ai-commander 1.1.1 → 1.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -21
- package/README.md +105 -680
- package/bin/pomera-ai-commander.js +62 -62
- package/core/__init__.py +65 -65
- package/core/app_context.py +482 -482
- package/core/async_text_processor.py +421 -421
- package/core/backup_manager.py +655 -655
- package/core/backup_recovery_manager.py +1199 -1033
- package/core/content_hash_cache.py +508 -508
- package/core/context_menu.py +313 -313
- package/core/data_directory.py +549 -0
- package/core/data_validator.py +1066 -1066
- package/core/database_connection_manager.py +744 -744
- package/core/database_curl_settings_manager.py +608 -608
- package/core/database_promera_ai_settings_manager.py +446 -446
- package/core/database_schema.py +411 -411
- package/core/database_schema_manager.py +395 -395
- package/core/database_settings_manager.py +1507 -1507
- package/core/database_settings_manager_interface.py +456 -456
- package/core/dialog_manager.py +734 -734
- package/core/diff_utils.py +239 -0
- package/core/efficient_line_numbers.py +540 -510
- package/core/error_handler.py +746 -746
- package/core/error_service.py +431 -431
- package/core/event_consolidator.py +511 -511
- package/core/mcp/__init__.py +43 -43
- package/core/mcp/find_replace_diff.py +334 -0
- package/core/mcp/protocol.py +288 -288
- package/core/mcp/schema.py +251 -251
- package/core/mcp/server_stdio.py +299 -299
- package/core/mcp/tool_registry.py +2699 -2345
- package/core/memento.py +275 -0
- package/core/memory_efficient_text_widget.py +711 -711
- package/core/migration_manager.py +914 -914
- package/core/migration_test_suite.py +1085 -1085
- package/core/migration_validator.py +1143 -1143
- package/core/optimized_find_replace.py +714 -714
- package/core/optimized_pattern_engine.py +424 -424
- package/core/optimized_search_highlighter.py +552 -552
- package/core/performance_monitor.py +674 -674
- package/core/persistence_manager.py +712 -712
- package/core/progressive_stats_calculator.py +632 -632
- package/core/regex_pattern_cache.py +529 -529
- package/core/regex_pattern_library.py +350 -350
- package/core/search_operation_manager.py +434 -434
- package/core/settings_defaults_registry.py +1087 -1087
- package/core/settings_integrity_validator.py +1111 -1111
- package/core/settings_serializer.py +557 -557
- package/core/settings_validator.py +1823 -1823
- package/core/smart_stats_calculator.py +709 -709
- package/core/statistics_update_manager.py +619 -619
- package/core/stats_config_manager.py +858 -858
- package/core/streaming_text_handler.py +723 -723
- package/core/task_scheduler.py +596 -596
- package/core/update_pattern_library.py +168 -168
- package/core/visibility_monitor.py +596 -596
- package/core/widget_cache.py +498 -498
- package/mcp.json +51 -61
- package/migrate_data.py +127 -0
- package/package.json +64 -57
- package/pomera.py +7883 -7482
- package/pomera_mcp_server.py +183 -144
- package/requirements.txt +33 -0
- package/scripts/Dockerfile.alpine +43 -0
- package/scripts/Dockerfile.gui-test +54 -0
- package/scripts/Dockerfile.linux +43 -0
- package/scripts/Dockerfile.test-linux +80 -0
- package/scripts/Dockerfile.ubuntu +39 -0
- package/scripts/README.md +53 -0
- package/scripts/build-all.bat +113 -0
- package/scripts/build-docker.bat +53 -0
- package/scripts/build-docker.sh +55 -0
- package/scripts/build-optimized.bat +101 -0
- package/scripts/build.sh +78 -0
- package/scripts/docker-compose.test.yml +27 -0
- package/scripts/docker-compose.yml +32 -0
- package/scripts/postinstall.js +62 -0
- package/scripts/requirements-minimal.txt +33 -0
- package/scripts/test-linux-simple.bat +28 -0
- package/scripts/validate-release-workflow.py +450 -0
- package/tools/__init__.py +4 -4
- package/tools/ai_tools.py +2891 -2891
- package/tools/ascii_art_generator.py +352 -352
- package/tools/base64_tools.py +183 -183
- package/tools/base_tool.py +511 -511
- package/tools/case_tool.py +308 -308
- package/tools/column_tools.py +395 -395
- package/tools/cron_tool.py +884 -884
- package/tools/curl_history.py +600 -600
- package/tools/curl_processor.py +1207 -1207
- package/tools/curl_settings.py +502 -502
- package/tools/curl_tool.py +5467 -5467
- package/tools/diff_viewer.py +1817 -1072
- package/tools/email_extraction_tool.py +248 -248
- package/tools/email_header_analyzer.py +425 -425
- package/tools/extraction_tools.py +250 -250
- package/tools/find_replace.py +2289 -1750
- package/tools/folder_file_reporter.py +1463 -1463
- package/tools/folder_file_reporter_adapter.py +480 -480
- package/tools/generator_tools.py +1216 -1216
- package/tools/hash_generator.py +255 -255
- package/tools/html_tool.py +656 -656
- package/tools/jsonxml_tool.py +729 -729
- package/tools/line_tools.py +419 -419
- package/tools/markdown_tools.py +561 -561
- package/tools/mcp_widget.py +1417 -1417
- package/tools/notes_widget.py +978 -973
- package/tools/number_base_converter.py +372 -372
- package/tools/regex_extractor.py +571 -571
- package/tools/slug_generator.py +310 -310
- package/tools/sorter_tools.py +458 -458
- package/tools/string_escape_tool.py +392 -392
- package/tools/text_statistics_tool.py +365 -365
- package/tools/text_wrapper.py +430 -430
- package/tools/timestamp_converter.py +421 -421
- package/tools/tool_loader.py +710 -710
- package/tools/translator_tools.py +522 -522
- package/tools/url_link_extractor.py +261 -261
- package/tools/url_parser.py +204 -204
- package/tools/whitespace_tools.py +355 -355
- package/tools/word_frequency_counter.py +146 -146
- package/core/__pycache__/__init__.cpython-313.pyc +0 -0
- package/core/__pycache__/app_context.cpython-313.pyc +0 -0
- package/core/__pycache__/async_text_processor.cpython-313.pyc +0 -0
- package/core/__pycache__/backup_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/backup_recovery_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/content_hash_cache.cpython-313.pyc +0 -0
- package/core/__pycache__/context_menu.cpython-313.pyc +0 -0
- package/core/__pycache__/data_validator.cpython-313.pyc +0 -0
- package/core/__pycache__/database_connection_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_curl_settings_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_promera_ai_settings_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_schema.cpython-313.pyc +0 -0
- package/core/__pycache__/database_schema_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_settings_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_settings_manager_interface.cpython-313.pyc +0 -0
- package/core/__pycache__/dialog_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/efficient_line_numbers.cpython-313.pyc +0 -0
- package/core/__pycache__/error_handler.cpython-313.pyc +0 -0
- package/core/__pycache__/error_service.cpython-313.pyc +0 -0
- package/core/__pycache__/event_consolidator.cpython-313.pyc +0 -0
- package/core/__pycache__/memory_efficient_text_widget.cpython-313.pyc +0 -0
- package/core/__pycache__/migration_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/migration_test_suite.cpython-313.pyc +0 -0
- package/core/__pycache__/migration_validator.cpython-313.pyc +0 -0
- package/core/__pycache__/optimized_find_replace.cpython-313.pyc +0 -0
- package/core/__pycache__/optimized_pattern_engine.cpython-313.pyc +0 -0
- package/core/__pycache__/optimized_search_highlighter.cpython-313.pyc +0 -0
- package/core/__pycache__/performance_monitor.cpython-313.pyc +0 -0
- package/core/__pycache__/persistence_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/progressive_stats_calculator.cpython-313.pyc +0 -0
- package/core/__pycache__/regex_pattern_cache.cpython-313.pyc +0 -0
- package/core/__pycache__/regex_pattern_library.cpython-313.pyc +0 -0
- package/core/__pycache__/search_operation_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/settings_defaults_registry.cpython-313.pyc +0 -0
- package/core/__pycache__/settings_integrity_validator.cpython-313.pyc +0 -0
- package/core/__pycache__/settings_serializer.cpython-313.pyc +0 -0
- package/core/__pycache__/settings_validator.cpython-313.pyc +0 -0
- package/core/__pycache__/smart_stats_calculator.cpython-313.pyc +0 -0
- package/core/__pycache__/statistics_update_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/stats_config_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/streaming_text_handler.cpython-313.pyc +0 -0
- package/core/__pycache__/task_scheduler.cpython-313.pyc +0 -0
- package/core/__pycache__/visibility_monitor.cpython-313.pyc +0 -0
- package/core/__pycache__/widget_cache.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/__init__.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/protocol.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/schema.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/server_stdio.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/tool_registry.cpython-313.pyc +0 -0
- package/tools/__pycache__/__init__.cpython-313.pyc +0 -0
- package/tools/__pycache__/ai_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/ascii_art_generator.cpython-313.pyc +0 -0
- package/tools/__pycache__/base64_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/base_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/case_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/column_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/cron_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/curl_history.cpython-313.pyc +0 -0
- package/tools/__pycache__/curl_processor.cpython-313.pyc +0 -0
- package/tools/__pycache__/curl_settings.cpython-313.pyc +0 -0
- package/tools/__pycache__/curl_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/diff_viewer.cpython-313.pyc +0 -0
- package/tools/__pycache__/email_extraction_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/email_header_analyzer.cpython-313.pyc +0 -0
- package/tools/__pycache__/extraction_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/find_replace.cpython-313.pyc +0 -0
- package/tools/__pycache__/folder_file_reporter.cpython-313.pyc +0 -0
- package/tools/__pycache__/folder_file_reporter_adapter.cpython-313.pyc +0 -0
- package/tools/__pycache__/generator_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/hash_generator.cpython-313.pyc +0 -0
- package/tools/__pycache__/html_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/huggingface_helper.cpython-313.pyc +0 -0
- package/tools/__pycache__/jsonxml_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/line_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/list_comparator.cpython-313.pyc +0 -0
- package/tools/__pycache__/markdown_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/mcp_widget.cpython-313.pyc +0 -0
- package/tools/__pycache__/notes_widget.cpython-313.pyc +0 -0
- package/tools/__pycache__/number_base_converter.cpython-313.pyc +0 -0
- package/tools/__pycache__/regex_extractor.cpython-313.pyc +0 -0
- package/tools/__pycache__/slug_generator.cpython-313.pyc +0 -0
- package/tools/__pycache__/sorter_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/string_escape_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/text_statistics_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/text_wrapper.cpython-313.pyc +0 -0
- package/tools/__pycache__/timestamp_converter.cpython-313.pyc +0 -0
- package/tools/__pycache__/tool_loader.cpython-313.pyc +0 -0
- package/tools/__pycache__/translator_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/url_link_extractor.cpython-313.pyc +0 -0
- package/tools/__pycache__/url_parser.cpython-313.pyc +0 -0
- package/tools/__pycache__/whitespace_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/word_frequency_counter.cpython-313.pyc +0 -0
|
@@ -1,1112 +1,1112 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Settings Integrity Validator for Database Migration
|
|
3
|
-
|
|
4
|
-
This module provides comprehensive validation tools for settings integrity,
|
|
5
|
-
ensuring data consistency and detecting corruption or invalid configurations
|
|
6
|
-
across the entire settings system.
|
|
7
|
-
|
|
8
|
-
Features:
|
|
9
|
-
- Deep validation of settings structure and content
|
|
10
|
-
- Cross-reference validation between related settings
|
|
11
|
-
- Tool-specific configuration validation
|
|
12
|
-
- Performance settings validation
|
|
13
|
-
- Tab content integrity checks
|
|
14
|
-
- Encrypted data validation
|
|
15
|
-
"""
|
|
16
|
-
|
|
17
|
-
import json
|
|
18
|
-
import re
|
|
19
|
-
import logging
|
|
20
|
-
from typing import Dict, List, Tuple, Any, Optional, Union, Set
|
|
21
|
-
from datetime import datetime
|
|
22
|
-
from dataclasses import dataclass
|
|
23
|
-
from enum import Enum
|
|
24
|
-
from pathlib import Path
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
class IntegrityLevel(Enum):
|
|
28
|
-
"""Levels of integrity validation."""
|
|
29
|
-
BASIC = "basic"
|
|
30
|
-
STANDARD = "standard"
|
|
31
|
-
COMPREHENSIVE = "comprehensive"
|
|
32
|
-
STRICT = "strict"
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
class IntegrityIssueType(Enum):
|
|
36
|
-
"""Types of integrity issues."""
|
|
37
|
-
MISSING_REQUIRED = "missing_required"
|
|
38
|
-
INVALID_TYPE = "invalid_type"
|
|
39
|
-
INVALID_VALUE = "invalid_value"
|
|
40
|
-
INCONSISTENT_DATA = "inconsistent_data"
|
|
41
|
-
CORRUPTED_DATA = "corrupted_data"
|
|
42
|
-
SECURITY_ISSUE = "security_issue"
|
|
43
|
-
PERFORMANCE_ISSUE = "performance_issue"
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
@dataclass
|
|
47
|
-
class IntegrityIssue:
|
|
48
|
-
"""Information about an integrity issue."""
|
|
49
|
-
issue_type: IntegrityIssueType
|
|
50
|
-
severity: str # "low", "medium", "high", "critical"
|
|
51
|
-
message: str
|
|
52
|
-
location: str # Path to the problematic setting
|
|
53
|
-
expected_value: Optional[Any] = None
|
|
54
|
-
actual_value: Optional[Any] = None
|
|
55
|
-
suggestion: Optional[str] = None
|
|
56
|
-
auto_fixable: bool = False
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
class SettingsIntegrityValidator:
|
|
60
|
-
"""
|
|
61
|
-
Comprehensive settings integrity validator.
|
|
62
|
-
|
|
63
|
-
Validates settings data structure, content, and consistency
|
|
64
|
-
to ensure system stability and security.
|
|
65
|
-
"""
|
|
66
|
-
|
|
67
|
-
def __init__(self, validation_level: IntegrityLevel = IntegrityLevel.STANDARD):
|
|
68
|
-
"""
|
|
69
|
-
Initialize the integrity validator.
|
|
70
|
-
|
|
71
|
-
Args:
|
|
72
|
-
validation_level: Level of validation to perform
|
|
73
|
-
"""
|
|
74
|
-
self.validation_level = validation_level
|
|
75
|
-
self.logger = logging.getLogger(__name__)
|
|
76
|
-
|
|
77
|
-
# Validation rules
|
|
78
|
-
self._core_settings_rules = self._initialize_core_settings_rules()
|
|
79
|
-
self._tool_settings_rules = self._initialize_tool_settings_rules()
|
|
80
|
-
self._performance_rules = self._initialize_performance_rules()
|
|
81
|
-
self._security_rules = self._initialize_security_rules()
|
|
82
|
-
|
|
83
|
-
# Known tool configurations
|
|
84
|
-
self._known_tools = {
|
|
85
|
-
'AI Tools', 'Base64 Encoder/Decoder', 'Case Tool', 'Cron Tool', 'Diff Viewer',
|
|
86
|
-
'Email Extraction Tool', 'Email Header Analyzer', 'Find & Replace Text',
|
|
87
|
-
'Folder File Reporter', 'Generator Tools', 'HTML Extraction Tool',
|
|
88
|
-
'JSON/XML Tool', 'Sorter Tools', 'Translator Tools', 'URL Parser',
|
|
89
|
-
'URL and Link Extractor', 'Word Frequency Counter',
|
|
90
|
-
# AI Provider configurations (stored as separate tool settings)
|
|
91
|
-
'Google AI', 'Anthropic AI', 'OpenAI', 'AWS Bedrock', 'Cohere AI',
|
|
92
|
-
'HuggingFace AI', 'Groq AI', 'OpenRouterAI', 'LM Studio'
|
|
93
|
-
}
|
|
94
|
-
|
|
95
|
-
# Validation statistics
|
|
96
|
-
self._validation_stats = {
|
|
97
|
-
'total_checks': 0,
|
|
98
|
-
'issues_found': 0,
|
|
99
|
-
'auto_fixes_applied': 0,
|
|
100
|
-
'validation_time': 0.0
|
|
101
|
-
}
|
|
102
|
-
|
|
103
|
-
def validate_settings_integrity(self, settings_data: Dict[str, Any],
|
|
104
|
-
apply_fixes: bool = False) -> List[IntegrityIssue]:
|
|
105
|
-
"""
|
|
106
|
-
Perform comprehensive integrity validation on settings data.
|
|
107
|
-
|
|
108
|
-
Args:
|
|
109
|
-
settings_data: Settings data to validate
|
|
110
|
-
apply_fixes: Whether to apply automatic fixes
|
|
111
|
-
|
|
112
|
-
Returns:
|
|
113
|
-
List of integrity issues found
|
|
114
|
-
"""
|
|
115
|
-
start_time = datetime.now()
|
|
116
|
-
issues = []
|
|
117
|
-
|
|
118
|
-
try:
|
|
119
|
-
self.logger.info(f"Starting settings integrity validation (level: {self.validation_level.value})")
|
|
120
|
-
|
|
121
|
-
# Core settings validation
|
|
122
|
-
core_issues = self._validate_core_settings(settings_data)
|
|
123
|
-
issues.extend(core_issues)
|
|
124
|
-
|
|
125
|
-
# Tool settings validation
|
|
126
|
-
tool_issues = self._validate_tool_settings(settings_data)
|
|
127
|
-
issues.extend(tool_issues)
|
|
128
|
-
|
|
129
|
-
# Tab content validation
|
|
130
|
-
tab_issues = self._validate_tab_content(settings_data)
|
|
131
|
-
issues.extend(tab_issues)
|
|
132
|
-
|
|
133
|
-
# Performance settings validation
|
|
134
|
-
perf_issues = self._validate_performance_settings(settings_data)
|
|
135
|
-
issues.extend(perf_issues)
|
|
136
|
-
|
|
137
|
-
# Font settings validation
|
|
138
|
-
font_issues = self._validate_font_settings(settings_data)
|
|
139
|
-
issues.extend(font_issues)
|
|
140
|
-
|
|
141
|
-
# Dialog settings validation
|
|
142
|
-
dialog_issues = self._validate_dialog_settings(settings_data)
|
|
143
|
-
issues.extend(dialog_issues)
|
|
144
|
-
|
|
145
|
-
# Cross-reference validation
|
|
146
|
-
if self.validation_level in [IntegrityLevel.COMPREHENSIVE, IntegrityLevel.STRICT]:
|
|
147
|
-
cross_ref_issues = self._validate_cross_references(settings_data)
|
|
148
|
-
issues.extend(cross_ref_issues)
|
|
149
|
-
|
|
150
|
-
# Security validation
|
|
151
|
-
security_issues = self._validate_security(settings_data)
|
|
152
|
-
issues.extend(security_issues)
|
|
153
|
-
|
|
154
|
-
# Apply automatic fixes if requested
|
|
155
|
-
if apply_fixes:
|
|
156
|
-
fixed_count = self._apply_automatic_fixes(settings_data, issues)
|
|
157
|
-
self._validation_stats['auto_fixes_applied'] = fixed_count
|
|
158
|
-
|
|
159
|
-
# Update statistics
|
|
160
|
-
validation_time = (datetime.now() - start_time).total_seconds()
|
|
161
|
-
self._validation_stats.update({
|
|
162
|
-
'total_checks': self._validation_stats['total_checks'] + 1,
|
|
163
|
-
'issues_found': len(issues),
|
|
164
|
-
'validation_time': validation_time
|
|
165
|
-
})
|
|
166
|
-
|
|
167
|
-
# Log summary
|
|
168
|
-
self._log_validation_summary(issues, validation_time)
|
|
169
|
-
|
|
170
|
-
return issues
|
|
171
|
-
|
|
172
|
-
except Exception as e:
|
|
173
|
-
self.logger.error(f"Settings integrity validation failed: {e}")
|
|
174
|
-
return [IntegrityIssue(
|
|
175
|
-
issue_type=IntegrityIssueType.CORRUPTED_DATA,
|
|
176
|
-
severity="critical",
|
|
177
|
-
message=f"Validation process failed: {e}",
|
|
178
|
-
location="validation_process"
|
|
179
|
-
)]
|
|
180
|
-
|
|
181
|
-
def validate_tool_configuration(self, tool_name: str,
|
|
182
|
-
tool_config: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
183
|
-
"""
|
|
184
|
-
Validate a specific tool's configuration.
|
|
185
|
-
|
|
186
|
-
Args:
|
|
187
|
-
tool_name: Name of the tool
|
|
188
|
-
tool_config: Tool configuration data
|
|
189
|
-
|
|
190
|
-
Returns:
|
|
191
|
-
List of integrity issues for the tool
|
|
192
|
-
"""
|
|
193
|
-
issues = []
|
|
194
|
-
|
|
195
|
-
try:
|
|
196
|
-
# Check if tool is known
|
|
197
|
-
if tool_name not in self._known_tools:
|
|
198
|
-
issues.append(IntegrityIssue(
|
|
199
|
-
issue_type=IntegrityIssueType.INVALID_VALUE,
|
|
200
|
-
severity="medium",
|
|
201
|
-
message=f"Unknown tool configuration: {tool_name}",
|
|
202
|
-
location=f"tool_settings.{tool_name}",
|
|
203
|
-
suggestion="Verify tool name is correct"
|
|
204
|
-
))
|
|
205
|
-
|
|
206
|
-
# Tool-specific validation
|
|
207
|
-
if tool_name in self._tool_settings_rules:
|
|
208
|
-
rules = self._tool_settings_rules[tool_name]
|
|
209
|
-
tool_issues = self._validate_against_rules(
|
|
210
|
-
tool_config, rules, f"tool_settings.{tool_name}"
|
|
211
|
-
)
|
|
212
|
-
issues.extend(tool_issues)
|
|
213
|
-
|
|
214
|
-
# Special validations for specific tools
|
|
215
|
-
if tool_name == 'cURL Tool':
|
|
216
|
-
curl_issues = self._validate_curl_tool_config(tool_config)
|
|
217
|
-
issues.extend(curl_issues)
|
|
218
|
-
elif tool_name == 'AI Tools':
|
|
219
|
-
ai_issues = self._validate_ai_tools_config(tool_config)
|
|
220
|
-
issues.extend(ai_issues)
|
|
221
|
-
elif tool_name == 'Find & Replace':
|
|
222
|
-
fr_issues = self._validate_find_replace_config(tool_config)
|
|
223
|
-
issues.extend(fr_issues)
|
|
224
|
-
|
|
225
|
-
return issues
|
|
226
|
-
|
|
227
|
-
except Exception as e:
|
|
228
|
-
return [IntegrityIssue(
|
|
229
|
-
issue_type=IntegrityIssueType.CORRUPTED_DATA,
|
|
230
|
-
severity="high",
|
|
231
|
-
message=f"Tool configuration validation failed for {tool_name}: {e}",
|
|
232
|
-
location=f"tool_settings.{tool_name}"
|
|
233
|
-
)]
|
|
234
|
-
|
|
235
|
-
def validate_encrypted_data(self, settings_data: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
236
|
-
"""
|
|
237
|
-
Validate encrypted data integrity.
|
|
238
|
-
|
|
239
|
-
Args:
|
|
240
|
-
settings_data: Settings data to check for encrypted values
|
|
241
|
-
|
|
242
|
-
Returns:
|
|
243
|
-
List of integrity issues related to encrypted data
|
|
244
|
-
"""
|
|
245
|
-
issues = []
|
|
246
|
-
|
|
247
|
-
try:
|
|
248
|
-
# Find all encrypted values (those starting with "ENC:")
|
|
249
|
-
encrypted_values = self._find_encrypted_values(settings_data)
|
|
250
|
-
|
|
251
|
-
for location, value in encrypted_values:
|
|
252
|
-
# Validate encryption format
|
|
253
|
-
if not value.startswith("ENC:"):
|
|
254
|
-
issues.append(IntegrityIssue(
|
|
255
|
-
issue_type=IntegrityIssueType.SECURITY_ISSUE,
|
|
256
|
-
severity="high",
|
|
257
|
-
message=f"Invalid encryption format at {location}",
|
|
258
|
-
location=location,
|
|
259
|
-
actual_value=value[:20] + "..." if len(value) > 20 else value,
|
|
260
|
-
suggestion="Ensure encrypted values start with 'ENC:'"
|
|
261
|
-
))
|
|
262
|
-
continue
|
|
263
|
-
|
|
264
|
-
# Extract encrypted data
|
|
265
|
-
encrypted_data = value[4:] # Remove "ENC:" prefix
|
|
266
|
-
|
|
267
|
-
# Validate base64 format
|
|
268
|
-
if not self._is_valid_base64(encrypted_data):
|
|
269
|
-
issues.append(IntegrityIssue(
|
|
270
|
-
issue_type=IntegrityIssueType.SECURITY_ISSUE,
|
|
271
|
-
severity="high",
|
|
272
|
-
message=f"Invalid base64 encoding in encrypted data at {location}",
|
|
273
|
-
location=location,
|
|
274
|
-
suggestion="Re-encrypt the value to fix encoding"
|
|
275
|
-
))
|
|
276
|
-
|
|
277
|
-
# Check for minimum encryption length
|
|
278
|
-
if len(encrypted_data) < 16:
|
|
279
|
-
issues.append(IntegrityIssue(
|
|
280
|
-
issue_type=IntegrityIssueType.SECURITY_ISSUE,
|
|
281
|
-
severity="medium",
|
|
282
|
-
message=f"Encrypted data appears too short at {location}",
|
|
283
|
-
location=location,
|
|
284
|
-
suggestion="Verify encryption was applied correctly"
|
|
285
|
-
))
|
|
286
|
-
|
|
287
|
-
return issues
|
|
288
|
-
|
|
289
|
-
except Exception as e:
|
|
290
|
-
return [IntegrityIssue(
|
|
291
|
-
issue_type=IntegrityIssueType.CORRUPTED_DATA,
|
|
292
|
-
severity="high",
|
|
293
|
-
message=f"Encrypted data validation failed: {e}",
|
|
294
|
-
location="encrypted_data_validation"
|
|
295
|
-
)]
|
|
296
|
-
|
|
297
|
-
def get_validation_report(self, issues: List[IntegrityIssue]) -> Dict[str, Any]:
|
|
298
|
-
"""
|
|
299
|
-
Generate a comprehensive validation report.
|
|
300
|
-
|
|
301
|
-
Args:
|
|
302
|
-
issues: List of integrity issues
|
|
303
|
-
|
|
304
|
-
Returns:
|
|
305
|
-
Validation report dictionary
|
|
306
|
-
"""
|
|
307
|
-
report = {
|
|
308
|
-
'timestamp': datetime.now().isoformat(),
|
|
309
|
-
'validation_level': self.validation_level.value,
|
|
310
|
-
'total_issues': len(issues),
|
|
311
|
-
'issues_by_type': {},
|
|
312
|
-
'issues_by_severity': {},
|
|
313
|
-
'auto_fixable_issues': 0,
|
|
314
|
-
'critical_issues': [],
|
|
315
|
-
'recommendations': [],
|
|
316
|
-
'validation_statistics': self._validation_stats.copy()
|
|
317
|
-
}
|
|
318
|
-
|
|
319
|
-
# Count by type
|
|
320
|
-
for issue_type in IntegrityIssueType:
|
|
321
|
-
count = len([i for i in issues if i.issue_type == issue_type])
|
|
322
|
-
report['issues_by_type'][issue_type.value] = count
|
|
323
|
-
|
|
324
|
-
# Count by severity
|
|
325
|
-
severities = ['low', 'medium', 'high', 'critical']
|
|
326
|
-
for severity in severities:
|
|
327
|
-
count = len([i for i in issues if i.severity == severity])
|
|
328
|
-
report['issues_by_severity'][severity] = count
|
|
329
|
-
|
|
330
|
-
# Count auto-fixable issues
|
|
331
|
-
report['auto_fixable_issues'] = len([i for i in issues if i.auto_fixable])
|
|
332
|
-
|
|
333
|
-
# Critical issues details
|
|
334
|
-
critical_issues = [i for i in issues if i.severity == 'critical']
|
|
335
|
-
report['critical_issues'] = [
|
|
336
|
-
{
|
|
337
|
-
'type': issue.issue_type.value,
|
|
338
|
-
'message': issue.message,
|
|
339
|
-
'location': issue.location,
|
|
340
|
-
'auto_fixable': issue.auto_fixable
|
|
341
|
-
}
|
|
342
|
-
for issue in critical_issues
|
|
343
|
-
]
|
|
344
|
-
|
|
345
|
-
# All issues details for UI display
|
|
346
|
-
report['all_issues'] = [
|
|
347
|
-
{
|
|
348
|
-
'type': issue.issue_type.value,
|
|
349
|
-
'severity': issue.severity,
|
|
350
|
-
'message': issue.message,
|
|
351
|
-
'location': issue.location,
|
|
352
|
-
'auto_fixable': issue.auto_fixable,
|
|
353
|
-
'suggestion': issue.suggestion,
|
|
354
|
-
'expected_value': issue.expected_value,
|
|
355
|
-
'actual_value': issue.actual_value
|
|
356
|
-
}
|
|
357
|
-
for issue in issues
|
|
358
|
-
]
|
|
359
|
-
|
|
360
|
-
# Generate recommendations
|
|
361
|
-
report['recommendations'] = self._generate_recommendations(issues)
|
|
362
|
-
|
|
363
|
-
return report
|
|
364
|
-
|
|
365
|
-
# Private validation methods
|
|
366
|
-
|
|
367
|
-
def _validate_core_settings(self, settings_data: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
368
|
-
"""Validate core application settings."""
|
|
369
|
-
issues = []
|
|
370
|
-
|
|
371
|
-
# Check required core settings
|
|
372
|
-
required_settings = [
|
|
373
|
-
'export_path', 'debug_level', 'selected_tool',
|
|
374
|
-
'active_input_tab', 'active_output_tab'
|
|
375
|
-
]
|
|
376
|
-
|
|
377
|
-
for setting in required_settings:
|
|
378
|
-
if setting not in settings_data:
|
|
379
|
-
issues.append(IntegrityIssue(
|
|
380
|
-
issue_type=IntegrityIssueType.MISSING_REQUIRED,
|
|
381
|
-
severity="critical",
|
|
382
|
-
message=f"Required core setting '{setting}' is missing",
|
|
383
|
-
location=setting,
|
|
384
|
-
suggestion=f"Add default value for {setting}",
|
|
385
|
-
auto_fixable=True
|
|
386
|
-
))
|
|
387
|
-
|
|
388
|
-
# Validate core settings against rules
|
|
389
|
-
for setting, value in settings_data.items():
|
|
390
|
-
if setting in self._core_settings_rules:
|
|
391
|
-
rules = self._core_settings_rules[setting]
|
|
392
|
-
setting_issues = self._validate_against_rules(
|
|
393
|
-
{setting: value}, {setting: rules}, setting
|
|
394
|
-
)
|
|
395
|
-
issues.extend(setting_issues)
|
|
396
|
-
|
|
397
|
-
return issues
|
|
398
|
-
|
|
399
|
-
def _validate_tool_settings(self, settings_data: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
400
|
-
"""Validate tool settings structure and content."""
|
|
401
|
-
issues = []
|
|
402
|
-
|
|
403
|
-
tool_settings = settings_data.get('tool_settings', {})
|
|
404
|
-
|
|
405
|
-
if not isinstance(tool_settings, dict):
|
|
406
|
-
issues.append(IntegrityIssue(
|
|
407
|
-
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
408
|
-
severity="critical",
|
|
409
|
-
message="tool_settings must be a dictionary",
|
|
410
|
-
location="tool_settings",
|
|
411
|
-
expected_value="dict",
|
|
412
|
-
actual_value=type(tool_settings).__name__,
|
|
413
|
-
auto_fixable=True
|
|
414
|
-
))
|
|
415
|
-
return issues
|
|
416
|
-
|
|
417
|
-
# Validate each tool's configuration
|
|
418
|
-
for tool_name, tool_config in tool_settings.items():
|
|
419
|
-
tool_issues = self.validate_tool_configuration(tool_name, tool_config)
|
|
420
|
-
issues.extend(tool_issues)
|
|
421
|
-
|
|
422
|
-
return issues
|
|
423
|
-
|
|
424
|
-
def _validate_tab_content(self, settings_data: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
425
|
-
"""Validate tab content arrays."""
|
|
426
|
-
issues = []
|
|
427
|
-
|
|
428
|
-
for tab_type in ['input_tabs', 'output_tabs']:
|
|
429
|
-
if tab_type not in settings_data:
|
|
430
|
-
issues.append(IntegrityIssue(
|
|
431
|
-
issue_type=IntegrityIssueType.MISSING_REQUIRED,
|
|
432
|
-
severity="high",
|
|
433
|
-
message=f"Required tab array '{tab_type}' is missing",
|
|
434
|
-
location=tab_type,
|
|
435
|
-
suggestion=f"Add empty {tab_type} array with 7 elements",
|
|
436
|
-
auto_fixable=True
|
|
437
|
-
))
|
|
438
|
-
continue
|
|
439
|
-
|
|
440
|
-
tabs = settings_data[tab_type]
|
|
441
|
-
|
|
442
|
-
if not isinstance(tabs, list):
|
|
443
|
-
issues.append(IntegrityIssue(
|
|
444
|
-
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
445
|
-
severity="high",
|
|
446
|
-
message=f"{tab_type} must be a list",
|
|
447
|
-
location=tab_type,
|
|
448
|
-
expected_value="list",
|
|
449
|
-
actual_value=type(tabs).__name__,
|
|
450
|
-
auto_fixable=True
|
|
451
|
-
))
|
|
452
|
-
continue
|
|
453
|
-
|
|
454
|
-
if len(tabs) != 7:
|
|
455
|
-
issues.append(IntegrityIssue(
|
|
456
|
-
issue_type=IntegrityIssueType.INVALID_VALUE,
|
|
457
|
-
severity="high",
|
|
458
|
-
message=f"{tab_type} must have exactly 7 elements",
|
|
459
|
-
location=tab_type,
|
|
460
|
-
expected_value=7,
|
|
461
|
-
actual_value=len(tabs),
|
|
462
|
-
suggestion="Resize array to 7 elements",
|
|
463
|
-
auto_fixable=True
|
|
464
|
-
))
|
|
465
|
-
|
|
466
|
-
# Validate each tab content
|
|
467
|
-
for i, content in enumerate(tabs):
|
|
468
|
-
if not isinstance(content, str):
|
|
469
|
-
issues.append(IntegrityIssue(
|
|
470
|
-
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
471
|
-
severity="medium",
|
|
472
|
-
message=f"{tab_type}[{i}] must be a string",
|
|
473
|
-
location=f"{tab_type}[{i}]",
|
|
474
|
-
expected_value="string",
|
|
475
|
-
actual_value=type(content).__name__,
|
|
476
|
-
auto_fixable=True
|
|
477
|
-
))
|
|
478
|
-
|
|
479
|
-
return issues
|
|
480
|
-
|
|
481
|
-
def _validate_performance_settings(self, settings_data: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
482
|
-
"""Validate performance settings."""
|
|
483
|
-
issues = []
|
|
484
|
-
|
|
485
|
-
perf_settings = settings_data.get('performance_settings', {})
|
|
486
|
-
|
|
487
|
-
if not isinstance(perf_settings, dict):
|
|
488
|
-
issues.append(IntegrityIssue(
|
|
489
|
-
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
490
|
-
severity="medium",
|
|
491
|
-
message="performance_settings must be a dictionary",
|
|
492
|
-
location="performance_settings",
|
|
493
|
-
expected_value="dict",
|
|
494
|
-
actual_value=type(perf_settings).__name__,
|
|
495
|
-
auto_fixable=True
|
|
496
|
-
))
|
|
497
|
-
return issues
|
|
498
|
-
|
|
499
|
-
# Validate against performance rules
|
|
500
|
-
for category, rules in self._performance_rules.items():
|
|
501
|
-
if category in perf_settings:
|
|
502
|
-
category_issues = self._validate_against_rules(
|
|
503
|
-
perf_settings[category], rules, f"performance_settings.{category}"
|
|
504
|
-
)
|
|
505
|
-
issues.extend(category_issues)
|
|
506
|
-
|
|
507
|
-
return issues
|
|
508
|
-
|
|
509
|
-
def _validate_font_settings(self, settings_data: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
510
|
-
"""Validate font settings."""
|
|
511
|
-
issues = []
|
|
512
|
-
|
|
513
|
-
font_settings = settings_data.get('font_settings', {})
|
|
514
|
-
|
|
515
|
-
if not isinstance(font_settings, dict):
|
|
516
|
-
issues.append(IntegrityIssue(
|
|
517
|
-
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
518
|
-
severity="low",
|
|
519
|
-
message="font_settings must be a dictionary",
|
|
520
|
-
location="font_settings",
|
|
521
|
-
expected_value="dict",
|
|
522
|
-
actual_value=type(font_settings).__name__,
|
|
523
|
-
auto_fixable=True
|
|
524
|
-
))
|
|
525
|
-
return issues
|
|
526
|
-
|
|
527
|
-
# Validate font configurations
|
|
528
|
-
for font_type, font_config in font_settings.items():
|
|
529
|
-
if not isinstance(font_config, dict):
|
|
530
|
-
issues.append(IntegrityIssue(
|
|
531
|
-
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
532
|
-
severity="low",
|
|
533
|
-
message=f"font_settings.{font_type} must be a dictionary",
|
|
534
|
-
location=f"font_settings.{font_type}",
|
|
535
|
-
expected_value="dict",
|
|
536
|
-
actual_value=type(font_config).__name__,
|
|
537
|
-
auto_fixable=True
|
|
538
|
-
))
|
|
539
|
-
continue
|
|
540
|
-
|
|
541
|
-
# Validate font properties
|
|
542
|
-
if 'family' in font_config:
|
|
543
|
-
if not isinstance(font_config['family'], str) or not font_config['family'].strip():
|
|
544
|
-
issues.append(IntegrityIssue(
|
|
545
|
-
issue_type=IntegrityIssueType.INVALID_VALUE,
|
|
546
|
-
severity="medium",
|
|
547
|
-
message=f"Font family for {font_type} must be a non-empty string",
|
|
548
|
-
location=f"font_settings.{font_type}.family",
|
|
549
|
-
actual_value=font_config['family'],
|
|
550
|
-
auto_fixable=True
|
|
551
|
-
))
|
|
552
|
-
|
|
553
|
-
if 'size' in font_config:
|
|
554
|
-
size = font_config['size']
|
|
555
|
-
if not isinstance(size, int) or size < 6 or size > 72:
|
|
556
|
-
issues.append(IntegrityIssue(
|
|
557
|
-
issue_type=IntegrityIssueType.INVALID_VALUE,
|
|
558
|
-
severity="medium",
|
|
559
|
-
message=f"Font size for {font_type} must be between 6 and 72",
|
|
560
|
-
location=f"font_settings.{font_type}.size",
|
|
561
|
-
expected_value="6-72",
|
|
562
|
-
actual_value=size,
|
|
563
|
-
auto_fixable=True
|
|
564
|
-
))
|
|
565
|
-
|
|
566
|
-
return issues
|
|
567
|
-
|
|
568
|
-
def _validate_dialog_settings(self, settings_data: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
569
|
-
"""Validate dialog settings."""
|
|
570
|
-
issues = []
|
|
571
|
-
|
|
572
|
-
dialog_settings = settings_data.get('dialog_settings', {})
|
|
573
|
-
|
|
574
|
-
if not isinstance(dialog_settings, dict):
|
|
575
|
-
issues.append(IntegrityIssue(
|
|
576
|
-
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
577
|
-
severity="low",
|
|
578
|
-
message="dialog_settings must be a dictionary",
|
|
579
|
-
location="dialog_settings",
|
|
580
|
-
expected_value="dict",
|
|
581
|
-
actual_value=type(dialog_settings).__name__,
|
|
582
|
-
auto_fixable=True
|
|
583
|
-
))
|
|
584
|
-
return issues
|
|
585
|
-
|
|
586
|
-
# Validate dialog categories
|
|
587
|
-
valid_categories = ['success', 'confirmation', 'warning', 'error']
|
|
588
|
-
|
|
589
|
-
for category, config in dialog_settings.items():
|
|
590
|
-
if category not in valid_categories:
|
|
591
|
-
issues.append(IntegrityIssue(
|
|
592
|
-
issue_type=IntegrityIssueType.INVALID_VALUE,
|
|
593
|
-
severity="low",
|
|
594
|
-
message=f"Unknown dialog category: {category}",
|
|
595
|
-
location=f"dialog_settings.{category}",
|
|
596
|
-
suggestion=f"Valid categories: {', '.join(valid_categories)}"
|
|
597
|
-
))
|
|
598
|
-
continue
|
|
599
|
-
|
|
600
|
-
if not isinstance(config, dict):
|
|
601
|
-
issues.append(IntegrityIssue(
|
|
602
|
-
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
603
|
-
severity="low",
|
|
604
|
-
message=f"dialog_settings.{category} must be a dictionary",
|
|
605
|
-
location=f"dialog_settings.{category}",
|
|
606
|
-
expected_value="dict",
|
|
607
|
-
actual_value=type(config).__name__,
|
|
608
|
-
auto_fixable=True
|
|
609
|
-
))
|
|
610
|
-
continue
|
|
611
|
-
|
|
612
|
-
# Validate enabled property
|
|
613
|
-
if 'enabled' in config:
|
|
614
|
-
if not isinstance(config['enabled'], bool):
|
|
615
|
-
issues.append(IntegrityIssue(
|
|
616
|
-
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
617
|
-
severity="medium",
|
|
618
|
-
message=f"dialog_settings.{category}.enabled must be boolean",
|
|
619
|
-
location=f"dialog_settings.{category}.enabled",
|
|
620
|
-
expected_value="bool",
|
|
621
|
-
actual_value=type(config['enabled']).__name__,
|
|
622
|
-
auto_fixable=True
|
|
623
|
-
))
|
|
624
|
-
|
|
625
|
-
# Check if error dialogs are locked (cannot be disabled)
|
|
626
|
-
if category == 'error' and config.get('enabled') is False:
|
|
627
|
-
issues.append(IntegrityIssue(
|
|
628
|
-
issue_type=IntegrityIssueType.SECURITY_ISSUE,
|
|
629
|
-
severity="high",
|
|
630
|
-
message="Error dialogs cannot be disabled for security reasons",
|
|
631
|
-
location="dialog_settings.error.enabled",
|
|
632
|
-
expected_value=True,
|
|
633
|
-
actual_value=False,
|
|
634
|
-
suggestion="Error dialogs must remain enabled",
|
|
635
|
-
auto_fixable=True
|
|
636
|
-
))
|
|
637
|
-
|
|
638
|
-
return issues
|
|
639
|
-
|
|
640
|
-
def _validate_cross_references(self, settings_data: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
641
|
-
"""Validate cross-references between settings."""
|
|
642
|
-
issues = []
|
|
643
|
-
|
|
644
|
-
# Validate active tab indices
|
|
645
|
-
active_input_tab = settings_data.get('active_input_tab', 0)
|
|
646
|
-
active_output_tab = settings_data.get('active_output_tab', 0)
|
|
647
|
-
|
|
648
|
-
input_tabs = settings_data.get('input_tabs', [])
|
|
649
|
-
output_tabs = settings_data.get('output_tabs', [])
|
|
650
|
-
|
|
651
|
-
if isinstance(input_tabs, list) and (active_input_tab < 0 or active_input_tab >= len(input_tabs)):
|
|
652
|
-
issues.append(IntegrityIssue(
|
|
653
|
-
issue_type=IntegrityIssueType.INCONSISTENT_DATA,
|
|
654
|
-
severity="medium",
|
|
655
|
-
message=f"active_input_tab ({active_input_tab}) is out of range for input_tabs length ({len(input_tabs)})",
|
|
656
|
-
location="active_input_tab",
|
|
657
|
-
suggestion="Set active_input_tab to a valid index",
|
|
658
|
-
auto_fixable=True
|
|
659
|
-
))
|
|
660
|
-
|
|
661
|
-
if isinstance(output_tabs, list) and (active_output_tab < 0 or active_output_tab >= len(output_tabs)):
|
|
662
|
-
issues.append(IntegrityIssue(
|
|
663
|
-
issue_type=IntegrityIssueType.INCONSISTENT_DATA,
|
|
664
|
-
severity="medium",
|
|
665
|
-
message=f"active_output_tab ({active_output_tab}) is out of range for output_tabs length ({len(output_tabs)})",
|
|
666
|
-
location="active_output_tab",
|
|
667
|
-
suggestion="Set active_output_tab to a valid index",
|
|
668
|
-
auto_fixable=True
|
|
669
|
-
))
|
|
670
|
-
|
|
671
|
-
# Validate selected tool exists in tool_settings
|
|
672
|
-
selected_tool = settings_data.get('selected_tool')
|
|
673
|
-
tool_settings = settings_data.get('tool_settings', {})
|
|
674
|
-
|
|
675
|
-
if selected_tool and selected_tool not in tool_settings:
|
|
676
|
-
issues.append(IntegrityIssue(
|
|
677
|
-
issue_type=IntegrityIssueType.INCONSISTENT_DATA,
|
|
678
|
-
severity="medium",
|
|
679
|
-
message=f"Selected tool '{selected_tool}' not found in tool_settings",
|
|
680
|
-
location="selected_tool",
|
|
681
|
-
suggestion="Add configuration for selected tool or change selection",
|
|
682
|
-
auto_fixable=False
|
|
683
|
-
))
|
|
684
|
-
|
|
685
|
-
return issues
|
|
686
|
-
|
|
687
|
-
def _validate_security(self, settings_data: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
688
|
-
"""Validate security-related settings."""
|
|
689
|
-
issues = []
|
|
690
|
-
|
|
691
|
-
# Validate encrypted data
|
|
692
|
-
encrypted_issues = self.validate_encrypted_data(settings_data)
|
|
693
|
-
issues.extend(encrypted_issues)
|
|
694
|
-
|
|
695
|
-
# Check for potential security issues in tool settings
|
|
696
|
-
tool_settings = settings_data.get('tool_settings', {})
|
|
697
|
-
|
|
698
|
-
for tool_name, tool_config in tool_settings.items():
|
|
699
|
-
if isinstance(tool_config, dict):
|
|
700
|
-
# Check for API keys that should be encrypted
|
|
701
|
-
for key, value in tool_config.items():
|
|
702
|
-
if 'api_key' in key.lower() or 'password' in key.lower() or 'token' in key.lower():
|
|
703
|
-
if isinstance(value, str) and not value.startswith('ENC:') and value.strip():
|
|
704
|
-
issues.append(IntegrityIssue(
|
|
705
|
-
issue_type=IntegrityIssueType.SECURITY_ISSUE,
|
|
706
|
-
severity="high",
|
|
707
|
-
message=f"Sensitive data '{key}' in {tool_name} should be encrypted",
|
|
708
|
-
location=f"tool_settings.{tool_name}.{key}",
|
|
709
|
-
suggestion="Encrypt sensitive values with 'ENC:' prefix"
|
|
710
|
-
))
|
|
711
|
-
|
|
712
|
-
return issues
|
|
713
|
-
|
|
714
|
-
def _validate_against_rules(self, data: Dict[str, Any], rules: Dict[str, Any],
|
|
715
|
-
location_prefix: str) -> List[IntegrityIssue]:
|
|
716
|
-
"""Validate data against a set of rules."""
|
|
717
|
-
issues = []
|
|
718
|
-
|
|
719
|
-
for key, rule in rules.items():
|
|
720
|
-
if key not in data:
|
|
721
|
-
if rule.get('required', False):
|
|
722
|
-
issues.append(IntegrityIssue(
|
|
723
|
-
issue_type=IntegrityIssueType.MISSING_REQUIRED,
|
|
724
|
-
severity=rule.get('severity', 'medium'),
|
|
725
|
-
message=f"Required setting '{key}' is missing",
|
|
726
|
-
location=f"{location_prefix}.{key}",
|
|
727
|
-
suggestion=f"Add {key} with appropriate value",
|
|
728
|
-
auto_fixable=rule.get('auto_fixable', False)
|
|
729
|
-
))
|
|
730
|
-
continue
|
|
731
|
-
|
|
732
|
-
value = data[key]
|
|
733
|
-
|
|
734
|
-
# Type validation
|
|
735
|
-
if 'type' in rule:
|
|
736
|
-
expected_type = rule['type']
|
|
737
|
-
if not isinstance(value, expected_type):
|
|
738
|
-
issues.append(IntegrityIssue(
|
|
739
|
-
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
740
|
-
severity=rule.get('severity', 'medium'),
|
|
741
|
-
message=f"Setting '{key}' has wrong type",
|
|
742
|
-
location=f"{location_prefix}.{key}",
|
|
743
|
-
expected_value=expected_type.__name__,
|
|
744
|
-
actual_value=type(value).__name__,
|
|
745
|
-
auto_fixable=rule.get('auto_fixable', False)
|
|
746
|
-
))
|
|
747
|
-
|
|
748
|
-
# Range validation for numeric types
|
|
749
|
-
if isinstance(value, (int, float)):
|
|
750
|
-
if 'min' in rule and value < rule['min']:
|
|
751
|
-
issues.append(IntegrityIssue(
|
|
752
|
-
issue_type=IntegrityIssueType.INVALID_VALUE,
|
|
753
|
-
severity=rule.get('severity', 'medium'),
|
|
754
|
-
message=f"Setting '{key}' value {value} is below minimum {rule['min']}",
|
|
755
|
-
location=f"{location_prefix}.{key}",
|
|
756
|
-
expected_value=f">= {rule['min']}",
|
|
757
|
-
actual_value=value,
|
|
758
|
-
auto_fixable=rule.get('auto_fixable', False)
|
|
759
|
-
))
|
|
760
|
-
|
|
761
|
-
if 'max' in rule and value > rule['max']:
|
|
762
|
-
issues.append(IntegrityIssue(
|
|
763
|
-
issue_type=IntegrityIssueType.INVALID_VALUE,
|
|
764
|
-
severity=rule.get('severity', 'medium'),
|
|
765
|
-
message=f"Setting '{key}' value {value} is above maximum {rule['max']}",
|
|
766
|
-
location=f"{location_prefix}.{key}",
|
|
767
|
-
expected_value=f"<= {rule['max']}",
|
|
768
|
-
actual_value=value,
|
|
769
|
-
auto_fixable=rule.get('auto_fixable', False)
|
|
770
|
-
))
|
|
771
|
-
|
|
772
|
-
# Pattern validation for strings
|
|
773
|
-
if isinstance(value, str) and 'pattern' in rule:
|
|
774
|
-
if not re.match(rule['pattern'], value):
|
|
775
|
-
issues.append(IntegrityIssue(
|
|
776
|
-
issue_type=IntegrityIssueType.INVALID_VALUE,
|
|
777
|
-
severity=rule.get('severity', 'medium'),
|
|
778
|
-
message=f"Setting '{key}' value doesn't match expected pattern",
|
|
779
|
-
location=f"{location_prefix}.{key}",
|
|
780
|
-
actual_value=value,
|
|
781
|
-
suggestion=rule.get('pattern_description', 'Check value format'),
|
|
782
|
-
auto_fixable=rule.get('auto_fixable', False)
|
|
783
|
-
))
|
|
784
|
-
|
|
785
|
-
# Enum validation
|
|
786
|
-
if 'enum' in rule and value not in rule['enum']:
|
|
787
|
-
issues.append(IntegrityIssue(
|
|
788
|
-
issue_type=IntegrityIssueType.INVALID_VALUE,
|
|
789
|
-
severity=rule.get('severity', 'medium'),
|
|
790
|
-
message=f"Setting '{key}' has invalid value",
|
|
791
|
-
location=f"{location_prefix}.{key}",
|
|
792
|
-
expected_value=f"One of: {rule['enum']}",
|
|
793
|
-
actual_value=value,
|
|
794
|
-
auto_fixable=rule.get('auto_fixable', False)
|
|
795
|
-
))
|
|
796
|
-
|
|
797
|
-
return issues
|
|
798
|
-
|
|
799
|
-
# Tool-specific validation methods
|
|
800
|
-
|
|
801
|
-
def _validate_curl_tool_config(self, config: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
802
|
-
"""Validate cURL tool specific configuration."""
|
|
803
|
-
issues = []
|
|
804
|
-
|
|
805
|
-
# Validate history array if present
|
|
806
|
-
if 'history' in config:
|
|
807
|
-
history = config['history']
|
|
808
|
-
if isinstance(history, list):
|
|
809
|
-
for i, entry in enumerate(history):
|
|
810
|
-
if not isinstance(entry, dict):
|
|
811
|
-
issues.append(IntegrityIssue(
|
|
812
|
-
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
813
|
-
severity="low",
|
|
814
|
-
message=f"cURL history entry {i} must be a dictionary",
|
|
815
|
-
location=f"tool_settings.cURL Tool.history[{i}]",
|
|
816
|
-
auto_fixable=True
|
|
817
|
-
))
|
|
818
|
-
continue
|
|
819
|
-
|
|
820
|
-
# Validate required history fields
|
|
821
|
-
required_fields = ['timestamp', 'method', 'url', 'status_code']
|
|
822
|
-
for field in required_fields:
|
|
823
|
-
if field not in entry:
|
|
824
|
-
issues.append(IntegrityIssue(
|
|
825
|
-
issue_type=IntegrityIssueType.MISSING_REQUIRED,
|
|
826
|
-
severity="low",
|
|
827
|
-
message=f"cURL history entry {i} missing required field '{field}'",
|
|
828
|
-
location=f"tool_settings.cURL Tool.history[{i}].{field}",
|
|
829
|
-
auto_fixable=True
|
|
830
|
-
))
|
|
831
|
-
|
|
832
|
-
return issues
|
|
833
|
-
|
|
834
|
-
def _validate_ai_tools_config(self, config: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
835
|
-
"""Validate AI tools configuration."""
|
|
836
|
-
issues = []
|
|
837
|
-
|
|
838
|
-
# Check for API key encryption
|
|
839
|
-
if 'API_KEY' in config:
|
|
840
|
-
api_key = config['API_KEY']
|
|
841
|
-
if isinstance(api_key, str) and api_key.strip() and not api_key.startswith('ENC:'):
|
|
842
|
-
issues.append(IntegrityIssue(
|
|
843
|
-
issue_type=IntegrityIssueType.SECURITY_ISSUE,
|
|
844
|
-
severity="high",
|
|
845
|
-
message="AI Tools API key should be encrypted",
|
|
846
|
-
location="tool_settings.AI Tools.API_KEY",
|
|
847
|
-
suggestion="Encrypt API key with 'ENC:' prefix"
|
|
848
|
-
))
|
|
849
|
-
|
|
850
|
-
# Validate model list
|
|
851
|
-
if 'MODELS_LIST' in config:
|
|
852
|
-
models = config['MODELS_LIST']
|
|
853
|
-
if not isinstance(models, list):
|
|
854
|
-
issues.append(IntegrityIssue(
|
|
855
|
-
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
856
|
-
severity="medium",
|
|
857
|
-
message="AI Tools MODELS_LIST must be an array",
|
|
858
|
-
location="tool_settings.AI Tools.MODELS_LIST",
|
|
859
|
-
expected_value="array",
|
|
860
|
-
actual_value=type(models).__name__,
|
|
861
|
-
auto_fixable=True
|
|
862
|
-
))
|
|
863
|
-
|
|
864
|
-
return issues
|
|
865
|
-
|
|
866
|
-
def _validate_find_replace_config(self, config: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
867
|
-
"""Validate Find & Replace tool configuration."""
|
|
868
|
-
issues = []
|
|
869
|
-
|
|
870
|
-
# Validate pattern library if present
|
|
871
|
-
if 'pattern_library' in config:
|
|
872
|
-
patterns = config['pattern_library']
|
|
873
|
-
if isinstance(patterns, list):
|
|
874
|
-
for i, pattern in enumerate(patterns):
|
|
875
|
-
if not isinstance(pattern, dict):
|
|
876
|
-
issues.append(IntegrityIssue(
|
|
877
|
-
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
878
|
-
severity="low",
|
|
879
|
-
message=f"Pattern library entry {i} must be a dictionary",
|
|
880
|
-
location=f"tool_settings.Find & Replace.pattern_library[{i}]",
|
|
881
|
-
auto_fixable=True
|
|
882
|
-
))
|
|
883
|
-
continue
|
|
884
|
-
|
|
885
|
-
# Validate pattern structure
|
|
886
|
-
if 'pattern' not in pattern:
|
|
887
|
-
issues.append(IntegrityIssue(
|
|
888
|
-
issue_type=IntegrityIssueType.MISSING_REQUIRED,
|
|
889
|
-
severity="medium",
|
|
890
|
-
message=f"Pattern library entry {i} missing 'pattern' field",
|
|
891
|
-
location=f"tool_settings.Find & Replace.pattern_library[{i}].pattern",
|
|
892
|
-
auto_fixable=True
|
|
893
|
-
))
|
|
894
|
-
|
|
895
|
-
return issues
|
|
896
|
-
|
|
897
|
-
# Helper methods
|
|
898
|
-
|
|
899
|
-
def _find_encrypted_values(self, data: Any, path: str = "") -> List[Tuple[str, str]]:
|
|
900
|
-
"""Find all encrypted values in the data structure."""
|
|
901
|
-
encrypted_values = []
|
|
902
|
-
|
|
903
|
-
if isinstance(data, dict):
|
|
904
|
-
for key, value in data.items():
|
|
905
|
-
current_path = f"{path}.{key}" if path else key
|
|
906
|
-
if isinstance(value, str) and value.startswith("ENC:"):
|
|
907
|
-
encrypted_values.append((current_path, value))
|
|
908
|
-
elif isinstance(value, (dict, list)):
|
|
909
|
-
encrypted_values.extend(self._find_encrypted_values(value, current_path))
|
|
910
|
-
elif isinstance(data, list):
|
|
911
|
-
for i, item in enumerate(data):
|
|
912
|
-
current_path = f"{path}[{i}]"
|
|
913
|
-
if isinstance(item, str) and item.startswith("ENC:"):
|
|
914
|
-
encrypted_values.append((current_path, item))
|
|
915
|
-
elif isinstance(item, (dict, list)):
|
|
916
|
-
encrypted_values.extend(self._find_encrypted_values(item, current_path))
|
|
917
|
-
|
|
918
|
-
return encrypted_values
|
|
919
|
-
|
|
920
|
-
def _is_valid_base64(self, data: str) -> bool:
|
|
921
|
-
"""Check if string is valid base64."""
|
|
922
|
-
import base64
|
|
923
|
-
try:
|
|
924
|
-
base64.b64decode(data, validate=True)
|
|
925
|
-
return True
|
|
926
|
-
except Exception:
|
|
927
|
-
return False
|
|
928
|
-
|
|
929
|
-
def _apply_automatic_fixes(self, settings_data: Dict[str, Any],
|
|
930
|
-
issues: List[IntegrityIssue]) -> int:
|
|
931
|
-
"""Apply automatic fixes to settings data."""
|
|
932
|
-
fixed_count = 0
|
|
933
|
-
|
|
934
|
-
for issue in issues:
|
|
935
|
-
if not issue.auto_fixable:
|
|
936
|
-
continue
|
|
937
|
-
|
|
938
|
-
try:
|
|
939
|
-
# Apply fix based on issue type
|
|
940
|
-
if issue.issue_type == IntegrityIssueType.MISSING_REQUIRED:
|
|
941
|
-
fixed_count += self._fix_missing_required(settings_data, issue)
|
|
942
|
-
elif issue.issue_type == IntegrityIssueType.INVALID_TYPE:
|
|
943
|
-
fixed_count += self._fix_invalid_type(settings_data, issue)
|
|
944
|
-
elif issue.issue_type == IntegrityIssueType.INVALID_VALUE:
|
|
945
|
-
fixed_count += self._fix_invalid_value(settings_data, issue)
|
|
946
|
-
|
|
947
|
-
except Exception as e:
|
|
948
|
-
self.logger.warning(f"Failed to apply automatic fix for {issue.location}: {e}")
|
|
949
|
-
|
|
950
|
-
return fixed_count
|
|
951
|
-
|
|
952
|
-
def _fix_missing_required(self, settings_data: Dict[str, Any],
|
|
953
|
-
issue: IntegrityIssue) -> int:
|
|
954
|
-
"""Fix missing required settings."""
|
|
955
|
-
# Implementation would add default values for missing required settings
|
|
956
|
-
return 0
|
|
957
|
-
|
|
958
|
-
def _fix_invalid_type(self, settings_data: Dict[str, Any],
|
|
959
|
-
issue: IntegrityIssue) -> int:
|
|
960
|
-
"""Fix invalid type issues."""
|
|
961
|
-
# Implementation would convert values to correct types
|
|
962
|
-
return 0
|
|
963
|
-
|
|
964
|
-
def _fix_invalid_value(self, settings_data: Dict[str, Any],
|
|
965
|
-
issue: IntegrityIssue) -> int:
|
|
966
|
-
"""Fix invalid value issues."""
|
|
967
|
-
# Implementation would correct invalid values
|
|
968
|
-
return 0
|
|
969
|
-
|
|
970
|
-
def _initialize_core_settings_rules(self) -> Dict[str, Any]:
|
|
971
|
-
"""Initialize validation rules for core settings."""
|
|
972
|
-
return {
|
|
973
|
-
'export_path': {
|
|
974
|
-
'type': str,
|
|
975
|
-
'required': True,
|
|
976
|
-
'pattern': r'^.+$', # Non-empty
|
|
977
|
-
'severity': 'critical',
|
|
978
|
-
'auto_fixable': True
|
|
979
|
-
},
|
|
980
|
-
'debug_level': {
|
|
981
|
-
'type': str,
|
|
982
|
-
'required': True,
|
|
983
|
-
'enum': ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
|
|
984
|
-
'severity': 'high',
|
|
985
|
-
'auto_fixable': True
|
|
986
|
-
},
|
|
987
|
-
'selected_tool': {
|
|
988
|
-
'type': str,
|
|
989
|
-
'required': True,
|
|
990
|
-
'pattern': r'^.+$', # Non-empty
|
|
991
|
-
'severity': 'medium',
|
|
992
|
-
'auto_fixable': True
|
|
993
|
-
},
|
|
994
|
-
'active_input_tab': {
|
|
995
|
-
'type': int,
|
|
996
|
-
'required': True,
|
|
997
|
-
'min': 0,
|
|
998
|
-
'max': 6,
|
|
999
|
-
'severity': 'medium',
|
|
1000
|
-
'auto_fixable': True
|
|
1001
|
-
},
|
|
1002
|
-
'active_output_tab': {
|
|
1003
|
-
'type': int,
|
|
1004
|
-
'required': True,
|
|
1005
|
-
'min': 0,
|
|
1006
|
-
'max': 6,
|
|
1007
|
-
'severity': 'medium',
|
|
1008
|
-
'auto_fixable': True
|
|
1009
|
-
}
|
|
1010
|
-
}
|
|
1011
|
-
|
|
1012
|
-
def _initialize_tool_settings_rules(self) -> Dict[str, Any]:
|
|
1013
|
-
"""Initialize validation rules for tool settings."""
|
|
1014
|
-
return {
|
|
1015
|
-
'cURL Tool': {
|
|
1016
|
-
'default_timeout': {
|
|
1017
|
-
'type': int,
|
|
1018
|
-
'min': 1,
|
|
1019
|
-
'max': 3600,
|
|
1020
|
-
'severity': 'medium',
|
|
1021
|
-
'auto_fixable': True
|
|
1022
|
-
},
|
|
1023
|
-
'follow_redirects': {
|
|
1024
|
-
'type': bool,
|
|
1025
|
-
'severity': 'low',
|
|
1026
|
-
'auto_fixable': True
|
|
1027
|
-
},
|
|
1028
|
-
'verify_ssl': {
|
|
1029
|
-
'type': bool,
|
|
1030
|
-
'severity': 'low',
|
|
1031
|
-
'auto_fixable': True
|
|
1032
|
-
},
|
|
1033
|
-
'max_redirects': {
|
|
1034
|
-
'type': int,
|
|
1035
|
-
'min': 0,
|
|
1036
|
-
'max': 50,
|
|
1037
|
-
'severity': 'medium',
|
|
1038
|
-
'auto_fixable': True
|
|
1039
|
-
}
|
|
1040
|
-
}
|
|
1041
|
-
}
|
|
1042
|
-
|
|
1043
|
-
def _initialize_performance_rules(self) -> Dict[str, Any]:
|
|
1044
|
-
"""Initialize validation rules for performance settings."""
|
|
1045
|
-
return {
|
|
1046
|
-
'async_processing': {
|
|
1047
|
-
'enabled': {
|
|
1048
|
-
'type': bool,
|
|
1049
|
-
'severity': 'low',
|
|
1050
|
-
'auto_fixable': True
|
|
1051
|
-
},
|
|
1052
|
-
'threshold_kb': {
|
|
1053
|
-
'type': int,
|
|
1054
|
-
'min': 1,
|
|
1055
|
-
'max': 10000,
|
|
1056
|
-
'severity': 'low',
|
|
1057
|
-
'auto_fixable': True
|
|
1058
|
-
}
|
|
1059
|
-
}
|
|
1060
|
-
}
|
|
1061
|
-
|
|
1062
|
-
def _initialize_security_rules(self) -> Dict[str, Any]:
|
|
1063
|
-
"""Initialize security validation rules."""
|
|
1064
|
-
return {
|
|
1065
|
-
'encrypted_fields': [
|
|
1066
|
-
'api_key', 'password', 'token', 'secret', 'key'
|
|
1067
|
-
],
|
|
1068
|
-
'sensitive_patterns': [
|
|
1069
|
-
r'(?i)api[_-]?key',
|
|
1070
|
-
r'(?i)password',
|
|
1071
|
-
r'(?i)token',
|
|
1072
|
-
r'(?i)secret'
|
|
1073
|
-
]
|
|
1074
|
-
}
|
|
1075
|
-
|
|
1076
|
-
def _log_validation_summary(self, issues: List[IntegrityIssue], validation_time: float) -> None:
|
|
1077
|
-
"""Log validation summary."""
|
|
1078
|
-
if not issues:
|
|
1079
|
-
self.logger.info(f"Settings integrity validation completed in {validation_time:.2f}s - no issues found")
|
|
1080
|
-
return
|
|
1081
|
-
|
|
1082
|
-
severity_counts = {}
|
|
1083
|
-
for issue in issues:
|
|
1084
|
-
severity_counts[issue.severity] = severity_counts.get(issue.severity, 0) + 1
|
|
1085
|
-
|
|
1086
|
-
summary = f"Settings integrity validation completed in {validation_time:.2f}s - {len(issues)} issues found: "
|
|
1087
|
-
summary += ", ".join([f"{count} {severity}" for severity, count in severity_counts.items()])
|
|
1088
|
-
|
|
1089
|
-
if 'critical' in severity_counts:
|
|
1090
|
-
self.logger.error(summary)
|
|
1091
|
-
elif 'high' in severity_counts:
|
|
1092
|
-
self.logger.warning(summary)
|
|
1093
|
-
else:
|
|
1094
|
-
self.logger.info(summary)
|
|
1095
|
-
|
|
1096
|
-
def _generate_recommendations(self, issues: List[IntegrityIssue]) -> List[str]:
|
|
1097
|
-
"""Generate recommendations based on validation issues."""
|
|
1098
|
-
recommendations = []
|
|
1099
|
-
|
|
1100
|
-
critical_count = len([i for i in issues if i.severity == 'critical'])
|
|
1101
|
-
if critical_count > 0:
|
|
1102
|
-
recommendations.append(f"Address {critical_count} critical issues immediately")
|
|
1103
|
-
|
|
1104
|
-
security_count = len([i for i in issues if i.issue_type == IntegrityIssueType.SECURITY_ISSUE])
|
|
1105
|
-
if security_count > 0:
|
|
1106
|
-
recommendations.append(f"Review {security_count} security issues")
|
|
1107
|
-
|
|
1108
|
-
auto_fixable_count = len([i for i in issues if i.auto_fixable])
|
|
1109
|
-
if auto_fixable_count > 0:
|
|
1110
|
-
recommendations.append(f"Run automatic repair for {auto_fixable_count} fixable issues")
|
|
1111
|
-
|
|
1
|
+
"""
|
|
2
|
+
Settings Integrity Validator for Database Migration
|
|
3
|
+
|
|
4
|
+
This module provides comprehensive validation tools for settings integrity,
|
|
5
|
+
ensuring data consistency and detecting corruption or invalid configurations
|
|
6
|
+
across the entire settings system.
|
|
7
|
+
|
|
8
|
+
Features:
|
|
9
|
+
- Deep validation of settings structure and content
|
|
10
|
+
- Cross-reference validation between related settings
|
|
11
|
+
- Tool-specific configuration validation
|
|
12
|
+
- Performance settings validation
|
|
13
|
+
- Tab content integrity checks
|
|
14
|
+
- Encrypted data validation
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
import json
|
|
18
|
+
import re
|
|
19
|
+
import logging
|
|
20
|
+
from typing import Dict, List, Tuple, Any, Optional, Union, Set
|
|
21
|
+
from datetime import datetime
|
|
22
|
+
from dataclasses import dataclass
|
|
23
|
+
from enum import Enum
|
|
24
|
+
from pathlib import Path
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class IntegrityLevel(Enum):
|
|
28
|
+
"""Levels of integrity validation."""
|
|
29
|
+
BASIC = "basic"
|
|
30
|
+
STANDARD = "standard"
|
|
31
|
+
COMPREHENSIVE = "comprehensive"
|
|
32
|
+
STRICT = "strict"
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class IntegrityIssueType(Enum):
|
|
36
|
+
"""Types of integrity issues."""
|
|
37
|
+
MISSING_REQUIRED = "missing_required"
|
|
38
|
+
INVALID_TYPE = "invalid_type"
|
|
39
|
+
INVALID_VALUE = "invalid_value"
|
|
40
|
+
INCONSISTENT_DATA = "inconsistent_data"
|
|
41
|
+
CORRUPTED_DATA = "corrupted_data"
|
|
42
|
+
SECURITY_ISSUE = "security_issue"
|
|
43
|
+
PERFORMANCE_ISSUE = "performance_issue"
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
@dataclass
|
|
47
|
+
class IntegrityIssue:
|
|
48
|
+
"""Information about an integrity issue."""
|
|
49
|
+
issue_type: IntegrityIssueType
|
|
50
|
+
severity: str # "low", "medium", "high", "critical"
|
|
51
|
+
message: str
|
|
52
|
+
location: str # Path to the problematic setting
|
|
53
|
+
expected_value: Optional[Any] = None
|
|
54
|
+
actual_value: Optional[Any] = None
|
|
55
|
+
suggestion: Optional[str] = None
|
|
56
|
+
auto_fixable: bool = False
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
class SettingsIntegrityValidator:
|
|
60
|
+
"""
|
|
61
|
+
Comprehensive settings integrity validator.
|
|
62
|
+
|
|
63
|
+
Validates settings data structure, content, and consistency
|
|
64
|
+
to ensure system stability and security.
|
|
65
|
+
"""
|
|
66
|
+
|
|
67
|
+
def __init__(self, validation_level: IntegrityLevel = IntegrityLevel.STANDARD):
|
|
68
|
+
"""
|
|
69
|
+
Initialize the integrity validator.
|
|
70
|
+
|
|
71
|
+
Args:
|
|
72
|
+
validation_level: Level of validation to perform
|
|
73
|
+
"""
|
|
74
|
+
self.validation_level = validation_level
|
|
75
|
+
self.logger = logging.getLogger(__name__)
|
|
76
|
+
|
|
77
|
+
# Validation rules
|
|
78
|
+
self._core_settings_rules = self._initialize_core_settings_rules()
|
|
79
|
+
self._tool_settings_rules = self._initialize_tool_settings_rules()
|
|
80
|
+
self._performance_rules = self._initialize_performance_rules()
|
|
81
|
+
self._security_rules = self._initialize_security_rules()
|
|
82
|
+
|
|
83
|
+
# Known tool configurations
|
|
84
|
+
self._known_tools = {
|
|
85
|
+
'AI Tools', 'Base64 Encoder/Decoder', 'Case Tool', 'Cron Tool', 'Diff Viewer',
|
|
86
|
+
'Email Extraction Tool', 'Email Header Analyzer', 'Find & Replace Text',
|
|
87
|
+
'Folder File Reporter', 'Generator Tools', 'HTML Extraction Tool',
|
|
88
|
+
'JSON/XML Tool', 'Sorter Tools', 'Translator Tools', 'URL Parser',
|
|
89
|
+
'URL and Link Extractor', 'Word Frequency Counter',
|
|
90
|
+
# AI Provider configurations (stored as separate tool settings)
|
|
91
|
+
'Google AI', 'Anthropic AI', 'OpenAI', 'AWS Bedrock', 'Cohere AI',
|
|
92
|
+
'HuggingFace AI', 'Groq AI', 'OpenRouterAI', 'LM Studio'
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
# Validation statistics
|
|
96
|
+
self._validation_stats = {
|
|
97
|
+
'total_checks': 0,
|
|
98
|
+
'issues_found': 0,
|
|
99
|
+
'auto_fixes_applied': 0,
|
|
100
|
+
'validation_time': 0.0
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
def validate_settings_integrity(self, settings_data: Dict[str, Any],
|
|
104
|
+
apply_fixes: bool = False) -> List[IntegrityIssue]:
|
|
105
|
+
"""
|
|
106
|
+
Perform comprehensive integrity validation on settings data.
|
|
107
|
+
|
|
108
|
+
Args:
|
|
109
|
+
settings_data: Settings data to validate
|
|
110
|
+
apply_fixes: Whether to apply automatic fixes
|
|
111
|
+
|
|
112
|
+
Returns:
|
|
113
|
+
List of integrity issues found
|
|
114
|
+
"""
|
|
115
|
+
start_time = datetime.now()
|
|
116
|
+
issues = []
|
|
117
|
+
|
|
118
|
+
try:
|
|
119
|
+
self.logger.info(f"Starting settings integrity validation (level: {self.validation_level.value})")
|
|
120
|
+
|
|
121
|
+
# Core settings validation
|
|
122
|
+
core_issues = self._validate_core_settings(settings_data)
|
|
123
|
+
issues.extend(core_issues)
|
|
124
|
+
|
|
125
|
+
# Tool settings validation
|
|
126
|
+
tool_issues = self._validate_tool_settings(settings_data)
|
|
127
|
+
issues.extend(tool_issues)
|
|
128
|
+
|
|
129
|
+
# Tab content validation
|
|
130
|
+
tab_issues = self._validate_tab_content(settings_data)
|
|
131
|
+
issues.extend(tab_issues)
|
|
132
|
+
|
|
133
|
+
# Performance settings validation
|
|
134
|
+
perf_issues = self._validate_performance_settings(settings_data)
|
|
135
|
+
issues.extend(perf_issues)
|
|
136
|
+
|
|
137
|
+
# Font settings validation
|
|
138
|
+
font_issues = self._validate_font_settings(settings_data)
|
|
139
|
+
issues.extend(font_issues)
|
|
140
|
+
|
|
141
|
+
# Dialog settings validation
|
|
142
|
+
dialog_issues = self._validate_dialog_settings(settings_data)
|
|
143
|
+
issues.extend(dialog_issues)
|
|
144
|
+
|
|
145
|
+
# Cross-reference validation
|
|
146
|
+
if self.validation_level in [IntegrityLevel.COMPREHENSIVE, IntegrityLevel.STRICT]:
|
|
147
|
+
cross_ref_issues = self._validate_cross_references(settings_data)
|
|
148
|
+
issues.extend(cross_ref_issues)
|
|
149
|
+
|
|
150
|
+
# Security validation
|
|
151
|
+
security_issues = self._validate_security(settings_data)
|
|
152
|
+
issues.extend(security_issues)
|
|
153
|
+
|
|
154
|
+
# Apply automatic fixes if requested
|
|
155
|
+
if apply_fixes:
|
|
156
|
+
fixed_count = self._apply_automatic_fixes(settings_data, issues)
|
|
157
|
+
self._validation_stats['auto_fixes_applied'] = fixed_count
|
|
158
|
+
|
|
159
|
+
# Update statistics
|
|
160
|
+
validation_time = (datetime.now() - start_time).total_seconds()
|
|
161
|
+
self._validation_stats.update({
|
|
162
|
+
'total_checks': self._validation_stats['total_checks'] + 1,
|
|
163
|
+
'issues_found': len(issues),
|
|
164
|
+
'validation_time': validation_time
|
|
165
|
+
})
|
|
166
|
+
|
|
167
|
+
# Log summary
|
|
168
|
+
self._log_validation_summary(issues, validation_time)
|
|
169
|
+
|
|
170
|
+
return issues
|
|
171
|
+
|
|
172
|
+
except Exception as e:
|
|
173
|
+
self.logger.error(f"Settings integrity validation failed: {e}")
|
|
174
|
+
return [IntegrityIssue(
|
|
175
|
+
issue_type=IntegrityIssueType.CORRUPTED_DATA,
|
|
176
|
+
severity="critical",
|
|
177
|
+
message=f"Validation process failed: {e}",
|
|
178
|
+
location="validation_process"
|
|
179
|
+
)]
|
|
180
|
+
|
|
181
|
+
def validate_tool_configuration(self, tool_name: str,
                                tool_config: Dict[str, Any]) -> List[IntegrityIssue]:
    """
    Validate a single tool's configuration section.

    Args:
        tool_name: Name of the tool whose settings are being checked.
        tool_config: That tool's configuration mapping.

    Returns:
        List of integrity issues found for the tool (empty when clean).
        A failure of the validation process itself is reported as a single
        CORRUPTED_DATA issue rather than raised.
    """
    try:
        found: List[IntegrityIssue] = []

        # A name we have never registered is suspicious but not fatal.
        if tool_name not in self._known_tools:
            found.append(IntegrityIssue(
                issue_type=IntegrityIssueType.INVALID_VALUE,
                severity="medium",
                message=f"Unknown tool configuration: {tool_name}",
                location=f"tool_settings.{tool_name}",
                suggestion="Verify tool name is correct"
            ))

        # Generic rule-driven validation, when rules exist for this tool.
        rules = self._tool_settings_rules.get(tool_name)
        if rules is not None:
            found.extend(self._validate_against_rules(
                tool_config, rules, f"tool_settings.{tool_name}"
            ))

        # Tool-specific deep checks, dispatched by name.
        extra_check = {
            'cURL Tool': self._validate_curl_tool_config,
            'AI Tools': self._validate_ai_tools_config,
            'Find & Replace': self._validate_find_replace_config,
        }.get(tool_name)
        if extra_check is not None:
            found.extend(extra_check(tool_config))

        return found

    except Exception as e:
        return [IntegrityIssue(
            issue_type=IntegrityIssueType.CORRUPTED_DATA,
            severity="high",
            message=f"Tool configuration validation failed for {tool_name}: {e}",
            location=f"tool_settings.{tool_name}"
        )]
|
|
234
|
+
|
|
235
|
+
def validate_encrypted_data(self, settings_data: Dict[str, Any]) -> List[IntegrityIssue]:
    """
    Validate the integrity of every encrypted ("ENC:"-prefixed) value.

    Args:
        settings_data: Settings tree to scan for encrypted values.

    Returns:
        List of integrity issues related to encrypted data; a failure of
        the scan itself is reported as one CORRUPTED_DATA issue.
    """
    found: List[IntegrityIssue] = []

    try:
        # _find_encrypted_values walks the whole tree and yields
        # (dotted-path, value) pairs for "ENC:"-prefixed strings.
        for location, value in self._find_encrypted_values(settings_data):
            # Defensive re-check of the prefix (the finder already filters
            # on it, so this guards against future finder changes).
            if not value.startswith("ENC:"):
                preview = value[:20] + "..." if len(value) > 20 else value
                found.append(IntegrityIssue(
                    issue_type=IntegrityIssueType.SECURITY_ISSUE,
                    severity="high",
                    message=f"Invalid encryption format at {location}",
                    location=location,
                    actual_value=preview,
                    suggestion="Ensure encrypted values start with 'ENC:'"
                ))
                continue

            payload = value[4:]  # strip the "ENC:" prefix

            # Ciphertext is stored base64-encoded; reject bad encodings.
            if not self._is_valid_base64(payload):
                found.append(IntegrityIssue(
                    issue_type=IntegrityIssueType.SECURITY_ISSUE,
                    severity="high",
                    message=f"Invalid base64 encoding in encrypted data at {location}",
                    location=location,
                    suggestion="Re-encrypt the value to fix encoding"
                ))

            # Very short payloads suggest encryption never really ran.
            if len(payload) < 16:
                found.append(IntegrityIssue(
                    issue_type=IntegrityIssueType.SECURITY_ISSUE,
                    severity="medium",
                    message=f"Encrypted data appears too short at {location}",
                    location=location,
                    suggestion="Verify encryption was applied correctly"
                ))

        return found

    except Exception as e:
        return [IntegrityIssue(
            issue_type=IntegrityIssueType.CORRUPTED_DATA,
            severity="high",
            message=f"Encrypted data validation failed: {e}",
            location="encrypted_data_validation"
        )]
|
|
296
|
+
|
|
297
|
+
def get_validation_report(self, issues: List[IntegrityIssue]) -> Dict[str, Any]:
    """
    Build a comprehensive report for a validation run.

    Args:
        issues: Integrity issues collected by a validation pass.

    Returns:
        Report dictionary with per-type / per-severity counts, critical and
        full issue details, recommendations, and run statistics.
    """
    severity_levels = ('low', 'medium', 'high', 'critical')

    report: Dict[str, Any] = {
        'timestamp': datetime.now().isoformat(),
        'validation_level': self.validation_level.value,
        'total_issues': len(issues),
        # One counter per known issue type, including zero counts.
        'issues_by_type': {
            kind.value: sum(1 for i in issues if i.issue_type == kind)
            for kind in IntegrityIssueType
        },
        # One counter per severity level, including zero counts.
        'issues_by_severity': {
            level: sum(1 for i in issues if i.severity == level)
            for level in severity_levels
        },
        'auto_fixable_issues': sum(1 for i in issues if i.auto_fixable),
        # Condensed view of critical issues only.
        'critical_issues': [
            {
                'type': i.issue_type.value,
                'message': i.message,
                'location': i.location,
                'auto_fixable': i.auto_fixable,
            }
            for i in issues if i.severity == 'critical'
        ],
        'recommendations': [],
        'validation_statistics': self._validation_stats.copy(),
    }

    # Full per-issue detail for UI display.
    report['all_issues'] = [
        {
            'type': i.issue_type.value,
            'severity': i.severity,
            'message': i.message,
            'location': i.location,
            'auto_fixable': i.auto_fixable,
            'suggestion': i.suggestion,
            'expected_value': i.expected_value,
            'actual_value': i.actual_value,
        }
        for i in issues
    ]

    report['recommendations'] = self._generate_recommendations(issues)

    return report
|
|
364
|
+
|
|
365
|
+
# Private validation methods
|
|
366
|
+
|
|
367
|
+
def _validate_core_settings(self, settings_data: Dict[str, Any]) -> List[IntegrityIssue]:
    """Validate core application settings: presence of required keys,
    then per-key rule checks for any key that has a rule defined."""
    found: List[IntegrityIssue] = []

    # Every one of these keys must exist in the settings tree.
    required = (
        'export_path', 'debug_level', 'selected_tool',
        'active_input_tab', 'active_output_tab',
    )

    for name in required:
        if name in settings_data:
            continue
        found.append(IntegrityIssue(
            issue_type=IntegrityIssueType.MISSING_REQUIRED,
            severity="critical",
            message=f"Required core setting '{name}' is missing",
            location=name,
            suggestion=f"Add default value for {name}",
            auto_fixable=True
        ))

    # Rule-driven validation for every present key that has a rule.
    for name, value in settings_data.items():
        rule = self._core_settings_rules.get(name)
        if rule is not None:
            found.extend(self._validate_against_rules(
                {name: value}, {name: rule}, name
            ))

    return found
|
|
398
|
+
|
|
399
|
+
def _validate_tool_settings(self, settings_data: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
400
|
+
"""Validate tool settings structure and content."""
|
|
401
|
+
issues = []
|
|
402
|
+
|
|
403
|
+
tool_settings = settings_data.get('tool_settings', {})
|
|
404
|
+
|
|
405
|
+
if not isinstance(tool_settings, dict):
|
|
406
|
+
issues.append(IntegrityIssue(
|
|
407
|
+
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
408
|
+
severity="critical",
|
|
409
|
+
message="tool_settings must be a dictionary",
|
|
410
|
+
location="tool_settings",
|
|
411
|
+
expected_value="dict",
|
|
412
|
+
actual_value=type(tool_settings).__name__,
|
|
413
|
+
auto_fixable=True
|
|
414
|
+
))
|
|
415
|
+
return issues
|
|
416
|
+
|
|
417
|
+
# Validate each tool's configuration
|
|
418
|
+
for tool_name, tool_config in tool_settings.items():
|
|
419
|
+
tool_issues = self.validate_tool_configuration(tool_name, tool_config)
|
|
420
|
+
issues.extend(tool_issues)
|
|
421
|
+
|
|
422
|
+
return issues
|
|
423
|
+
|
|
424
|
+
def _validate_tab_content(self, settings_data: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
425
|
+
"""Validate tab content arrays."""
|
|
426
|
+
issues = []
|
|
427
|
+
|
|
428
|
+
for tab_type in ['input_tabs', 'output_tabs']:
|
|
429
|
+
if tab_type not in settings_data:
|
|
430
|
+
issues.append(IntegrityIssue(
|
|
431
|
+
issue_type=IntegrityIssueType.MISSING_REQUIRED,
|
|
432
|
+
severity="high",
|
|
433
|
+
message=f"Required tab array '{tab_type}' is missing",
|
|
434
|
+
location=tab_type,
|
|
435
|
+
suggestion=f"Add empty {tab_type} array with 7 elements",
|
|
436
|
+
auto_fixable=True
|
|
437
|
+
))
|
|
438
|
+
continue
|
|
439
|
+
|
|
440
|
+
tabs = settings_data[tab_type]
|
|
441
|
+
|
|
442
|
+
if not isinstance(tabs, list):
|
|
443
|
+
issues.append(IntegrityIssue(
|
|
444
|
+
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
445
|
+
severity="high",
|
|
446
|
+
message=f"{tab_type} must be a list",
|
|
447
|
+
location=tab_type,
|
|
448
|
+
expected_value="list",
|
|
449
|
+
actual_value=type(tabs).__name__,
|
|
450
|
+
auto_fixable=True
|
|
451
|
+
))
|
|
452
|
+
continue
|
|
453
|
+
|
|
454
|
+
if len(tabs) != 7:
|
|
455
|
+
issues.append(IntegrityIssue(
|
|
456
|
+
issue_type=IntegrityIssueType.INVALID_VALUE,
|
|
457
|
+
severity="high",
|
|
458
|
+
message=f"{tab_type} must have exactly 7 elements",
|
|
459
|
+
location=tab_type,
|
|
460
|
+
expected_value=7,
|
|
461
|
+
actual_value=len(tabs),
|
|
462
|
+
suggestion="Resize array to 7 elements",
|
|
463
|
+
auto_fixable=True
|
|
464
|
+
))
|
|
465
|
+
|
|
466
|
+
# Validate each tab content
|
|
467
|
+
for i, content in enumerate(tabs):
|
|
468
|
+
if not isinstance(content, str):
|
|
469
|
+
issues.append(IntegrityIssue(
|
|
470
|
+
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
471
|
+
severity="medium",
|
|
472
|
+
message=f"{tab_type}[{i}] must be a string",
|
|
473
|
+
location=f"{tab_type}[{i}]",
|
|
474
|
+
expected_value="string",
|
|
475
|
+
actual_value=type(content).__name__,
|
|
476
|
+
auto_fixable=True
|
|
477
|
+
))
|
|
478
|
+
|
|
479
|
+
return issues
|
|
480
|
+
|
|
481
|
+
def _validate_performance_settings(self, settings_data: Dict[str, Any]) -> List[IntegrityIssue]:
    """Validate the 'performance_settings' section against the configured
    per-category performance rules."""
    section = settings_data.get('performance_settings', {})

    # The section must be a mapping before category rules can apply.
    if not isinstance(section, dict):
        return [IntegrityIssue(
            issue_type=IntegrityIssueType.INVALID_TYPE,
            severity="medium",
            message="performance_settings must be a dictionary",
            location="performance_settings",
            expected_value="dict",
            actual_value=type(section).__name__,
            auto_fixable=True
        )]

    collected: List[IntegrityIssue] = []
    for category, rules in self._performance_rules.items():
        if category not in section:
            continue
        collected.extend(self._validate_against_rules(
            section[category], rules, f"performance_settings.{category}"
        ))

    return collected
|
|
508
|
+
|
|
509
|
+
def _validate_font_settings(self, settings_data: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
510
|
+
"""Validate font settings."""
|
|
511
|
+
issues = []
|
|
512
|
+
|
|
513
|
+
font_settings = settings_data.get('font_settings', {})
|
|
514
|
+
|
|
515
|
+
if not isinstance(font_settings, dict):
|
|
516
|
+
issues.append(IntegrityIssue(
|
|
517
|
+
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
518
|
+
severity="low",
|
|
519
|
+
message="font_settings must be a dictionary",
|
|
520
|
+
location="font_settings",
|
|
521
|
+
expected_value="dict",
|
|
522
|
+
actual_value=type(font_settings).__name__,
|
|
523
|
+
auto_fixable=True
|
|
524
|
+
))
|
|
525
|
+
return issues
|
|
526
|
+
|
|
527
|
+
# Validate font configurations
|
|
528
|
+
for font_type, font_config in font_settings.items():
|
|
529
|
+
if not isinstance(font_config, dict):
|
|
530
|
+
issues.append(IntegrityIssue(
|
|
531
|
+
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
532
|
+
severity="low",
|
|
533
|
+
message=f"font_settings.{font_type} must be a dictionary",
|
|
534
|
+
location=f"font_settings.{font_type}",
|
|
535
|
+
expected_value="dict",
|
|
536
|
+
actual_value=type(font_config).__name__,
|
|
537
|
+
auto_fixable=True
|
|
538
|
+
))
|
|
539
|
+
continue
|
|
540
|
+
|
|
541
|
+
# Validate font properties
|
|
542
|
+
if 'family' in font_config:
|
|
543
|
+
if not isinstance(font_config['family'], str) or not font_config['family'].strip():
|
|
544
|
+
issues.append(IntegrityIssue(
|
|
545
|
+
issue_type=IntegrityIssueType.INVALID_VALUE,
|
|
546
|
+
severity="medium",
|
|
547
|
+
message=f"Font family for {font_type} must be a non-empty string",
|
|
548
|
+
location=f"font_settings.{font_type}.family",
|
|
549
|
+
actual_value=font_config['family'],
|
|
550
|
+
auto_fixable=True
|
|
551
|
+
))
|
|
552
|
+
|
|
553
|
+
if 'size' in font_config:
|
|
554
|
+
size = font_config['size']
|
|
555
|
+
if not isinstance(size, int) or size < 6 or size > 72:
|
|
556
|
+
issues.append(IntegrityIssue(
|
|
557
|
+
issue_type=IntegrityIssueType.INVALID_VALUE,
|
|
558
|
+
severity="medium",
|
|
559
|
+
message=f"Font size for {font_type} must be between 6 and 72",
|
|
560
|
+
location=f"font_settings.{font_type}.size",
|
|
561
|
+
expected_value="6-72",
|
|
562
|
+
actual_value=size,
|
|
563
|
+
auto_fixable=True
|
|
564
|
+
))
|
|
565
|
+
|
|
566
|
+
return issues
|
|
567
|
+
|
|
568
|
+
def _validate_dialog_settings(self, settings_data: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
569
|
+
"""Validate dialog settings."""
|
|
570
|
+
issues = []
|
|
571
|
+
|
|
572
|
+
dialog_settings = settings_data.get('dialog_settings', {})
|
|
573
|
+
|
|
574
|
+
if not isinstance(dialog_settings, dict):
|
|
575
|
+
issues.append(IntegrityIssue(
|
|
576
|
+
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
577
|
+
severity="low",
|
|
578
|
+
message="dialog_settings must be a dictionary",
|
|
579
|
+
location="dialog_settings",
|
|
580
|
+
expected_value="dict",
|
|
581
|
+
actual_value=type(dialog_settings).__name__,
|
|
582
|
+
auto_fixable=True
|
|
583
|
+
))
|
|
584
|
+
return issues
|
|
585
|
+
|
|
586
|
+
# Validate dialog categories
|
|
587
|
+
valid_categories = ['success', 'confirmation', 'warning', 'error']
|
|
588
|
+
|
|
589
|
+
for category, config in dialog_settings.items():
|
|
590
|
+
if category not in valid_categories:
|
|
591
|
+
issues.append(IntegrityIssue(
|
|
592
|
+
issue_type=IntegrityIssueType.INVALID_VALUE,
|
|
593
|
+
severity="low",
|
|
594
|
+
message=f"Unknown dialog category: {category}",
|
|
595
|
+
location=f"dialog_settings.{category}",
|
|
596
|
+
suggestion=f"Valid categories: {', '.join(valid_categories)}"
|
|
597
|
+
))
|
|
598
|
+
continue
|
|
599
|
+
|
|
600
|
+
if not isinstance(config, dict):
|
|
601
|
+
issues.append(IntegrityIssue(
|
|
602
|
+
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
603
|
+
severity="low",
|
|
604
|
+
message=f"dialog_settings.{category} must be a dictionary",
|
|
605
|
+
location=f"dialog_settings.{category}",
|
|
606
|
+
expected_value="dict",
|
|
607
|
+
actual_value=type(config).__name__,
|
|
608
|
+
auto_fixable=True
|
|
609
|
+
))
|
|
610
|
+
continue
|
|
611
|
+
|
|
612
|
+
# Validate enabled property
|
|
613
|
+
if 'enabled' in config:
|
|
614
|
+
if not isinstance(config['enabled'], bool):
|
|
615
|
+
issues.append(IntegrityIssue(
|
|
616
|
+
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
617
|
+
severity="medium",
|
|
618
|
+
message=f"dialog_settings.{category}.enabled must be boolean",
|
|
619
|
+
location=f"dialog_settings.{category}.enabled",
|
|
620
|
+
expected_value="bool",
|
|
621
|
+
actual_value=type(config['enabled']).__name__,
|
|
622
|
+
auto_fixable=True
|
|
623
|
+
))
|
|
624
|
+
|
|
625
|
+
# Check if error dialogs are locked (cannot be disabled)
|
|
626
|
+
if category == 'error' and config.get('enabled') is False:
|
|
627
|
+
issues.append(IntegrityIssue(
|
|
628
|
+
issue_type=IntegrityIssueType.SECURITY_ISSUE,
|
|
629
|
+
severity="high",
|
|
630
|
+
message="Error dialogs cannot be disabled for security reasons",
|
|
631
|
+
location="dialog_settings.error.enabled",
|
|
632
|
+
expected_value=True,
|
|
633
|
+
actual_value=False,
|
|
634
|
+
suggestion="Error dialogs must remain enabled",
|
|
635
|
+
auto_fixable=True
|
|
636
|
+
))
|
|
637
|
+
|
|
638
|
+
return issues
|
|
639
|
+
|
|
640
|
+
def _validate_cross_references(self, settings_data: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
641
|
+
"""Validate cross-references between settings."""
|
|
642
|
+
issues = []
|
|
643
|
+
|
|
644
|
+
# Validate active tab indices
|
|
645
|
+
active_input_tab = settings_data.get('active_input_tab', 0)
|
|
646
|
+
active_output_tab = settings_data.get('active_output_tab', 0)
|
|
647
|
+
|
|
648
|
+
input_tabs = settings_data.get('input_tabs', [])
|
|
649
|
+
output_tabs = settings_data.get('output_tabs', [])
|
|
650
|
+
|
|
651
|
+
if isinstance(input_tabs, list) and (active_input_tab < 0 or active_input_tab >= len(input_tabs)):
|
|
652
|
+
issues.append(IntegrityIssue(
|
|
653
|
+
issue_type=IntegrityIssueType.INCONSISTENT_DATA,
|
|
654
|
+
severity="medium",
|
|
655
|
+
message=f"active_input_tab ({active_input_tab}) is out of range for input_tabs length ({len(input_tabs)})",
|
|
656
|
+
location="active_input_tab",
|
|
657
|
+
suggestion="Set active_input_tab to a valid index",
|
|
658
|
+
auto_fixable=True
|
|
659
|
+
))
|
|
660
|
+
|
|
661
|
+
if isinstance(output_tabs, list) and (active_output_tab < 0 or active_output_tab >= len(output_tabs)):
|
|
662
|
+
issues.append(IntegrityIssue(
|
|
663
|
+
issue_type=IntegrityIssueType.INCONSISTENT_DATA,
|
|
664
|
+
severity="medium",
|
|
665
|
+
message=f"active_output_tab ({active_output_tab}) is out of range for output_tabs length ({len(output_tabs)})",
|
|
666
|
+
location="active_output_tab",
|
|
667
|
+
suggestion="Set active_output_tab to a valid index",
|
|
668
|
+
auto_fixable=True
|
|
669
|
+
))
|
|
670
|
+
|
|
671
|
+
# Validate selected tool exists in tool_settings
|
|
672
|
+
selected_tool = settings_data.get('selected_tool')
|
|
673
|
+
tool_settings = settings_data.get('tool_settings', {})
|
|
674
|
+
|
|
675
|
+
if selected_tool and selected_tool not in tool_settings:
|
|
676
|
+
issues.append(IntegrityIssue(
|
|
677
|
+
issue_type=IntegrityIssueType.INCONSISTENT_DATA,
|
|
678
|
+
severity="medium",
|
|
679
|
+
message=f"Selected tool '{selected_tool}' not found in tool_settings",
|
|
680
|
+
location="selected_tool",
|
|
681
|
+
suggestion="Add configuration for selected tool or change selection",
|
|
682
|
+
auto_fixable=False
|
|
683
|
+
))
|
|
684
|
+
|
|
685
|
+
return issues
|
|
686
|
+
|
|
687
|
+
def _validate_security(self, settings_data: Dict[str, Any]) -> List[IntegrityIssue]:
    """Validate security-related settings: encrypted-data integrity plus a
    scan for plaintext secrets inside tool settings."""
    # Start with the encrypted-data checks.
    found = list(self.validate_encrypted_data(settings_data))

    # Any key whose name suggests a secret must carry an encrypted value.
    sensitive_markers = ('api_key', 'password', 'token')
    tool_settings = settings_data.get('tool_settings', {})

    for tool_name, tool_config in tool_settings.items():
        if not isinstance(tool_config, dict):
            continue
        for key, value in tool_config.items():
            lowered = key.lower()
            if not any(marker in lowered for marker in sensitive_markers):
                continue
            # Non-empty plaintext string in a sensitive slot → flag it.
            if isinstance(value, str) and value.strip() and not value.startswith('ENC:'):
                found.append(IntegrityIssue(
                    issue_type=IntegrityIssueType.SECURITY_ISSUE,
                    severity="high",
                    message=f"Sensitive data '{key}' in {tool_name} should be encrypted",
                    location=f"tool_settings.{tool_name}.{key}",
                    suggestion="Encrypt sensitive values with 'ENC:' prefix"
                ))

    return found
|
|
713
|
+
|
|
714
|
+
def _validate_against_rules(self, data: Dict[str, Any], rules: Dict[str, Any],
                            location_prefix: str) -> List[IntegrityIssue]:
    """Validate data against a set of rules.

    Each rule is a dict that may contain: 'required' (bool), 'type'
    (a Python type for isinstance), 'min'/'max' (numeric bounds),
    'pattern' (regex for strings, with optional 'pattern_description'),
    'enum' (allowed values), plus 'severity' and 'auto_fixable' used to
    shape the emitted issues.  Checks are cumulative: one value can
    produce several issues (e.g. wrong type AND failing enum).

    Args:
        data: Mapping of setting names to values to check.
        rules: Mapping of setting names to their rule dicts.
        location_prefix: Dotted path prefix used in issue locations.

    Returns:
        List of integrity issues found (empty when everything passes).
    """
    issues = []

    for key, rule in rules.items():
        # Absent keys are only an issue when the rule marks them required;
        # all other checks are skipped for absent keys.
        if key not in data:
            if rule.get('required', False):
                issues.append(IntegrityIssue(
                    issue_type=IntegrityIssueType.MISSING_REQUIRED,
                    severity=rule.get('severity', 'medium'),
                    message=f"Required setting '{key}' is missing",
                    location=f"{location_prefix}.{key}",
                    suggestion=f"Add {key} with appropriate value",
                    auto_fixable=rule.get('auto_fixable', False)
                ))
            continue

        value = data[key]

        # Type validation
        if 'type' in rule:
            expected_type = rule['type']
            if not isinstance(value, expected_type):
                issues.append(IntegrityIssue(
                    issue_type=IntegrityIssueType.INVALID_TYPE,
                    severity=rule.get('severity', 'medium'),
                    message=f"Setting '{key}' has wrong type",
                    location=f"{location_prefix}.{key}",
                    expected_value=expected_type.__name__,
                    actual_value=type(value).__name__,
                    auto_fixable=rule.get('auto_fixable', False)
                ))

        # Range validation for numeric types
        # (only applies when the value actually is numeric, regardless of
        # what 'type' says — non-numeric values simply skip these checks)
        if isinstance(value, (int, float)):
            if 'min' in rule and value < rule['min']:
                issues.append(IntegrityIssue(
                    issue_type=IntegrityIssueType.INVALID_VALUE,
                    severity=rule.get('severity', 'medium'),
                    message=f"Setting '{key}' value {value} is below minimum {rule['min']}",
                    location=f"{location_prefix}.{key}",
                    expected_value=f">= {rule['min']}",
                    actual_value=value,
                    auto_fixable=rule.get('auto_fixable', False)
                ))

            if 'max' in rule and value > rule['max']:
                issues.append(IntegrityIssue(
                    issue_type=IntegrityIssueType.INVALID_VALUE,
                    severity=rule.get('severity', 'medium'),
                    message=f"Setting '{key}' value {value} is above maximum {rule['max']}",
                    location=f"{location_prefix}.{key}",
                    expected_value=f"<= {rule['max']}",
                    actual_value=value,
                    auto_fixable=rule.get('auto_fixable', False)
                ))

        # Pattern validation for strings
        # (re.match anchors at the start only, not the end)
        if isinstance(value, str) and 'pattern' in rule:
            if not re.match(rule['pattern'], value):
                issues.append(IntegrityIssue(
                    issue_type=IntegrityIssueType.INVALID_VALUE,
                    severity=rule.get('severity', 'medium'),
                    message=f"Setting '{key}' value doesn't match expected pattern",
                    location=f"{location_prefix}.{key}",
                    actual_value=value,
                    suggestion=rule.get('pattern_description', 'Check value format'),
                    auto_fixable=rule.get('auto_fixable', False)
                ))

        # Enum validation (runs for every value type)
        if 'enum' in rule and value not in rule['enum']:
            issues.append(IntegrityIssue(
                issue_type=IntegrityIssueType.INVALID_VALUE,
                severity=rule.get('severity', 'medium'),
                message=f"Setting '{key}' has invalid value",
                location=f"{location_prefix}.{key}",
                expected_value=f"One of: {rule['enum']}",
                actual_value=value,
                auto_fixable=rule.get('auto_fixable', False)
            ))

    return issues
|
|
798
|
+
|
|
799
|
+
# Tool-specific validation methods
|
|
800
|
+
|
|
801
|
+
def _validate_curl_tool_config(self, config: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
802
|
+
"""Validate cURL tool specific configuration."""
|
|
803
|
+
issues = []
|
|
804
|
+
|
|
805
|
+
# Validate history array if present
|
|
806
|
+
if 'history' in config:
|
|
807
|
+
history = config['history']
|
|
808
|
+
if isinstance(history, list):
|
|
809
|
+
for i, entry in enumerate(history):
|
|
810
|
+
if not isinstance(entry, dict):
|
|
811
|
+
issues.append(IntegrityIssue(
|
|
812
|
+
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
813
|
+
severity="low",
|
|
814
|
+
message=f"cURL history entry {i} must be a dictionary",
|
|
815
|
+
location=f"tool_settings.cURL Tool.history[{i}]",
|
|
816
|
+
auto_fixable=True
|
|
817
|
+
))
|
|
818
|
+
continue
|
|
819
|
+
|
|
820
|
+
# Validate required history fields
|
|
821
|
+
required_fields = ['timestamp', 'method', 'url', 'status_code']
|
|
822
|
+
for field in required_fields:
|
|
823
|
+
if field not in entry:
|
|
824
|
+
issues.append(IntegrityIssue(
|
|
825
|
+
issue_type=IntegrityIssueType.MISSING_REQUIRED,
|
|
826
|
+
severity="low",
|
|
827
|
+
message=f"cURL history entry {i} missing required field '{field}'",
|
|
828
|
+
location=f"tool_settings.cURL Tool.history[{i}].{field}",
|
|
829
|
+
auto_fixable=True
|
|
830
|
+
))
|
|
831
|
+
|
|
832
|
+
return issues
|
|
833
|
+
|
|
834
|
+
def _validate_ai_tools_config(self, config: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
835
|
+
"""Validate AI tools configuration."""
|
|
836
|
+
issues = []
|
|
837
|
+
|
|
838
|
+
# Check for API key encryption
|
|
839
|
+
if 'API_KEY' in config:
|
|
840
|
+
api_key = config['API_KEY']
|
|
841
|
+
if isinstance(api_key, str) and api_key.strip() and not api_key.startswith('ENC:'):
|
|
842
|
+
issues.append(IntegrityIssue(
|
|
843
|
+
issue_type=IntegrityIssueType.SECURITY_ISSUE,
|
|
844
|
+
severity="high",
|
|
845
|
+
message="AI Tools API key should be encrypted",
|
|
846
|
+
location="tool_settings.AI Tools.API_KEY",
|
|
847
|
+
suggestion="Encrypt API key with 'ENC:' prefix"
|
|
848
|
+
))
|
|
849
|
+
|
|
850
|
+
# Validate model list
|
|
851
|
+
if 'MODELS_LIST' in config:
|
|
852
|
+
models = config['MODELS_LIST']
|
|
853
|
+
if not isinstance(models, list):
|
|
854
|
+
issues.append(IntegrityIssue(
|
|
855
|
+
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
856
|
+
severity="medium",
|
|
857
|
+
message="AI Tools MODELS_LIST must be an array",
|
|
858
|
+
location="tool_settings.AI Tools.MODELS_LIST",
|
|
859
|
+
expected_value="array",
|
|
860
|
+
actual_value=type(models).__name__,
|
|
861
|
+
auto_fixable=True
|
|
862
|
+
))
|
|
863
|
+
|
|
864
|
+
return issues
|
|
865
|
+
|
|
866
|
+
def _validate_find_replace_config(self, config: Dict[str, Any]) -> List[IntegrityIssue]:
|
|
867
|
+
"""Validate Find & Replace tool configuration."""
|
|
868
|
+
issues = []
|
|
869
|
+
|
|
870
|
+
# Validate pattern library if present
|
|
871
|
+
if 'pattern_library' in config:
|
|
872
|
+
patterns = config['pattern_library']
|
|
873
|
+
if isinstance(patterns, list):
|
|
874
|
+
for i, pattern in enumerate(patterns):
|
|
875
|
+
if not isinstance(pattern, dict):
|
|
876
|
+
issues.append(IntegrityIssue(
|
|
877
|
+
issue_type=IntegrityIssueType.INVALID_TYPE,
|
|
878
|
+
severity="low",
|
|
879
|
+
message=f"Pattern library entry {i} must be a dictionary",
|
|
880
|
+
location=f"tool_settings.Find & Replace.pattern_library[{i}]",
|
|
881
|
+
auto_fixable=True
|
|
882
|
+
))
|
|
883
|
+
continue
|
|
884
|
+
|
|
885
|
+
# Validate pattern structure
|
|
886
|
+
if 'pattern' not in pattern:
|
|
887
|
+
issues.append(IntegrityIssue(
|
|
888
|
+
issue_type=IntegrityIssueType.MISSING_REQUIRED,
|
|
889
|
+
severity="medium",
|
|
890
|
+
message=f"Pattern library entry {i} missing 'pattern' field",
|
|
891
|
+
location=f"tool_settings.Find & Replace.pattern_library[{i}].pattern",
|
|
892
|
+
auto_fixable=True
|
|
893
|
+
))
|
|
894
|
+
|
|
895
|
+
return issues
|
|
896
|
+
|
|
897
|
+
# Helper methods
|
|
898
|
+
|
|
899
|
+
def _find_encrypted_values(self, data: Any, path: str = "") -> List[Tuple[str, str]]:
|
|
900
|
+
"""Find all encrypted values in the data structure."""
|
|
901
|
+
encrypted_values = []
|
|
902
|
+
|
|
903
|
+
if isinstance(data, dict):
|
|
904
|
+
for key, value in data.items():
|
|
905
|
+
current_path = f"{path}.{key}" if path else key
|
|
906
|
+
if isinstance(value, str) and value.startswith("ENC:"):
|
|
907
|
+
encrypted_values.append((current_path, value))
|
|
908
|
+
elif isinstance(value, (dict, list)):
|
|
909
|
+
encrypted_values.extend(self._find_encrypted_values(value, current_path))
|
|
910
|
+
elif isinstance(data, list):
|
|
911
|
+
for i, item in enumerate(data):
|
|
912
|
+
current_path = f"{path}[{i}]"
|
|
913
|
+
if isinstance(item, str) and item.startswith("ENC:"):
|
|
914
|
+
encrypted_values.append((current_path, item))
|
|
915
|
+
elif isinstance(item, (dict, list)):
|
|
916
|
+
encrypted_values.extend(self._find_encrypted_values(item, current_path))
|
|
917
|
+
|
|
918
|
+
return encrypted_values
|
|
919
|
+
|
|
920
|
+
def _is_valid_base64(self, data: str) -> bool:
|
|
921
|
+
"""Check if string is valid base64."""
|
|
922
|
+
import base64
|
|
923
|
+
try:
|
|
924
|
+
base64.b64decode(data, validate=True)
|
|
925
|
+
return True
|
|
926
|
+
except Exception:
|
|
927
|
+
return False
|
|
928
|
+
|
|
929
|
+
def _apply_automatic_fixes(self, settings_data: Dict[str, Any],
|
|
930
|
+
issues: List[IntegrityIssue]) -> int:
|
|
931
|
+
"""Apply automatic fixes to settings data."""
|
|
932
|
+
fixed_count = 0
|
|
933
|
+
|
|
934
|
+
for issue in issues:
|
|
935
|
+
if not issue.auto_fixable:
|
|
936
|
+
continue
|
|
937
|
+
|
|
938
|
+
try:
|
|
939
|
+
# Apply fix based on issue type
|
|
940
|
+
if issue.issue_type == IntegrityIssueType.MISSING_REQUIRED:
|
|
941
|
+
fixed_count += self._fix_missing_required(settings_data, issue)
|
|
942
|
+
elif issue.issue_type == IntegrityIssueType.INVALID_TYPE:
|
|
943
|
+
fixed_count += self._fix_invalid_type(settings_data, issue)
|
|
944
|
+
elif issue.issue_type == IntegrityIssueType.INVALID_VALUE:
|
|
945
|
+
fixed_count += self._fix_invalid_value(settings_data, issue)
|
|
946
|
+
|
|
947
|
+
except Exception as e:
|
|
948
|
+
self.logger.warning(f"Failed to apply automatic fix for {issue.location}: {e}")
|
|
949
|
+
|
|
950
|
+
return fixed_count
|
|
951
|
+
|
|
952
|
+
def _fix_missing_required(self, settings_data: Dict[str, Any],
                          issue: IntegrityIssue) -> int:
    """Fix missing required settings.

    Placeholder: no fix is applied yet, so this always returns 0
    (the number of fixes performed) and leaves *settings_data* untouched.
    """
    # Implementation would add default values for missing required settings
    return 0
|
|
957
|
+
|
|
958
|
+
def _fix_invalid_type(self, settings_data: Dict[str, Any],
                      issue: IntegrityIssue) -> int:
    """Fix invalid type issues.

    Placeholder: a real implementation would coerce values to their
    expected types. Currently repairs nothing.
    """
    # No fixes applied yet — report zero repairs.
    return 0
|
|
963
|
+
|
|
964
|
+
def _fix_invalid_value(self, settings_data: Dict[str, Any],
                       issue: IntegrityIssue) -> int:
    """Fix invalid value issues.

    Placeholder: a real implementation would replace out-of-range or
    otherwise invalid values. Currently repairs nothing.
    """
    # No fixes applied yet — report zero repairs.
    return 0
|
|
969
|
+
|
|
970
|
+
def _initialize_core_settings_rules(self) -> Dict[str, Any]:
|
|
971
|
+
"""Initialize validation rules for core settings."""
|
|
972
|
+
return {
|
|
973
|
+
'export_path': {
|
|
974
|
+
'type': str,
|
|
975
|
+
'required': True,
|
|
976
|
+
'pattern': r'^.+$', # Non-empty
|
|
977
|
+
'severity': 'critical',
|
|
978
|
+
'auto_fixable': True
|
|
979
|
+
},
|
|
980
|
+
'debug_level': {
|
|
981
|
+
'type': str,
|
|
982
|
+
'required': True,
|
|
983
|
+
'enum': ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
|
|
984
|
+
'severity': 'high',
|
|
985
|
+
'auto_fixable': True
|
|
986
|
+
},
|
|
987
|
+
'selected_tool': {
|
|
988
|
+
'type': str,
|
|
989
|
+
'required': True,
|
|
990
|
+
'pattern': r'^.+$', # Non-empty
|
|
991
|
+
'severity': 'medium',
|
|
992
|
+
'auto_fixable': True
|
|
993
|
+
},
|
|
994
|
+
'active_input_tab': {
|
|
995
|
+
'type': int,
|
|
996
|
+
'required': True,
|
|
997
|
+
'min': 0,
|
|
998
|
+
'max': 6,
|
|
999
|
+
'severity': 'medium',
|
|
1000
|
+
'auto_fixable': True
|
|
1001
|
+
},
|
|
1002
|
+
'active_output_tab': {
|
|
1003
|
+
'type': int,
|
|
1004
|
+
'required': True,
|
|
1005
|
+
'min': 0,
|
|
1006
|
+
'max': 6,
|
|
1007
|
+
'severity': 'medium',
|
|
1008
|
+
'auto_fixable': True
|
|
1009
|
+
}
|
|
1010
|
+
}
|
|
1011
|
+
|
|
1012
|
+
def _initialize_tool_settings_rules(self) -> Dict[str, Any]:
|
|
1013
|
+
"""Initialize validation rules for tool settings."""
|
|
1014
|
+
return {
|
|
1015
|
+
'cURL Tool': {
|
|
1016
|
+
'default_timeout': {
|
|
1017
|
+
'type': int,
|
|
1018
|
+
'min': 1,
|
|
1019
|
+
'max': 3600,
|
|
1020
|
+
'severity': 'medium',
|
|
1021
|
+
'auto_fixable': True
|
|
1022
|
+
},
|
|
1023
|
+
'follow_redirects': {
|
|
1024
|
+
'type': bool,
|
|
1025
|
+
'severity': 'low',
|
|
1026
|
+
'auto_fixable': True
|
|
1027
|
+
},
|
|
1028
|
+
'verify_ssl': {
|
|
1029
|
+
'type': bool,
|
|
1030
|
+
'severity': 'low',
|
|
1031
|
+
'auto_fixable': True
|
|
1032
|
+
},
|
|
1033
|
+
'max_redirects': {
|
|
1034
|
+
'type': int,
|
|
1035
|
+
'min': 0,
|
|
1036
|
+
'max': 50,
|
|
1037
|
+
'severity': 'medium',
|
|
1038
|
+
'auto_fixable': True
|
|
1039
|
+
}
|
|
1040
|
+
}
|
|
1041
|
+
}
|
|
1042
|
+
|
|
1043
|
+
def _initialize_performance_rules(self) -> Dict[str, Any]:
|
|
1044
|
+
"""Initialize validation rules for performance settings."""
|
|
1045
|
+
return {
|
|
1046
|
+
'async_processing': {
|
|
1047
|
+
'enabled': {
|
|
1048
|
+
'type': bool,
|
|
1049
|
+
'severity': 'low',
|
|
1050
|
+
'auto_fixable': True
|
|
1051
|
+
},
|
|
1052
|
+
'threshold_kb': {
|
|
1053
|
+
'type': int,
|
|
1054
|
+
'min': 1,
|
|
1055
|
+
'max': 10000,
|
|
1056
|
+
'severity': 'low',
|
|
1057
|
+
'auto_fixable': True
|
|
1058
|
+
}
|
|
1059
|
+
}
|
|
1060
|
+
}
|
|
1061
|
+
|
|
1062
|
+
def _initialize_security_rules(self) -> Dict[str, Any]:
|
|
1063
|
+
"""Initialize security validation rules."""
|
|
1064
|
+
return {
|
|
1065
|
+
'encrypted_fields': [
|
|
1066
|
+
'api_key', 'password', 'token', 'secret', 'key'
|
|
1067
|
+
],
|
|
1068
|
+
'sensitive_patterns': [
|
|
1069
|
+
r'(?i)api[_-]?key',
|
|
1070
|
+
r'(?i)password',
|
|
1071
|
+
r'(?i)token',
|
|
1072
|
+
r'(?i)secret'
|
|
1073
|
+
]
|
|
1074
|
+
}
|
|
1075
|
+
|
|
1076
|
+
def _log_validation_summary(self, issues: List[IntegrityIssue], validation_time: float) -> None:
    """Log a one-line summary of the integrity validation run.

    The log level escalates with the worst severity present: error when
    any critical issue exists, warning for high, info otherwise.
    """
    if not issues:
        self.logger.info(f"Settings integrity validation completed in {validation_time:.2f}s - no issues found")
        return

    # Tally how many issues fall under each severity label.
    tallies: Dict[str, int] = {}
    for issue in issues:
        tallies[issue.severity] = tallies.get(issue.severity, 0) + 1

    summary = f"Settings integrity validation completed in {validation_time:.2f}s - {len(issues)} issues found: "
    summary += ", ".join(f"{count} {severity}" for severity, count in tallies.items())

    # Pick the emitter that matches the worst severity observed.
    if 'critical' in tallies:
        emit = self.logger.error
    elif 'high' in tallies:
        emit = self.logger.warning
    else:
        emit = self.logger.info
    emit(summary)
|
|
1095
|
+
|
|
1096
|
+
def _generate_recommendations(self, issues: List[IntegrityIssue]) -> List[str]:
    """Generate actionable recommendations from validation issues.

    Returns one recommendation string per category that applies:
    critical issues, security issues, and auto-fixable issues.
    """
    recommendations: List[str] = []

    critical_count = sum(1 for i in issues if i.severity == 'critical')
    if critical_count:
        recommendations.append(f"Address {critical_count} critical issues immediately")

    security_count = sum(1 for i in issues if i.issue_type == IntegrityIssueType.SECURITY_ISSUE)
    if security_count:
        recommendations.append(f"Review {security_count} security issues")

    auto_fixable_count = sum(1 for i in issues if i.auto_fixable)
    if auto_fixable_count:
        recommendations.append(f"Run automatic repair for {auto_fixable_count} fixable issues")

    return recommendations
|