pomera-ai-commander 0.1.0 → 1.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -21
- package/README.md +105 -680
- package/bin/pomera-ai-commander.js +62 -62
- package/core/__init__.py +65 -65
- package/core/app_context.py +482 -482
- package/core/async_text_processor.py +421 -421
- package/core/backup_manager.py +655 -655
- package/core/backup_recovery_manager.py +1033 -1033
- package/core/content_hash_cache.py +508 -508
- package/core/context_menu.py +313 -313
- package/core/data_validator.py +1066 -1066
- package/core/database_connection_manager.py +744 -744
- package/core/database_curl_settings_manager.py +608 -608
- package/core/database_promera_ai_settings_manager.py +446 -446
- package/core/database_schema.py +411 -411
- package/core/database_schema_manager.py +395 -395
- package/core/database_settings_manager.py +1507 -1507
- package/core/database_settings_manager_interface.py +456 -456
- package/core/dialog_manager.py +734 -734
- package/core/efficient_line_numbers.py +510 -510
- package/core/error_handler.py +746 -746
- package/core/error_service.py +431 -431
- package/core/event_consolidator.py +511 -511
- package/core/mcp/__init__.py +43 -43
- package/core/mcp/protocol.py +288 -288
- package/core/mcp/schema.py +251 -251
- package/core/mcp/server_stdio.py +299 -299
- package/core/mcp/tool_registry.py +2372 -2345
- package/core/memory_efficient_text_widget.py +711 -711
- package/core/migration_manager.py +914 -914
- package/core/migration_test_suite.py +1085 -1085
- package/core/migration_validator.py +1143 -1143
- package/core/optimized_find_replace.py +714 -714
- package/core/optimized_pattern_engine.py +424 -424
- package/core/optimized_search_highlighter.py +552 -552
- package/core/performance_monitor.py +674 -674
- package/core/persistence_manager.py +712 -712
- package/core/progressive_stats_calculator.py +632 -632
- package/core/regex_pattern_cache.py +529 -529
- package/core/regex_pattern_library.py +350 -350
- package/core/search_operation_manager.py +434 -434
- package/core/settings_defaults_registry.py +1087 -1087
- package/core/settings_integrity_validator.py +1111 -1111
- package/core/settings_serializer.py +557 -557
- package/core/settings_validator.py +1823 -1823
- package/core/smart_stats_calculator.py +709 -709
- package/core/statistics_update_manager.py +619 -619
- package/core/stats_config_manager.py +858 -858
- package/core/streaming_text_handler.py +723 -723
- package/core/task_scheduler.py +596 -596
- package/core/update_pattern_library.py +168 -168
- package/core/visibility_monitor.py +596 -596
- package/core/widget_cache.py +498 -498
- package/mcp.json +51 -61
- package/package.json +61 -57
- package/pomera.py +7482 -7482
- package/pomera_mcp_server.py +183 -144
- package/requirements.txt +32 -0
- package/tools/__init__.py +4 -4
- package/tools/ai_tools.py +2891 -2891
- package/tools/ascii_art_generator.py +352 -352
- package/tools/base64_tools.py +183 -183
- package/tools/base_tool.py +511 -511
- package/tools/case_tool.py +308 -308
- package/tools/column_tools.py +395 -395
- package/tools/cron_tool.py +884 -884
- package/tools/curl_history.py +600 -600
- package/tools/curl_processor.py +1207 -1207
- package/tools/curl_settings.py +502 -502
- package/tools/curl_tool.py +5467 -5467
- package/tools/diff_viewer.py +1071 -1071
- package/tools/email_extraction_tool.py +248 -248
- package/tools/email_header_analyzer.py +425 -425
- package/tools/extraction_tools.py +250 -250
- package/tools/find_replace.py +1750 -1750
- package/tools/folder_file_reporter.py +1463 -1463
- package/tools/folder_file_reporter_adapter.py +480 -480
- package/tools/generator_tools.py +1216 -1216
- package/tools/hash_generator.py +255 -255
- package/tools/html_tool.py +656 -656
- package/tools/jsonxml_tool.py +729 -729
- package/tools/line_tools.py +419 -419
- package/tools/markdown_tools.py +561 -561
- package/tools/mcp_widget.py +1417 -1417
- package/tools/notes_widget.py +973 -973
- package/tools/number_base_converter.py +372 -372
- package/tools/regex_extractor.py +571 -571
- package/tools/slug_generator.py +310 -310
- package/tools/sorter_tools.py +458 -458
- package/tools/string_escape_tool.py +392 -392
- package/tools/text_statistics_tool.py +365 -365
- package/tools/text_wrapper.py +430 -430
- package/tools/timestamp_converter.py +421 -421
- package/tools/tool_loader.py +710 -710
- package/tools/translator_tools.py +522 -522
- package/tools/url_link_extractor.py +261 -261
- package/tools/url_parser.py +204 -204
- package/tools/whitespace_tools.py +355 -355
- package/tools/word_frequency_counter.py +146 -146
- package/core/__pycache__/__init__.cpython-313.pyc +0 -0
- package/core/__pycache__/app_context.cpython-313.pyc +0 -0
- package/core/__pycache__/async_text_processor.cpython-313.pyc +0 -0
- package/core/__pycache__/backup_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/backup_recovery_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/content_hash_cache.cpython-313.pyc +0 -0
- package/core/__pycache__/context_menu.cpython-313.pyc +0 -0
- package/core/__pycache__/data_validator.cpython-313.pyc +0 -0
- package/core/__pycache__/database_connection_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_curl_settings_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_promera_ai_settings_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_schema.cpython-313.pyc +0 -0
- package/core/__pycache__/database_schema_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_settings_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/database_settings_manager_interface.cpython-313.pyc +0 -0
- package/core/__pycache__/dialog_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/efficient_line_numbers.cpython-313.pyc +0 -0
- package/core/__pycache__/error_handler.cpython-313.pyc +0 -0
- package/core/__pycache__/error_service.cpython-313.pyc +0 -0
- package/core/__pycache__/event_consolidator.cpython-313.pyc +0 -0
- package/core/__pycache__/memory_efficient_text_widget.cpython-313.pyc +0 -0
- package/core/__pycache__/migration_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/migration_test_suite.cpython-313.pyc +0 -0
- package/core/__pycache__/migration_validator.cpython-313.pyc +0 -0
- package/core/__pycache__/optimized_find_replace.cpython-313.pyc +0 -0
- package/core/__pycache__/optimized_pattern_engine.cpython-313.pyc +0 -0
- package/core/__pycache__/optimized_search_highlighter.cpython-313.pyc +0 -0
- package/core/__pycache__/performance_monitor.cpython-313.pyc +0 -0
- package/core/__pycache__/persistence_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/progressive_stats_calculator.cpython-313.pyc +0 -0
- package/core/__pycache__/regex_pattern_cache.cpython-313.pyc +0 -0
- package/core/__pycache__/regex_pattern_library.cpython-313.pyc +0 -0
- package/core/__pycache__/search_operation_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/settings_defaults_registry.cpython-313.pyc +0 -0
- package/core/__pycache__/settings_integrity_validator.cpython-313.pyc +0 -0
- package/core/__pycache__/settings_serializer.cpython-313.pyc +0 -0
- package/core/__pycache__/settings_validator.cpython-313.pyc +0 -0
- package/core/__pycache__/smart_stats_calculator.cpython-313.pyc +0 -0
- package/core/__pycache__/statistics_update_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/stats_config_manager.cpython-313.pyc +0 -0
- package/core/__pycache__/streaming_text_handler.cpython-313.pyc +0 -0
- package/core/__pycache__/task_scheduler.cpython-313.pyc +0 -0
- package/core/__pycache__/visibility_monitor.cpython-313.pyc +0 -0
- package/core/__pycache__/widget_cache.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/__init__.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/protocol.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/schema.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/server_stdio.cpython-313.pyc +0 -0
- package/core/mcp/__pycache__/tool_registry.cpython-313.pyc +0 -0
- package/tools/__pycache__/__init__.cpython-313.pyc +0 -0
- package/tools/__pycache__/ai_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/ascii_art_generator.cpython-313.pyc +0 -0
- package/tools/__pycache__/base64_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/base_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/case_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/column_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/cron_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/curl_history.cpython-313.pyc +0 -0
- package/tools/__pycache__/curl_processor.cpython-313.pyc +0 -0
- package/tools/__pycache__/curl_settings.cpython-313.pyc +0 -0
- package/tools/__pycache__/curl_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/diff_viewer.cpython-313.pyc +0 -0
- package/tools/__pycache__/email_extraction_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/email_header_analyzer.cpython-313.pyc +0 -0
- package/tools/__pycache__/extraction_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/find_replace.cpython-313.pyc +0 -0
- package/tools/__pycache__/folder_file_reporter.cpython-313.pyc +0 -0
- package/tools/__pycache__/folder_file_reporter_adapter.cpython-313.pyc +0 -0
- package/tools/__pycache__/generator_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/hash_generator.cpython-313.pyc +0 -0
- package/tools/__pycache__/html_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/huggingface_helper.cpython-313.pyc +0 -0
- package/tools/__pycache__/jsonxml_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/line_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/list_comparator.cpython-313.pyc +0 -0
- package/tools/__pycache__/markdown_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/mcp_widget.cpython-313.pyc +0 -0
- package/tools/__pycache__/notes_widget.cpython-313.pyc +0 -0
- package/tools/__pycache__/number_base_converter.cpython-313.pyc +0 -0
- package/tools/__pycache__/regex_extractor.cpython-313.pyc +0 -0
- package/tools/__pycache__/slug_generator.cpython-313.pyc +0 -0
- package/tools/__pycache__/sorter_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/string_escape_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/text_statistics_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/text_wrapper.cpython-313.pyc +0 -0
- package/tools/__pycache__/timestamp_converter.cpython-313.pyc +0 -0
- package/tools/__pycache__/tool_loader.cpython-313.pyc +0 -0
- package/tools/__pycache__/translator_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/url_link_extractor.cpython-313.pyc +0 -0
- package/tools/__pycache__/url_parser.cpython-313.pyc +0 -0
- package/tools/__pycache__/whitespace_tools.cpython-313.pyc +0 -0
- package/tools/__pycache__/word_frequency_counter.cpython-313.pyc +0 -0
|
@@ -1,713 +1,713 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Enhanced Persistence Manager for Database Settings
|
|
3
|
-
|
|
4
|
-
This module provides comprehensive persistence management including
|
|
5
|
-
configurable backup intervals, disk persistence triggers, backup rotation,
|
|
6
|
-
corruption recovery, and monitoring for backup success/failure.
|
|
7
|
-
"""
|
|
8
|
-
|
|
9
|
-
import os
|
|
10
|
-
import sqlite3
|
|
11
|
-
import threading
|
|
12
|
-
import time
|
|
13
|
-
import logging
|
|
14
|
-
import json
|
|
15
|
-
import hashlib
|
|
16
|
-
from typing import Dict, List, Optional, Any, Callable
|
|
17
|
-
from datetime import datetime, timedelta
|
|
18
|
-
from pathlib import Path
|
|
19
|
-
from dataclasses import dataclass
|
|
20
|
-
from enum import Enum
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
class PersistenceEventType(Enum):
|
|
24
|
-
"""Types of persistence events."""
|
|
25
|
-
BACKUP_CREATED = "backup_created"
|
|
26
|
-
BACKUP_FAILED = "backup_failed"
|
|
27
|
-
PERSISTENCE_TRIGGERED = "persistence_triggered"
|
|
28
|
-
CORRUPTION_DETECTED = "corruption_detected"
|
|
29
|
-
RECOVERY_COMPLETED = "recovery_completed"
|
|
30
|
-
CLEANUP_PERFORMED = "cleanup_performed"
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
@dataclass
|
|
34
|
-
class PersistenceConfig:
|
|
35
|
-
"""Configuration for persistence management."""
|
|
36
|
-
backup_interval_seconds: int = 300 # 5 minutes
|
|
37
|
-
change_threshold: int = 100 # Number of changes before backup
|
|
38
|
-
max_backups: int = 10
|
|
39
|
-
max_backup_age_days: int = 30
|
|
40
|
-
enable_compression: bool = True
|
|
41
|
-
enable_integrity_checks: bool = True
|
|
42
|
-
corruption_recovery_enabled: bool = True
|
|
43
|
-
persistence_on_shutdown: bool = True
|
|
44
|
-
|
|
45
|
-
def to_dict(self) -> Dict[str, Any]:
|
|
46
|
-
"""Convert to dictionary."""
|
|
47
|
-
return {
|
|
48
|
-
'backup_interval_seconds': self.backup_interval_seconds,
|
|
49
|
-
'change_threshold': self.change_threshold,
|
|
50
|
-
'max_backups': self.max_backups,
|
|
51
|
-
'max_backup_age_days': self.max_backup_age_days,
|
|
52
|
-
'enable_compression': self.enable_compression,
|
|
53
|
-
'enable_integrity_checks': self.enable_integrity_checks,
|
|
54
|
-
'corruption_recovery_enabled': self.corruption_recovery_enabled,
|
|
55
|
-
'persistence_on_shutdown': self.persistence_on_shutdown
|
|
56
|
-
}
|
|
57
|
-
|
|
58
|
-
@classmethod
|
|
59
|
-
def from_dict(cls, data: Dict[str, Any]) -> 'PersistenceConfig':
|
|
60
|
-
"""Create from dictionary."""
|
|
61
|
-
return cls(**data)
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
@dataclass
|
|
65
|
-
class PersistenceEvent:
|
|
66
|
-
"""Information about a persistence event."""
|
|
67
|
-
event_type: PersistenceEventType
|
|
68
|
-
timestamp: datetime
|
|
69
|
-
details: Dict[str, Any]
|
|
70
|
-
success: bool = True
|
|
71
|
-
error_message: Optional[str] = None
|
|
72
|
-
|
|
73
|
-
def to_dict(self) -> Dict[str, Any]:
|
|
74
|
-
"""Convert to dictionary."""
|
|
75
|
-
return {
|
|
76
|
-
'event_type': self.event_type.value,
|
|
77
|
-
'timestamp': self.timestamp.isoformat(),
|
|
78
|
-
'details': self.details,
|
|
79
|
-
'success': self.success,
|
|
80
|
-
'error_message': self.error_message
|
|
81
|
-
}
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
class DatabaseIntegrityChecker:
|
|
85
|
-
"""Checks database integrity and detects corruption."""
|
|
86
|
-
|
|
87
|
-
def __init__(self, connection_manager):
|
|
88
|
-
"""
|
|
89
|
-
Initialize integrity checker.
|
|
90
|
-
|
|
91
|
-
Args:
|
|
92
|
-
connection_manager: Database connection manager
|
|
93
|
-
"""
|
|
94
|
-
self.connection_manager = connection_manager
|
|
95
|
-
self.logger = logging.getLogger(__name__)
|
|
96
|
-
|
|
97
|
-
def check_integrity(self) -> Dict[str, Any]:
|
|
98
|
-
"""
|
|
99
|
-
Perform comprehensive database integrity check.
|
|
100
|
-
|
|
101
|
-
Returns:
|
|
102
|
-
Dictionary with integrity check results
|
|
103
|
-
"""
|
|
104
|
-
results = {
|
|
105
|
-
'overall_status': 'healthy',
|
|
106
|
-
'checks_performed': [],
|
|
107
|
-
'issues_found': [],
|
|
108
|
-
'recommendations': []
|
|
109
|
-
}
|
|
110
|
-
|
|
111
|
-
try:
|
|
112
|
-
conn = self.connection_manager.get_connection()
|
|
113
|
-
|
|
114
|
-
# 1. SQLite integrity check
|
|
115
|
-
integrity_result = self._check_sqlite_integrity(conn)
|
|
116
|
-
results['checks_performed'].append('sqlite_integrity')
|
|
117
|
-
if not integrity_result['passed']:
|
|
118
|
-
results['overall_status'] = 'corrupted'
|
|
119
|
-
results['issues_found'].extend(integrity_result['issues'])
|
|
120
|
-
|
|
121
|
-
# 2. Schema validation
|
|
122
|
-
schema_result = self._check_schema_validity(conn)
|
|
123
|
-
results['checks_performed'].append('schema_validation')
|
|
124
|
-
if not schema_result['passed']:
|
|
125
|
-
results['overall_status'] = 'schema_issues'
|
|
126
|
-
results['issues_found'].extend(schema_result['issues'])
|
|
127
|
-
|
|
128
|
-
# 3. Data consistency checks
|
|
129
|
-
consistency_result = self._check_data_consistency(conn)
|
|
130
|
-
results['checks_performed'].append('data_consistency')
|
|
131
|
-
if not consistency_result['passed']:
|
|
132
|
-
if results['overall_status'] == 'healthy':
|
|
133
|
-
results['overall_status'] = 'data_issues'
|
|
134
|
-
results['issues_found'].extend(consistency_result['issues'])
|
|
135
|
-
|
|
136
|
-
# 4. Performance checks
|
|
137
|
-
performance_result = self._check_performance_indicators(conn)
|
|
138
|
-
results['checks_performed'].append('performance_indicators')
|
|
139
|
-
results['recommendations'].extend(performance_result['recommendations'])
|
|
140
|
-
|
|
141
|
-
except Exception as e:
|
|
142
|
-
results['overall_status'] = 'check_failed'
|
|
143
|
-
results['issues_found'].append(f"Integrity check failed: {e}")
|
|
144
|
-
self.logger.error(f"Database integrity check failed: {e}")
|
|
145
|
-
|
|
146
|
-
return results
|
|
147
|
-
|
|
148
|
-
def _check_sqlite_integrity(self, conn: sqlite3.Connection) -> Dict[str, Any]:
|
|
149
|
-
"""Check SQLite database integrity."""
|
|
150
|
-
try:
|
|
151
|
-
cursor = conn.execute("PRAGMA integrity_check")
|
|
152
|
-
result = cursor.fetchone()[0]
|
|
153
|
-
|
|
154
|
-
if result == "ok":
|
|
155
|
-
return {'passed': True, 'issues': []}
|
|
156
|
-
else:
|
|
157
|
-
return {
|
|
158
|
-
'passed': False,
|
|
159
|
-
'issues': [f"SQLite integrity check failed: {result}"]
|
|
160
|
-
}
|
|
161
|
-
except Exception as e:
|
|
162
|
-
return {
|
|
163
|
-
'passed': False,
|
|
164
|
-
'issues': [f"Could not perform integrity check: {e}"]
|
|
165
|
-
}
|
|
166
|
-
|
|
167
|
-
def _check_schema_validity(self, conn: sqlite3.Connection) -> Dict[str, Any]:
|
|
168
|
-
"""Check if database schema is valid."""
|
|
169
|
-
issues = []
|
|
170
|
-
|
|
171
|
-
try:
|
|
172
|
-
# Check if required tables exist
|
|
173
|
-
required_tables = [
|
|
174
|
-
'core_settings', 'tool_settings', 'tab_content',
|
|
175
|
-
'performance_settings', 'font_settings', 'dialog_settings',
|
|
176
|
-
'settings_metadata'
|
|
177
|
-
]
|
|
178
|
-
|
|
179
|
-
cursor = conn.execute(
|
|
180
|
-
"SELECT name FROM sqlite_master WHERE type='table'"
|
|
181
|
-
)
|
|
182
|
-
existing_tables = {row[0] for row in cursor.fetchall()}
|
|
183
|
-
|
|
184
|
-
missing_tables = set(required_tables) - existing_tables
|
|
185
|
-
if missing_tables:
|
|
186
|
-
issues.append(f"Missing tables: {missing_tables}")
|
|
187
|
-
|
|
188
|
-
# Check table schemas
|
|
189
|
-
for table in existing_tables:
|
|
190
|
-
if table in required_tables:
|
|
191
|
-
schema_issues = self._validate_table_schema(conn, table)
|
|
192
|
-
issues.extend(schema_issues)
|
|
193
|
-
|
|
194
|
-
except Exception as e:
|
|
195
|
-
issues.append(f"Schema validation error: {e}")
|
|
196
|
-
|
|
197
|
-
return {
|
|
198
|
-
'passed': len(issues) == 0,
|
|
199
|
-
'issues': issues
|
|
200
|
-
}
|
|
201
|
-
|
|
202
|
-
def _validate_table_schema(self, conn: sqlite3.Connection, table_name: str) -> List[str]:
|
|
203
|
-
"""Validate schema for a specific table."""
|
|
204
|
-
issues = []
|
|
205
|
-
|
|
206
|
-
try:
|
|
207
|
-
cursor = conn.execute(f"PRAGMA table_info({table_name})")
|
|
208
|
-
columns = cursor.fetchall()
|
|
209
|
-
|
|
210
|
-
if not columns:
|
|
211
|
-
issues.append(f"Table {table_name} has no columns")
|
|
212
|
-
|
|
213
|
-
# Basic validation - ensure tables have expected structure
|
|
214
|
-
column_names = {col[1] for col in columns}
|
|
215
|
-
|
|
216
|
-
if table_name == 'core_settings':
|
|
217
|
-
required_cols = {'key', 'value', 'data_type'}
|
|
218
|
-
missing = required_cols - column_names
|
|
219
|
-
if missing:
|
|
220
|
-
issues.append(f"core_settings missing columns: {missing}")
|
|
221
|
-
|
|
222
|
-
elif table_name == 'tool_settings':
|
|
223
|
-
required_cols = {'tool_name', 'setting_path', 'setting_value', 'data_type'}
|
|
224
|
-
missing = required_cols - column_names
|
|
225
|
-
if missing:
|
|
226
|
-
issues.append(f"tool_settings missing columns: {missing}")
|
|
227
|
-
|
|
228
|
-
except Exception as e:
|
|
229
|
-
issues.append(f"Could not validate {table_name} schema: {e}")
|
|
230
|
-
|
|
231
|
-
return issues
|
|
232
|
-
|
|
233
|
-
def _check_data_consistency(self, conn: sqlite3.Connection) -> Dict[str, Any]:
|
|
234
|
-
"""Check data consistency across tables."""
|
|
235
|
-
issues = []
|
|
236
|
-
|
|
237
|
-
try:
|
|
238
|
-
# Check for orphaned data
|
|
239
|
-
# Check for invalid data types
|
|
240
|
-
cursor = conn.execute(
|
|
241
|
-
"SELECT COUNT(*) FROM core_settings WHERE key IS NULL OR key = ''"
|
|
242
|
-
)
|
|
243
|
-
null_keys = cursor.fetchone()[0]
|
|
244
|
-
if null_keys > 0:
|
|
245
|
-
issues.append(f"Found {null_keys} core_settings with null/empty keys")
|
|
246
|
-
|
|
247
|
-
# Check tool_settings consistency
|
|
248
|
-
cursor = conn.execute(
|
|
249
|
-
"SELECT COUNT(*) FROM tool_settings WHERE tool_name IS NULL OR tool_name = ''"
|
|
250
|
-
)
|
|
251
|
-
null_tools = cursor.fetchone()[0]
|
|
252
|
-
if null_tools > 0:
|
|
253
|
-
issues.append(f"Found {null_tools} tool_settings with null/empty tool names")
|
|
254
|
-
|
|
255
|
-
except Exception as e:
|
|
256
|
-
issues.append(f"Data consistency check error: {e}")
|
|
257
|
-
|
|
258
|
-
return {
|
|
259
|
-
'passed': len(issues) == 0,
|
|
260
|
-
'issues': issues
|
|
261
|
-
}
|
|
262
|
-
|
|
263
|
-
def _check_performance_indicators(self, conn: sqlite3.Connection) -> Dict[str, Any]:
|
|
264
|
-
"""Check performance indicators and suggest optimizations."""
|
|
265
|
-
recommendations = []
|
|
266
|
-
|
|
267
|
-
try:
|
|
268
|
-
# Check for missing indexes
|
|
269
|
-
cursor = conn.execute(
|
|
270
|
-
"SELECT name FROM sqlite_master WHERE type='index'"
|
|
271
|
-
)
|
|
272
|
-
existing_indexes = {row[0] for row in cursor.fetchall()}
|
|
273
|
-
|
|
274
|
-
recommended_indexes = {
|
|
275
|
-
'idx_core_settings_key',
|
|
276
|
-
'idx_tool_settings_tool_name',
|
|
277
|
-
'idx_tool_settings_path'
|
|
278
|
-
}
|
|
279
|
-
|
|
280
|
-
missing_indexes = recommended_indexes - existing_indexes
|
|
281
|
-
if missing_indexes:
|
|
282
|
-
recommendations.append(f"Consider creating indexes: {missing_indexes}")
|
|
283
|
-
|
|
284
|
-
# Check table sizes
|
|
285
|
-
cursor = conn.execute(
|
|
286
|
-
"SELECT name, COUNT(*) FROM sqlite_master m "
|
|
287
|
-
"LEFT JOIN pragma_table_info(m.name) p ON m.name != p.name "
|
|
288
|
-
"WHERE m.type='table' GROUP BY m.name"
|
|
289
|
-
)
|
|
290
|
-
|
|
291
|
-
for table_name, _ in cursor.fetchall():
|
|
292
|
-
if table_name.startswith('sqlite_'):
|
|
293
|
-
continue
|
|
294
|
-
|
|
295
|
-
try:
|
|
296
|
-
count_cursor = conn.execute(f"SELECT COUNT(*) FROM {table_name}")
|
|
297
|
-
row_count = count_cursor.fetchone()[0]
|
|
298
|
-
|
|
299
|
-
if row_count > 10000:
|
|
300
|
-
recommendations.append(
|
|
301
|
-
f"Table {table_name} has {row_count} rows, consider archiving old data"
|
|
302
|
-
)
|
|
303
|
-
except Exception:
|
|
304
|
-
pass # Skip tables we can't query
|
|
305
|
-
|
|
306
|
-
except Exception as e:
|
|
307
|
-
recommendations.append(f"Performance check error: {e}")
|
|
308
|
-
|
|
309
|
-
return {'recommendations': recommendations}
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
class PersistenceManager:
|
|
313
|
-
"""
|
|
314
|
-
Enhanced persistence manager with comprehensive backup and recovery capabilities.
|
|
315
|
-
"""
|
|
316
|
-
|
|
317
|
-
def __init__(self, connection_manager, backup_manager,
|
|
318
|
-
config: Optional[PersistenceConfig] = None):
|
|
319
|
-
"""
|
|
320
|
-
Initialize persistence manager.
|
|
321
|
-
|
|
322
|
-
Args:
|
|
323
|
-
connection_manager: Database connection manager
|
|
324
|
-
backup_manager: Backup manager instance
|
|
325
|
-
config: Persistence configuration
|
|
326
|
-
"""
|
|
327
|
-
self.connection_manager = connection_manager
|
|
328
|
-
self.backup_manager = backup_manager
|
|
329
|
-
self.config = config or PersistenceConfig()
|
|
330
|
-
|
|
331
|
-
# Components
|
|
332
|
-
self.integrity_checker = DatabaseIntegrityChecker(connection_manager)
|
|
333
|
-
|
|
334
|
-
# State tracking
|
|
335
|
-
self.changes_since_persistence = 0
|
|
336
|
-
self.last_persistence_time = None
|
|
337
|
-
self.last_integrity_check = None
|
|
338
|
-
|
|
339
|
-
# Event tracking
|
|
340
|
-
self.events = []
|
|
341
|
-
self.max_events = 1000
|
|
342
|
-
|
|
343
|
-
# Threading
|
|
344
|
-
self._lock = threading.RLock()
|
|
345
|
-
self._persistence_thread = None
|
|
346
|
-
self._stop_event = threading.Event()
|
|
347
|
-
|
|
348
|
-
# Callbacks
|
|
349
|
-
self._event_callbacks = []
|
|
350
|
-
|
|
351
|
-
# Logging
|
|
352
|
-
self.logger = logging.getLogger(__name__)
|
|
353
|
-
|
|
354
|
-
# Statistics
|
|
355
|
-
self.stats = {
|
|
356
|
-
'total_persistence_operations': 0,
|
|
357
|
-
'successful_persistence_operations': 0,
|
|
358
|
-
'failed_persistence_operations': 0,
|
|
359
|
-
'corruption_incidents': 0,
|
|
360
|
-
'recovery_operations': 0,
|
|
361
|
-
'integrity_checks_performed': 0
|
|
362
|
-
}
|
|
363
|
-
|
|
364
|
-
def start_persistence_monitoring(self) -> None:
|
|
365
|
-
"""Start automatic persistence monitoring."""
|
|
366
|
-
if self._persistence_thread and self._persistence_thread.is_alive():
|
|
367
|
-
return
|
|
368
|
-
|
|
369
|
-
self._stop_event.clear()
|
|
370
|
-
self._persistence_thread = threading.Thread(
|
|
371
|
-
target=self._persistence_worker,
|
|
372
|
-
daemon=True,
|
|
373
|
-
name="PersistenceManager"
|
|
374
|
-
)
|
|
375
|
-
self._persistence_thread.start()
|
|
376
|
-
self.logger.info("Persistence monitoring started")
|
|
377
|
-
|
|
378
|
-
def stop_persistence_monitoring(self) -> None:
|
|
379
|
-
"""Stop automatic persistence monitoring."""
|
|
380
|
-
if self._persistence_thread and self._persistence_thread.is_alive():
|
|
381
|
-
self._stop_event.set()
|
|
382
|
-
self._persistence_thread.join(timeout=10)
|
|
383
|
-
self.logger.info("Persistence monitoring stopped")
|
|
384
|
-
|
|
385
|
-
def _persistence_worker(self) -> None:
|
|
386
|
-
"""Worker thread for automatic persistence operations."""
|
|
387
|
-
while not self._stop_event.is_set():
|
|
388
|
-
try:
|
|
389
|
-
# Check if persistence is needed
|
|
390
|
-
should_persist = self._should_trigger_persistence()
|
|
391
|
-
|
|
392
|
-
if should_persist:
|
|
393
|
-
self._perform_persistence_operation()
|
|
394
|
-
|
|
395
|
-
# Periodic integrity checks
|
|
396
|
-
if self._should_perform_integrity_check():
|
|
397
|
-
self._perform_integrity_check()
|
|
398
|
-
|
|
399
|
-
# Cleanup old backups
|
|
400
|
-
if self._should_perform_cleanup():
|
|
401
|
-
self._perform_cleanup()
|
|
402
|
-
|
|
403
|
-
# Wait before next check
|
|
404
|
-
self._stop_event.wait(min(60, self.config.backup_interval_seconds // 10))
|
|
405
|
-
|
|
406
|
-
except Exception as e:
|
|
407
|
-
self.logger.error(f"Persistence worker error: {e}")
|
|
408
|
-
self._record_event(
|
|
409
|
-
PersistenceEventType.BACKUP_FAILED,
|
|
410
|
-
{'error': str(e)},
|
|
411
|
-
success=False,
|
|
412
|
-
error_message=str(e)
|
|
413
|
-
)
|
|
414
|
-
self._stop_event.wait(60)
|
|
415
|
-
|
|
416
|
-
def _should_trigger_persistence(self) -> bool:
|
|
417
|
-
"""Determine if persistence operation should be triggered."""
|
|
418
|
-
# Time-based trigger
|
|
419
|
-
if self.last_persistence_time is None:
|
|
420
|
-
return True
|
|
421
|
-
|
|
422
|
-
time_since_last = datetime.now() - self.last_persistence_time
|
|
423
|
-
if time_since_last.total_seconds() >= self.config.backup_interval_seconds:
|
|
424
|
-
return True
|
|
425
|
-
|
|
426
|
-
# Change-based trigger
|
|
427
|
-
if self.changes_since_persistence >= self.config.change_threshold:
|
|
428
|
-
return True
|
|
429
|
-
|
|
430
|
-
return False
|
|
431
|
-
|
|
432
|
-
def _should_perform_integrity_check(self) -> bool:
|
|
433
|
-
"""Determine if integrity check should be performed."""
|
|
434
|
-
if not self.config.enable_integrity_checks:
|
|
435
|
-
return False
|
|
436
|
-
|
|
437
|
-
if self.last_integrity_check is None:
|
|
438
|
-
return True
|
|
439
|
-
|
|
440
|
-
# Perform integrity check every 24 hours
|
|
441
|
-
time_since_check = datetime.now() - self.last_integrity_check
|
|
442
|
-
return time_since_check.total_seconds() >= 86400 # 24 hours
|
|
443
|
-
|
|
444
|
-
def _should_perform_cleanup(self) -> bool:
|
|
445
|
-
"""Determine if cleanup should be performed."""
|
|
446
|
-
# Perform cleanup once per day
|
|
447
|
-
return True # Let cleanup method handle frequency
|
|
448
|
-
|
|
449
|
-
def _perform_persistence_operation(self) -> None:
|
|
450
|
-
"""Perform persistence operation (backup)."""
|
|
451
|
-
try:
|
|
452
|
-
with self._lock:
|
|
453
|
-
# Trigger backup
|
|
454
|
-
from .backup_manager import BackupTrigger
|
|
455
|
-
|
|
456
|
-
trigger = BackupTrigger.TIME_BASED
|
|
457
|
-
if self.changes_since_persistence >= self.config.change_threshold:
|
|
458
|
-
trigger = BackupTrigger.CHANGE_BASED
|
|
459
|
-
|
|
460
|
-
backup_info = self.backup_manager.backup_database(
|
|
461
|
-
self.connection_manager,
|
|
462
|
-
trigger=trigger,
|
|
463
|
-
metadata={'persistence_manager': True}
|
|
464
|
-
)
|
|
465
|
-
|
|
466
|
-
if backup_info:
|
|
467
|
-
self.last_persistence_time = datetime.now()
|
|
468
|
-
self.changes_since_persistence = 0
|
|
469
|
-
self.stats['successful_persistence_operations'] += 1
|
|
470
|
-
|
|
471
|
-
self._record_event(
|
|
472
|
-
PersistenceEventType.PERSISTENCE_TRIGGERED,
|
|
473
|
-
{
|
|
474
|
-
'trigger': trigger.value,
|
|
475
|
-
'backup_file': backup_info.filepath,
|
|
476
|
-
'backup_size': backup_info.size_bytes
|
|
477
|
-
}
|
|
478
|
-
)
|
|
479
|
-
else:
|
|
480
|
-
self.stats['failed_persistence_operations'] += 1
|
|
481
|
-
self._record_event(
|
|
482
|
-
PersistenceEventType.BACKUP_FAILED,
|
|
483
|
-
{'trigger': trigger.value},
|
|
484
|
-
success=False,
|
|
485
|
-
error_message="Backup creation failed"
|
|
486
|
-
)
|
|
487
|
-
|
|
488
|
-
self.stats['total_persistence_operations'] += 1
|
|
489
|
-
|
|
490
|
-
except Exception as e:
|
|
491
|
-
self.logger.error(f"Persistence operation failed: {e}")
|
|
492
|
-
self.stats['failed_persistence_operations'] += 1
|
|
493
|
-
self._record_event(
|
|
494
|
-
PersistenceEventType.BACKUP_FAILED,
|
|
495
|
-
{'error': str(e)},
|
|
496
|
-
success=False,
|
|
497
|
-
error_message=str(e)
|
|
498
|
-
)
|
|
499
|
-
|
|
500
|
-
def _perform_integrity_check(self) -> None:
|
|
501
|
-
"""Perform database integrity check."""
|
|
502
|
-
try:
|
|
503
|
-
self.logger.info("Performing database integrity check")
|
|
504
|
-
|
|
505
|
-
integrity_results = self.integrity_checker.check_integrity()
|
|
506
|
-
self.last_integrity_check = datetime.now()
|
|
507
|
-
self.stats['integrity_checks_performed'] += 1
|
|
508
|
-
|
|
509
|
-
if integrity_results['overall_status'] == 'corrupted':
|
|
510
|
-
self.stats['corruption_incidents'] += 1
|
|
511
|
-
self._record_event(
|
|
512
|
-
PersistenceEventType.CORRUPTION_DETECTED,
|
|
513
|
-
integrity_results,
|
|
514
|
-
success=False,
|
|
515
|
-
error_message="Database corruption detected"
|
|
516
|
-
)
|
|
517
|
-
|
|
518
|
-
# Attempt recovery if enabled
|
|
519
|
-
if self.config.corruption_recovery_enabled:
|
|
520
|
-
self._attempt_corruption_recovery()
|
|
521
|
-
|
|
522
|
-
elif integrity_results['overall_status'] != 'healthy':
|
|
523
|
-
self._record_event(
|
|
524
|
-
PersistenceEventType.CORRUPTION_DETECTED,
|
|
525
|
-
integrity_results,
|
|
526
|
-
success=False,
|
|
527
|
-
error_message=f"Database issues detected: {integrity_results['overall_status']}"
|
|
528
|
-
)
|
|
529
|
-
|
|
530
|
-
except Exception as e:
|
|
531
|
-
self.logger.error(f"Integrity check failed: {e}")
|
|
532
|
-
|
|
533
|
-
def _attempt_corruption_recovery(self) -> None:
|
|
534
|
-
"""Attempt to recover from database corruption."""
|
|
535
|
-
try:
|
|
536
|
-
self.logger.warning("Attempting database corruption recovery")
|
|
537
|
-
|
|
538
|
-
# Try to restore from latest backup
|
|
539
|
-
recovery_success = self.backup_manager.restore_from_backup(
|
|
540
|
-
self.connection_manager
|
|
541
|
-
)
|
|
542
|
-
|
|
543
|
-
if recovery_success:
|
|
544
|
-
self.stats['recovery_operations'] += 1
|
|
545
|
-
self._record_event(
|
|
546
|
-
PersistenceEventType.RECOVERY_COMPLETED,
|
|
547
|
-
{'method': 'backup_restore'},
|
|
548
|
-
success=True
|
|
549
|
-
)
|
|
550
|
-
self.logger.info("Database recovery successful")
|
|
551
|
-
else:
|
|
552
|
-
self._record_event(
|
|
553
|
-
PersistenceEventType.RECOVERY_COMPLETED,
|
|
554
|
-
{'method': 'backup_restore'},
|
|
555
|
-
success=False,
|
|
556
|
-
error_message="Backup restore failed"
|
|
557
|
-
)
|
|
558
|
-
self.logger.error("Database recovery failed")
|
|
559
|
-
|
|
560
|
-
except Exception as e:
|
|
561
|
-
self.logger.error(f"Corruption recovery failed: {e}")
|
|
562
|
-
self._record_event(
|
|
563
|
-
PersistenceEventType.RECOVERY_COMPLETED,
|
|
564
|
-
{'method': 'backup_restore', 'error': str(e)},
|
|
565
|
-
success=False,
|
|
566
|
-
error_message=str(e)
|
|
567
|
-
)
|
|
568
|
-
|
|
569
|
-
def _perform_cleanup(self) -> None:
|
|
570
|
-
"""Perform cleanup of old backups and maintenance."""
|
|
571
|
-
try:
|
|
572
|
-
# This is handled by the backup manager's rotation policy
|
|
573
|
-
# We just record the event
|
|
574
|
-
self._record_event(
|
|
575
|
-
PersistenceEventType.CLEANUP_PERFORMED,
|
|
576
|
-
{'timestamp': datetime.now().isoformat()}
|
|
577
|
-
)
|
|
578
|
-
|
|
579
|
-
except Exception as e:
|
|
580
|
-
self.logger.error(f"Cleanup operation failed: {e}")
|
|
581
|
-
|
|
582
|
-
def record_change(self) -> None:
|
|
583
|
-
"""Record a database change for persistence triggering."""
|
|
584
|
-
with self._lock:
|
|
585
|
-
self.changes_since_persistence += 1
|
|
586
|
-
|
|
587
|
-
def force_persistence(self) -> bool:
|
|
588
|
-
"""Force immediate persistence operation."""
|
|
589
|
-
try:
|
|
590
|
-
self._perform_persistence_operation()
|
|
591
|
-
return True
|
|
592
|
-
except Exception as e:
|
|
593
|
-
self.logger.error(f"Forced persistence failed: {e}")
|
|
594
|
-
return False
|
|
595
|
-
|
|
596
|
-
def get_persistence_status(self) -> Dict[str, Any]:
|
|
597
|
-
"""
|
|
598
|
-
Get comprehensive persistence status.
|
|
599
|
-
|
|
600
|
-
Returns:
|
|
601
|
-
Dictionary with persistence status and statistics
|
|
602
|
-
"""
|
|
603
|
-
with self._lock:
|
|
604
|
-
return {
|
|
605
|
-
'config': self.config.to_dict(),
|
|
606
|
-
'status': {
|
|
607
|
-
'changes_since_persistence': self.changes_since_persistence,
|
|
608
|
-
'last_persistence_time': self.last_persistence_time.isoformat() if self.last_persistence_time else None,
|
|
609
|
-
'last_integrity_check': self.last_integrity_check.isoformat() if self.last_integrity_check else None,
|
|
610
|
-
'monitoring_active': self._persistence_thread and self._persistence_thread.is_alive()
|
|
611
|
-
},
|
|
612
|
-
'statistics': self.stats.copy(),
|
|
613
|
-
'recent_events': [
|
|
614
|
-
event.to_dict() for event in self.events[-10:]
|
|
615
|
-
]
|
|
616
|
-
}
|
|
617
|
-
|
|
618
|
-
def _record_event(self, event_type: PersistenceEventType, details: Dict[str, Any],
|
|
619
|
-
success: bool = True, error_message: Optional[str] = None) -> None:
|
|
620
|
-
"""Record a persistence event."""
|
|
621
|
-
event = PersistenceEvent(
|
|
622
|
-
event_type=event_type,
|
|
623
|
-
timestamp=datetime.now(),
|
|
624
|
-
details=details,
|
|
625
|
-
success=success,
|
|
626
|
-
error_message=error_message
|
|
627
|
-
)
|
|
628
|
-
|
|
629
|
-
with self._lock:
|
|
630
|
-
self.events.append(event)
|
|
631
|
-
|
|
632
|
-
# Keep only recent events
|
|
633
|
-
if len(self.events) > self.max_events:
|
|
634
|
-
self.events = self.events[-self.max_events:]
|
|
635
|
-
|
|
636
|
-
# Notify callbacks
|
|
637
|
-
for callback in self._event_callbacks:
|
|
638
|
-
try:
|
|
639
|
-
callback(event)
|
|
640
|
-
except Exception as e:
|
|
641
|
-
self.logger.warning(f"Event callback failed: {e}")
|
|
642
|
-
|
|
643
|
-
def add_event_callback(self, callback: Callable[[PersistenceEvent], None]) -> None:
|
|
644
|
-
"""Add callback for persistence events."""
|
|
645
|
-
self._event_callbacks.append(callback)
|
|
646
|
-
|
|
647
|
-
def remove_event_callback(self, callback: Callable[[PersistenceEvent], None]) -> None:
|
|
648
|
-
"""Remove event callback."""
|
|
649
|
-
if callback in self._event_callbacks:
|
|
650
|
-
self._event_callbacks.remove(callback)
|
|
651
|
-
|
|
652
|
-
def update_config(self, new_config: PersistenceConfig) -> None:
|
|
653
|
-
"""Update persistence configuration."""
|
|
654
|
-
with self._lock:
|
|
655
|
-
self.config = new_config
|
|
656
|
-
|
|
657
|
-
# Update backup manager settings
|
|
658
|
-
if hasattr(self.backup_manager, 'set_backup_interval'):
|
|
659
|
-
self.backup_manager.set_backup_interval(new_config.backup_interval_seconds)
|
|
660
|
-
if hasattr(self.backup_manager, 'set_change_threshold'):
|
|
661
|
-
self.backup_manager.set_change_threshold(new_config.change_threshold)
|
|
662
|
-
|
|
663
|
-
def export_persistence_report(self, filepath: str) -> bool:
|
|
664
|
-
"""
|
|
665
|
-
Export comprehensive persistence report.
|
|
666
|
-
|
|
667
|
-
Args:
|
|
668
|
-
filepath: Target file path
|
|
669
|
-
|
|
670
|
-
Returns:
|
|
671
|
-
True if export successful
|
|
672
|
-
"""
|
|
673
|
-
try:
|
|
674
|
-
report_data = {
|
|
675
|
-
'report_timestamp': datetime.now().isoformat(),
|
|
676
|
-
'persistence_status': self.get_persistence_status(),
|
|
677
|
-
'backup_info': self.backup_manager.get_backup_info() if self.backup_manager else {},
|
|
678
|
-
'integrity_check_results': None
|
|
679
|
-
}
|
|
680
|
-
|
|
681
|
-
# Include latest integrity check results
|
|
682
|
-
if self.config.enable_integrity_checks:
|
|
683
|
-
try:
|
|
684
|
-
integrity_results = self.integrity_checker.check_integrity()
|
|
685
|
-
report_data['integrity_check_results'] = integrity_results
|
|
686
|
-
except Exception as e:
|
|
687
|
-
report_data['integrity_check_error'] = str(e)
|
|
688
|
-
|
|
689
|
-
with open(filepath, 'w') as f:
|
|
690
|
-
json.dump(report_data, f, indent=2, default=str)
|
|
691
|
-
|
|
692
|
-
self.logger.info(f"Persistence report exported to {filepath}")
|
|
693
|
-
return True
|
|
694
|
-
|
|
695
|
-
except Exception as e:
|
|
696
|
-
self.logger.error(f"Failed to export persistence report: {e}")
|
|
697
|
-
return False
|
|
698
|
-
|
|
699
|
-
def __enter__(self):
|
|
700
|
-
"""Context manager entry."""
|
|
701
|
-
self.start_persistence_monitoring()
|
|
702
|
-
return self
|
|
703
|
-
|
|
704
|
-
def __exit__(self, exc_type, exc_val, exc_tb):
|
|
705
|
-
"""Context manager exit."""
|
|
706
|
-
# Perform final persistence if configured
|
|
707
|
-
if self.config.persistence_on_shutdown:
|
|
708
|
-
try:
|
|
709
|
-
self.force_persistence()
|
|
710
|
-
except Exception as e:
|
|
711
|
-
self.logger.error(f"Shutdown persistence failed: {e}")
|
|
712
|
-
|
|
1
|
+
"""
|
|
2
|
+
Enhanced Persistence Manager for Database Settings
|
|
3
|
+
|
|
4
|
+
This module provides comprehensive persistence management including
|
|
5
|
+
configurable backup intervals, disk persistence triggers, backup rotation,
|
|
6
|
+
corruption recovery, and monitoring for backup success/failure.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import os
|
|
10
|
+
import sqlite3
|
|
11
|
+
import threading
|
|
12
|
+
import time
|
|
13
|
+
import logging
|
|
14
|
+
import json
|
|
15
|
+
import hashlib
|
|
16
|
+
from typing import Dict, List, Optional, Any, Callable
|
|
17
|
+
from datetime import datetime, timedelta
|
|
18
|
+
from pathlib import Path
|
|
19
|
+
from dataclasses import dataclass
|
|
20
|
+
from enum import Enum
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class PersistenceEventType(Enum):
    """Types of persistence events recorded by the persistence manager."""
    BACKUP_CREATED = "backup_created"                # a backup file was created
    BACKUP_FAILED = "backup_failed"                  # a backup attempt raised or produced no backup
    PERSISTENCE_TRIGGERED = "persistence_triggered"  # a backup was triggered (time- or change-based)
    CORRUPTION_DETECTED = "corruption_detected"      # an integrity check found corruption or issues
    RECOVERY_COMPLETED = "recovery_completed"        # a recovery attempt finished (success or failure)
    CLEANUP_PERFORMED = "cleanup_performed"          # a cleanup pass was recorded
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
@dataclass
class PersistenceConfig:
    """Configuration for persistence management.

    Tunables controlling when backups are taken, how many are kept,
    and which safety features (integrity checks, corruption recovery,
    shutdown persistence) are enabled.
    """
    backup_interval_seconds: int = 300   # 5 minutes between time-based backups
    change_threshold: int = 100          # number of changes before a change-based backup
    max_backups: int = 10
    max_backup_age_days: int = 30
    enable_compression: bool = True
    enable_integrity_checks: bool = True
    corruption_recovery_enabled: bool = True
    persistence_on_shutdown: bool = True

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the configuration to a plain dictionary.

        Uses dataclasses.asdict so new fields are picked up automatically
        instead of having to be added to a hand-maintained field list
        (the previous implementation silently drifted when fields changed).
        """
        from dataclasses import asdict
        return asdict(self)

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'PersistenceConfig':
        """Create a configuration from a dictionary produced by to_dict().

        Raises:
            TypeError: if *data* contains keys that are not config fields.
        """
        return cls(**data)
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
@dataclass
class PersistenceEvent:
    """A single recorded persistence event and its outcome."""
    event_type: PersistenceEventType
    timestamp: datetime
    details: Dict[str, Any]
    success: bool = True
    error_message: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the event to a JSON-friendly dictionary."""
        return dict(
            event_type=self.event_type.value,
            timestamp=self.timestamp.isoformat(),
            details=self.details,
            success=self.success,
            error_message=self.error_message,
        )
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
class DatabaseIntegrityChecker:
    """Checks database integrity and detects corruption.

    Runs four layers of checks against the settings database:

    1. SQLite's built-in ``PRAGMA integrity_check``
    2. Presence and minimal column sets of the expected tables
    3. Basic data-consistency queries (null/empty identifier detection)
    4. Advisory performance checks (missing indexes, oversized tables)
    """

    # Tables the settings schema is expected to contain.
    REQUIRED_TABLES = (
        'core_settings', 'tool_settings', 'tab_content',
        'performance_settings', 'font_settings', 'dialog_settings',
        'settings_metadata',
    )

    # Minimum column sets for the tables validated in detail.
    REQUIRED_COLUMNS = {
        'core_settings': {'key', 'value', 'data_type'},
        'tool_settings': {'tool_name', 'setting_path', 'setting_value', 'data_type'},
    }

    def __init__(self, connection_manager):
        """
        Initialize integrity checker.

        Args:
            connection_manager: Database connection manager exposing
                get_connection() -> sqlite3.Connection
        """
        self.connection_manager = connection_manager
        self.logger = logging.getLogger(__name__)

    def check_integrity(self) -> Dict[str, Any]:
        """
        Perform comprehensive database integrity check.

        Returns:
            Dictionary with 'overall_status' (one of 'healthy', 'corrupted',
            'schema_issues', 'data_issues', 'check_failed'),
            'checks_performed', 'issues_found', and 'recommendations'.
        """
        results = {
            'overall_status': 'healthy',
            'checks_performed': [],
            'issues_found': [],
            'recommendations': []
        }

        try:
            conn = self.connection_manager.get_connection()

            # 1. SQLite integrity check -- corruption is the most severe status.
            integrity_result = self._check_sqlite_integrity(conn)
            results['checks_performed'].append('sqlite_integrity')
            if not integrity_result['passed']:
                results['overall_status'] = 'corrupted'
                results['issues_found'].extend(integrity_result['issues'])

            # 2. Schema validation. Only downgrade a 'healthy' status: the
            # previous implementation unconditionally overwrote 'corrupted'
            # here, which masked corruption from the recovery path.
            schema_result = self._check_schema_validity(conn)
            results['checks_performed'].append('schema_validation')
            if not schema_result['passed']:
                if results['overall_status'] == 'healthy':
                    results['overall_status'] = 'schema_issues'
                results['issues_found'].extend(schema_result['issues'])

            # 3. Data consistency checks (also only downgrade 'healthy').
            consistency_result = self._check_data_consistency(conn)
            results['checks_performed'].append('data_consistency')
            if not consistency_result['passed']:
                if results['overall_status'] == 'healthy':
                    results['overall_status'] = 'data_issues'
                results['issues_found'].extend(consistency_result['issues'])

            # 4. Performance checks -- advisory only, never affect status.
            performance_result = self._check_performance_indicators(conn)
            results['checks_performed'].append('performance_indicators')
            results['recommendations'].extend(performance_result['recommendations'])

        except Exception as e:
            results['overall_status'] = 'check_failed'
            results['issues_found'].append(f"Integrity check failed: {e}")
            self.logger.error(f"Database integrity check failed: {e}")

        return results

    def _check_sqlite_integrity(self, conn: sqlite3.Connection) -> Dict[str, Any]:
        """Run PRAGMA integrity_check; return {'passed': bool, 'issues': [...]}."""
        try:
            cursor = conn.execute("PRAGMA integrity_check")
            result = cursor.fetchone()[0]

            # The pragma returns the single row 'ok' for a healthy database.
            if result == "ok":
                return {'passed': True, 'issues': []}
            return {
                'passed': False,
                'issues': [f"SQLite integrity check failed: {result}"]
            }
        except Exception as e:
            return {
                'passed': False,
                'issues': [f"Could not perform integrity check: {e}"]
            }

    def _check_schema_validity(self, conn: sqlite3.Connection) -> Dict[str, Any]:
        """Check that all required tables exist and have their required columns."""
        issues = []

        try:
            cursor = conn.execute(
                "SELECT name FROM sqlite_master WHERE type='table'"
            )
            existing_tables = {row[0] for row in cursor.fetchall()}

            missing_tables = set(self.REQUIRED_TABLES) - existing_tables
            if missing_tables:
                issues.append(f"Missing tables: {missing_tables}")

            # Validate column sets only for required tables that are present.
            for table in existing_tables:
                if table in self.REQUIRED_TABLES:
                    issues.extend(self._validate_table_schema(conn, table))

        except Exception as e:
            issues.append(f"Schema validation error: {e}")

        return {'passed': not issues, 'issues': issues}

    def _validate_table_schema(self, conn: sqlite3.Connection, table_name: str) -> List[str]:
        """Validate schema for a specific table; return a list of issue strings."""
        issues = []

        try:
            # table_name originates from sqlite_master, not user input, so
            # interpolating it into the PRAGMA is acceptable here.
            cursor = conn.execute(f"PRAGMA table_info({table_name})")
            columns = cursor.fetchall()

            if not columns:
                issues.append(f"Table {table_name} has no columns")

            # Column name is the second field of each table_info row.
            column_names = {col[1] for col in columns}

            required_cols = self.REQUIRED_COLUMNS.get(table_name)
            if required_cols:
                missing = required_cols - column_names
                if missing:
                    issues.append(f"{table_name} missing columns: {missing}")

        except Exception as e:
            issues.append(f"Could not validate {table_name} schema: {e}")

        return issues

    def _check_data_consistency(self, conn: sqlite3.Connection) -> Dict[str, Any]:
        """Check for null/empty identifier values in the settings tables."""
        issues = []

        try:
            null_keys = conn.execute(
                "SELECT COUNT(*) FROM core_settings WHERE key IS NULL OR key = ''"
            ).fetchone()[0]
            if null_keys > 0:
                issues.append(f"Found {null_keys} core_settings with null/empty keys")

            null_tools = conn.execute(
                "SELECT COUNT(*) FROM tool_settings WHERE tool_name IS NULL OR tool_name = ''"
            ).fetchone()[0]
            if null_tools > 0:
                issues.append(f"Found {null_tools} tool_settings with null/empty tool names")

        except Exception as e:
            issues.append(f"Data consistency check error: {e}")

        return {'passed': not issues, 'issues': issues}

    def _check_performance_indicators(self, conn: sqlite3.Connection) -> Dict[str, Any]:
        """Suggest optimizations (missing indexes, oversized tables).

        Purely advisory: returns {'recommendations': [...]} and never
        affects the overall integrity status.
        """
        recommendations = []

        try:
            cursor = conn.execute(
                "SELECT name FROM sqlite_master WHERE type='index'"
            )
            existing_indexes = {row[0] for row in cursor.fetchall()}

            recommended_indexes = {
                'idx_core_settings_key',
                'idx_tool_settings_tool_name',
                'idx_tool_settings_path'
            }

            missing_indexes = recommended_indexes - existing_indexes
            if missing_indexes:
                recommendations.append(f"Consider creating indexes: {missing_indexes}")

            # Enumerate user tables directly. The previous implementation
            # joined sqlite_master against pragma_table_info with a bogus
            # "ON m.name != p.name" clause and an unused COUNT(*); a plain
            # sqlite_master query yields the same table names correctly.
            cursor = conn.execute(
                "SELECT name FROM sqlite_master WHERE type='table'"
            )

            for (table_name,) in cursor.fetchall():
                if table_name.startswith('sqlite_'):
                    continue  # skip SQLite-internal tables

                try:
                    row_count = conn.execute(
                        f"SELECT COUNT(*) FROM {table_name}"
                    ).fetchone()[0]

                    if row_count > 10000:
                        recommendations.append(
                            f"Table {table_name} has {row_count} rows, consider archiving old data"
                        )
                except Exception:
                    pass  # Skip tables we can't query

        except Exception as e:
            recommendations.append(f"Performance check error: {e}")

        return {'recommendations': recommendations}
|
|
310
|
+
|
|
311
|
+
|
|
312
|
+
class PersistenceManager:
|
|
313
|
+
"""
|
|
314
|
+
Enhanced persistence manager with comprehensive backup and recovery capabilities.
|
|
315
|
+
"""
|
|
316
|
+
|
|
317
|
+
    def __init__(self, connection_manager, backup_manager,
                 config: Optional[PersistenceConfig] = None):
        """
        Initialize persistence manager.

        Args:
            connection_manager: Database connection manager
            backup_manager: Backup manager instance
            config: Persistence configuration; defaults to PersistenceConfig()
        """
        self.connection_manager = connection_manager
        self.backup_manager = backup_manager
        self.config = config or PersistenceConfig()

        # Components
        self.integrity_checker = DatabaseIntegrityChecker(connection_manager)

        # State tracking
        self.changes_since_persistence = 0   # changes recorded since the last successful backup
        self.last_persistence_time = None    # datetime of last successful backup, or None
        self.last_integrity_check = None     # datetime of last integrity check, or None

        # Event tracking: bounded list of PersistenceEvent records
        self.events = []
        self.max_events = 1000

        # Threading: RLock because methods that hold it may call other
        # locking methods on the same thread (e.g. _record_event)
        self._lock = threading.RLock()
        self._persistence_thread = None
        self._stop_event = threading.Event()

        # Callbacks invoked for every recorded event
        self._event_callbacks = []

        # Logging
        self.logger = logging.getLogger(__name__)

        # Statistics
        self.stats = {
            'total_persistence_operations': 0,
            'successful_persistence_operations': 0,
            'failed_persistence_operations': 0,
            'corruption_incidents': 0,
            'recovery_operations': 0,
            'integrity_checks_performed': 0
        }
|
|
363
|
+
|
|
364
|
+
    def start_persistence_monitoring(self) -> None:
        """Start automatic persistence monitoring.

        Spawns a daemon worker thread running _persistence_worker; a
        no-op if a worker thread is already alive.
        """
        if self._persistence_thread and self._persistence_thread.is_alive():
            return

        self._stop_event.clear()
        self._persistence_thread = threading.Thread(
            target=self._persistence_worker,
            daemon=True,  # daemon so it never blocks interpreter shutdown
            name="PersistenceManager"
        )
        self._persistence_thread.start()
        self.logger.info("Persistence monitoring started")
|
|
377
|
+
|
|
378
|
+
    def stop_persistence_monitoring(self) -> None:
        """Stop automatic persistence monitoring.

        Signals the worker thread via the stop event and waits up to
        10 seconds for it to exit; a no-op if no worker is running.
        """
        if self._persistence_thread and self._persistence_thread.is_alive():
            self._stop_event.set()
            self._persistence_thread.join(timeout=10)
            self.logger.info("Persistence monitoring stopped")
|
|
384
|
+
|
|
385
|
+
    def _persistence_worker(self) -> None:
        """Worker thread for automatic persistence operations.

        Loops until the stop event is set, triggering backups, periodic
        integrity checks, and cleanup as their conditions come due.
        Unexpected errors are recorded as BACKUP_FAILED events and the
        loop backs off for a minute before retrying.
        """
        while not self._stop_event.is_set():
            try:
                # Check if persistence is needed
                should_persist = self._should_trigger_persistence()

                if should_persist:
                    self._perform_persistence_operation()

                # Periodic integrity checks
                if self._should_perform_integrity_check():
                    self._perform_integrity_check()

                # Cleanup old backups
                if self._should_perform_cleanup():
                    self._perform_cleanup()

                # Wait before next check: poll at ~1/10 of the backup
                # interval, capped at 60s, so due work is picked up promptly
                self._stop_event.wait(min(60, self.config.backup_interval_seconds // 10))

            except Exception as e:
                self.logger.error(f"Persistence worker error: {e}")
                self._record_event(
                    PersistenceEventType.BACKUP_FAILED,
                    {'error': str(e)},
                    success=False,
                    error_message=str(e)
                )
                # Back off after an unexpected failure before retrying
                self._stop_event.wait(60)
|
|
415
|
+
|
|
416
|
+
def _should_trigger_persistence(self) -> bool:
|
|
417
|
+
"""Determine if persistence operation should be triggered."""
|
|
418
|
+
# Time-based trigger
|
|
419
|
+
if self.last_persistence_time is None:
|
|
420
|
+
return True
|
|
421
|
+
|
|
422
|
+
time_since_last = datetime.now() - self.last_persistence_time
|
|
423
|
+
if time_since_last.total_seconds() >= self.config.backup_interval_seconds:
|
|
424
|
+
return True
|
|
425
|
+
|
|
426
|
+
# Change-based trigger
|
|
427
|
+
if self.changes_since_persistence >= self.config.change_threshold:
|
|
428
|
+
return True
|
|
429
|
+
|
|
430
|
+
return False
|
|
431
|
+
|
|
432
|
+
def _should_perform_integrity_check(self) -> bool:
|
|
433
|
+
"""Determine if integrity check should be performed."""
|
|
434
|
+
if not self.config.enable_integrity_checks:
|
|
435
|
+
return False
|
|
436
|
+
|
|
437
|
+
if self.last_integrity_check is None:
|
|
438
|
+
return True
|
|
439
|
+
|
|
440
|
+
# Perform integrity check every 24 hours
|
|
441
|
+
time_since_check = datetime.now() - self.last_integrity_check
|
|
442
|
+
return time_since_check.total_seconds() >= 86400 # 24 hours
|
|
443
|
+
|
|
444
|
+
def _should_perform_cleanup(self) -> bool:
|
|
445
|
+
"""Determine if cleanup should be performed."""
|
|
446
|
+
# Perform cleanup once per day
|
|
447
|
+
return True # Let cleanup method handle frequency
|
|
448
|
+
|
|
449
|
+
    def _perform_persistence_operation(self) -> None:
        """Perform persistence operation (backup).

        Picks the trigger (change-based once the change threshold is
        crossed, otherwise time-based), asks the backup manager for a
        backup, resets the change counter on success, and records an
        event plus statistics either way.
        """
        try:
            with self._lock:
                # Trigger backup
                # Local import -- presumably avoids a circular import with
                # backup_manager; confirm before moving to module level.
                from .backup_manager import BackupTrigger

                trigger = BackupTrigger.TIME_BASED
                if self.changes_since_persistence >= self.config.change_threshold:
                    trigger = BackupTrigger.CHANGE_BASED

                backup_info = self.backup_manager.backup_database(
                    self.connection_manager,
                    trigger=trigger,
                    metadata={'persistence_manager': True}
                )

                if backup_info:
                    # Success: stamp the time and reset the change counter.
                    self.last_persistence_time = datetime.now()
                    self.changes_since_persistence = 0
                    self.stats['successful_persistence_operations'] += 1

                    self._record_event(
                        PersistenceEventType.PERSISTENCE_TRIGGERED,
                        {
                            'trigger': trigger.value,
                            'backup_file': backup_info.filepath,
                            'backup_size': backup_info.size_bytes
                        }
                    )
                else:
                    # Backup manager returned nothing: treat as a failure.
                    self.stats['failed_persistence_operations'] += 1
                    self._record_event(
                        PersistenceEventType.BACKUP_FAILED,
                        {'trigger': trigger.value},
                        success=False,
                        error_message="Backup creation failed"
                    )

                self.stats['total_persistence_operations'] += 1

        except Exception as e:
            # NOTE(review): total_persistence_operations is not incremented on
            # this path, so totals count only attempts that reached the
            # success/failure branch above.
            self.logger.error(f"Persistence operation failed: {e}")
            self.stats['failed_persistence_operations'] += 1
            self._record_event(
                PersistenceEventType.BACKUP_FAILED,
                {'error': str(e)},
                success=False,
                error_message=str(e)
            )
|
|
499
|
+
|
|
500
|
+
    def _perform_integrity_check(self) -> None:
        """Perform database integrity check.

        Records a CORRUPTION_DETECTED event for any non-healthy status.
        When the status is 'corrupted' and recovery is enabled, also
        attempts recovery from the latest backup.
        """
        try:
            self.logger.info("Performing database integrity check")

            integrity_results = self.integrity_checker.check_integrity()
            self.last_integrity_check = datetime.now()
            self.stats['integrity_checks_performed'] += 1

            if integrity_results['overall_status'] == 'corrupted':
                self.stats['corruption_incidents'] += 1
                self._record_event(
                    PersistenceEventType.CORRUPTION_DETECTED,
                    integrity_results,
                    success=False,
                    error_message="Database corruption detected"
                )

                # Attempt recovery if enabled
                if self.config.corruption_recovery_enabled:
                    self._attempt_corruption_recovery()

            elif integrity_results['overall_status'] != 'healthy':
                # Non-corruption issues (schema/data): record only, no recovery.
                self._record_event(
                    PersistenceEventType.CORRUPTION_DETECTED,
                    integrity_results,
                    success=False,
                    error_message=f"Database issues detected: {integrity_results['overall_status']}"
                )

        except Exception as e:
            self.logger.error(f"Integrity check failed: {e}")
|
|
532
|
+
|
|
533
|
+
    def _attempt_corruption_recovery(self) -> None:
        """Attempt to recover from database corruption.

        Restores from the latest backup via the backup manager and records
        a RECOVERY_COMPLETED event with success/failure details. Never
        raises; failures are logged and recorded.
        """
        try:
            self.logger.warning("Attempting database corruption recovery")

            # Try to restore from latest backup
            recovery_success = self.backup_manager.restore_from_backup(
                self.connection_manager
            )

            if recovery_success:
                self.stats['recovery_operations'] += 1
                self._record_event(
                    PersistenceEventType.RECOVERY_COMPLETED,
                    {'method': 'backup_restore'},
                    success=True
                )
                self.logger.info("Database recovery successful")
            else:
                self._record_event(
                    PersistenceEventType.RECOVERY_COMPLETED,
                    {'method': 'backup_restore'},
                    success=False,
                    error_message="Backup restore failed"
                )
                self.logger.error("Database recovery failed")

        except Exception as e:
            self.logger.error(f"Corruption recovery failed: {e}")
            self._record_event(
                PersistenceEventType.RECOVERY_COMPLETED,
                {'method': 'backup_restore', 'error': str(e)},
                success=False,
                error_message=str(e)
            )
|
|
568
|
+
|
|
569
|
+
def _perform_cleanup(self) -> None:
|
|
570
|
+
"""Perform cleanup of old backups and maintenance."""
|
|
571
|
+
try:
|
|
572
|
+
# This is handled by the backup manager's rotation policy
|
|
573
|
+
# We just record the event
|
|
574
|
+
self._record_event(
|
|
575
|
+
PersistenceEventType.CLEANUP_PERFORMED,
|
|
576
|
+
{'timestamp': datetime.now().isoformat()}
|
|
577
|
+
)
|
|
578
|
+
|
|
579
|
+
except Exception as e:
|
|
580
|
+
self.logger.error(f"Cleanup operation failed: {e}")
|
|
581
|
+
|
|
582
|
+
def record_change(self) -> None:
|
|
583
|
+
"""Record a database change for persistence triggering."""
|
|
584
|
+
with self._lock:
|
|
585
|
+
self.changes_since_persistence += 1
|
|
586
|
+
|
|
587
|
+
def force_persistence(self) -> bool:
|
|
588
|
+
"""Force immediate persistence operation."""
|
|
589
|
+
try:
|
|
590
|
+
self._perform_persistence_operation()
|
|
591
|
+
return True
|
|
592
|
+
except Exception as e:
|
|
593
|
+
self.logger.error(f"Forced persistence failed: {e}")
|
|
594
|
+
return False
|
|
595
|
+
|
|
596
|
+
def get_persistence_status(self) -> Dict[str, Any]:
|
|
597
|
+
"""
|
|
598
|
+
Get comprehensive persistence status.
|
|
599
|
+
|
|
600
|
+
Returns:
|
|
601
|
+
Dictionary with persistence status and statistics
|
|
602
|
+
"""
|
|
603
|
+
with self._lock:
|
|
604
|
+
return {
|
|
605
|
+
'config': self.config.to_dict(),
|
|
606
|
+
'status': {
|
|
607
|
+
'changes_since_persistence': self.changes_since_persistence,
|
|
608
|
+
'last_persistence_time': self.last_persistence_time.isoformat() if self.last_persistence_time else None,
|
|
609
|
+
'last_integrity_check': self.last_integrity_check.isoformat() if self.last_integrity_check else None,
|
|
610
|
+
'monitoring_active': self._persistence_thread and self._persistence_thread.is_alive()
|
|
611
|
+
},
|
|
612
|
+
'statistics': self.stats.copy(),
|
|
613
|
+
'recent_events': [
|
|
614
|
+
event.to_dict() for event in self.events[-10:]
|
|
615
|
+
]
|
|
616
|
+
}
|
|
617
|
+
|
|
618
|
+
def _record_event(self, event_type: PersistenceEventType, details: Dict[str, Any],
|
|
619
|
+
success: bool = True, error_message: Optional[str] = None) -> None:
|
|
620
|
+
"""Record a persistence event."""
|
|
621
|
+
event = PersistenceEvent(
|
|
622
|
+
event_type=event_type,
|
|
623
|
+
timestamp=datetime.now(),
|
|
624
|
+
details=details,
|
|
625
|
+
success=success,
|
|
626
|
+
error_message=error_message
|
|
627
|
+
)
|
|
628
|
+
|
|
629
|
+
with self._lock:
|
|
630
|
+
self.events.append(event)
|
|
631
|
+
|
|
632
|
+
# Keep only recent events
|
|
633
|
+
if len(self.events) > self.max_events:
|
|
634
|
+
self.events = self.events[-self.max_events:]
|
|
635
|
+
|
|
636
|
+
# Notify callbacks
|
|
637
|
+
for callback in self._event_callbacks:
|
|
638
|
+
try:
|
|
639
|
+
callback(event)
|
|
640
|
+
except Exception as e:
|
|
641
|
+
self.logger.warning(f"Event callback failed: {e}")
|
|
642
|
+
|
|
643
|
+
def add_event_callback(self, callback: Callable[[PersistenceEvent], None]) -> None:
    """Register *callback* to be invoked for every recorded persistence event."""
    self._event_callbacks.append(callback)
def remove_event_callback(self, callback: Callable[[PersistenceEvent], None]) -> None:
    """Unregister a previously added event callback; a no-op if absent."""
    try:
        self._event_callbacks.remove(callback)
    except ValueError:
        # Callback was never registered (or already removed) -- ignore.
        pass
def update_config(self, new_config: PersistenceConfig) -> None:
    """Replace the active persistence configuration.

    The relevant tunables are forwarded to the backup manager when it
    exposes the corresponding setters.
    """
    with self._lock:
        self.config = new_config

        # The hasattr guards keep this compatible with backup-manager
        # implementations that lack these optional setters.
        mgr = self.backup_manager
        if hasattr(mgr, 'set_backup_interval'):
            mgr.set_backup_interval(new_config.backup_interval_seconds)
        if hasattr(mgr, 'set_change_threshold'):
            mgr.set_change_threshold(new_config.change_threshold)
def export_persistence_report(self, filepath: str) -> bool:
    """
    Export comprehensive persistence report as JSON.

    The report bundles the live persistence status, backup-manager info,
    and (when integrity checks are enabled) a fresh integrity check run.

    Args:
        filepath: Target file path

    Returns:
        True if export successful; False on any failure (the error is
        logged, never raised).
    """
    try:
        report_data = {
            'report_timestamp': datetime.now().isoformat(),
            'persistence_status': self.get_persistence_status(),
            'backup_info': self.backup_manager.get_backup_info() if self.backup_manager else {},
            'integrity_check_results': None
        }

        # Include latest integrity check results; a failing check is
        # recorded in the report rather than aborting the export.
        if self.config.enable_integrity_checks:
            try:
                integrity_results = self.integrity_checker.check_integrity()
                report_data['integrity_check_results'] = integrity_results
            except Exception as e:
                report_data['integrity_check_error'] = str(e)

        # Explicit UTF-8 keeps the report portable regardless of the
        # platform default encoding; default=str makes non-JSON types
        # (datetimes, paths, ...) serializable.
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(report_data, f, indent=2, default=str)

        self.logger.info(f"Persistence report exported to {filepath}")
        return True

    except Exception as e:
        self.logger.error(f"Failed to export persistence report: {e}")
        return False
def __enter__(self):
    """Context manager entry: begin background persistence monitoring."""
    self.start_persistence_monitoring()
    return self
def __exit__(self, exc_type, exc_val, exc_tb):
    """Context manager exit: flush state (if configured) and stop monitoring."""
    if self.config.persistence_on_shutdown:
        # Best-effort final flush; shutdown must proceed even if it fails.
        try:
            self.force_persistence()
        except Exception as exc:
            self.logger.error(f"Shutdown persistence failed: {exc}")

    self.stop_persistence_monitoring()