sql-assistant 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. sql_assistant/__init__.py +3 -0
  2. sql_assistant/api/__init__.py +1 -0
  3. sql_assistant/api/backup.py +116 -0
  4. sql_assistant/api/config.py +183 -0
  5. sql_assistant/api/conversation.py +71 -0
  6. sql_assistant/api/dependencies.py +22 -0
  7. sql_assistant/api/history.py +61 -0
  8. sql_assistant/api/models.py +221 -0
  9. sql_assistant/api/query.py +275 -0
  10. sql_assistant/api/routes.py +19 -0
  11. sql_assistant/api/schema.py +21 -0
  12. sql_assistant/config.py +144 -0
  13. sql_assistant/database/__init__.py +1 -0
  14. sql_assistant/database/backup.py +568 -0
  15. sql_assistant/database/connectors/__init__.py +1 -0
  16. sql_assistant/database/connectors/base.py +185 -0
  17. sql_assistant/database/connectors/exceptions.py +88 -0
  18. sql_assistant/database/connectors/mongodb.py +194 -0
  19. sql_assistant/database/connectors/mysql.py +110 -0
  20. sql_assistant/database/connectors/postgresql.py +133 -0
  21. sql_assistant/database/connectors/redis.py +132 -0
  22. sql_assistant/database/connectors/sqlserver.py +140 -0
  23. sql_assistant/database/history.py +290 -0
  24. sql_assistant/database/manager.py +178 -0
  25. sql_assistant/database/security.py +230 -0
  26. sql_assistant/llm/__init__.py +1 -0
  27. sql_assistant/llm/base.py +28 -0
  28. sql_assistant/llm/exceptions.py +96 -0
  29. sql_assistant/llm/manager.py +82 -0
  30. sql_assistant/llm/prompts.py +29 -0
  31. sql_assistant/llm/providers/__init__.py +1 -0
  32. sql_assistant/llm/providers/claude.py +132 -0
  33. sql_assistant/llm/providers/gemini.py +127 -0
  34. sql_assistant/llm/providers/openai_compatible.py +103 -0
  35. sql_assistant/llm/retry.py +88 -0
  36. sql_assistant/main.py +94 -0
  37. sql_assistant/settings.py +219 -0
  38. sql_assistant/web/__init__.py +1 -0
  39. sql_assistant/web/static/css/base.css +25 -0
  40. sql_assistant/web/static/css/components/backup.css +146 -0
  41. sql_assistant/web/static/css/components/chat.css +465 -0
  42. sql_assistant/web/static/css/components/modal.css +143 -0
  43. sql_assistant/web/static/css/components/settings.css +358 -0
  44. sql_assistant/web/static/css/components/sidebar.css +235 -0
  45. sql_assistant/web/static/css/components/toast.css +30 -0
  46. sql_assistant/web/static/css/style.css +10 -0
  47. sql_assistant/web/static/css/theme.css +200 -0
  48. sql_assistant/web/static/js/api.js +38 -0
  49. sql_assistant/web/static/js/app.js +161 -0
  50. sql_assistant/web/static/js/backup.js +216 -0
  51. sql_assistant/web/static/js/chat.js +238 -0
  52. sql_assistant/web/static/js/color-theme-manager.js +121 -0
  53. sql_assistant/web/static/js/confirm.js +95 -0
  54. sql_assistant/web/static/js/conversations.js +182 -0
  55. sql_assistant/web/static/js/settings.js +425 -0
  56. sql_assistant/web/static/js/state.js +43 -0
  57. sql_assistant/web/static/js/theme-manager.js +64 -0
  58. sql_assistant/web/static/js/ui.js +53 -0
  59. sql_assistant/web/templates/index.html +373 -0
  60. sql_assistant-1.0.0.dist-info/METADATA +24 -0
  61. sql_assistant-1.0.0.dist-info/RECORD +64 -0
  62. sql_assistant-1.0.0.dist-info/WHEEL +4 -0
  63. sql_assistant-1.0.0.dist-info/entry_points.txt +2 -0
  64. sql_assistant-1.0.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,568 @@
1
+ """数据库备份模块 - 支持全量备份和增量备份"""
2
+
3
+ import asyncio
4
+ import json
5
+ import os
6
+ import shutil
7
+ from datetime import datetime, date, time
8
+ from decimal import Decimal
9
+ from pathlib import Path
10
+ from typing import List, Optional, Dict, Any
11
+ from dataclasses import dataclass
12
+
13
+ from ..config import get_config_manager
14
+ from .manager import get_db_manager
15
+ from .connectors.base import BaseConnector, QueryResult
16
+
17
+
18
+ def _json_serializable(obj):
19
+ """将对象转换为可 JSON 序列化的类型"""
20
+ if isinstance(obj, (datetime, date, time)):
21
+ return obj.isoformat()
22
+ elif isinstance(obj, Decimal):
23
+ return float(obj)
24
+ elif isinstance(obj, bytes):
25
+ return obj.decode('utf-8', errors='replace')
26
+ elif hasattr(obj, '__dict__'):
27
+ return str(obj)
28
+ raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable")
29
+
30
+
31
@dataclass
class BackupConfig:
    """Options controlling a backup run (consumed by BackupManager.backup)."""
    backup_type: str = "full"            # "full" = full backup, "incremental" = only changed rows
    # Fixed annotation: the default is None ("all tables"), so the field is
    # Optional[List[str]], not List[str].
    tables: Optional[List[str]] = None   # explicit table list; None means every table
    backup_path: str = "./backups"       # directory backups are written under
    include_schema: bool = True          # dump table structure alongside data
    include_data: bool = True            # dump table rows
39
+
40
+
41
@dataclass
class BackupResult:
    """Outcome of a single backup run (returned by BackupManager.backup)."""
    success: bool                 # True if the backup completed
    message: str                  # human-readable status or error message
    backup_id: str                # identifier of the created backup ("" on early failure)
    backup_path: str              # directory the backup was written to
    tables_backed_up: List[str]   # names of tables actually backed up
    total_records: int            # total number of rows dumped
    backup_size: int              # total size of the backup files, in bytes
    backup_time: str              # ISO-8601 completion timestamp
52
+
53
+
54
@dataclass
class BackupInfo:
    """Metadata describing an existing backup on disk (read from metadata.json)."""
    backup_id: str        # backup directory name / identifier
    backup_type: str      # "full" or "incremental"
    db_type: str          # database engine the backup was taken from
    db_name: str          # name of the backed-up database
    tables: List[str]     # tables included in the backup
    record_count: int     # total rows stored in the backup
    backup_time: str      # ISO-8601 timestamp of the backup
    file_size: int        # total on-disk size of the backup, in bytes
65
+
66
+
67
@dataclass
class RestoreResult:
    """Outcome of a restore run (returned by BackupManager.restore)."""
    success: bool               # True if the restore completed
    message: str                # human-readable status or error message
    backup_id: str              # backup the restore was attempted from
    tables_restored: List[str]  # tables whose data was restored
    total_records: int          # total rows inserted during the restore
75
+
76
+
77
class BackupManager:
    """Database backup manager.

    Writes each backup as a directory of JSON files (a per-table schema dump,
    a per-table data dump, and a metadata.json), supports full and incremental
    backups, and can list, inspect, delete, and restore them.
    """

    def __init__(self):
        # Root directory containing all backup folders. Kept in sync with the
        # most recently requested BackupConfig.backup_path (see backup()), so
        # listing/restore operate on the same location backups were written to.
        self._backup_path = "./backups"
        # Marker file (under the root) holding the timestamp of the last
        # backup, used as the change cutoff for incremental backups.
        self._incremental_marker = ".last_backup_timestamp"

    async def backup(self, config: BackupConfig) -> BackupResult:
        """Run a database backup.

        Args:
            config: Backup options (type, table list, target path, what to
                include).

        Returns:
            BackupResult describing the outcome. On failure the partially
            written backup directory is removed.
        """
        db_manager = get_db_manager()
        connector = await db_manager.get_connector()
        db_config = get_config_manager().get_active_database()

        if not db_config:
            return BackupResult(
                success=False,
                message="未配置数据库连接",
                backup_id="",
                backup_path="",
                tables_backed_up=[],
                total_records=0,
                backup_size=0,
                backup_time=""
            )

        # Bug fix: keep the manager's root in sync with the requested path.
        # list_backups/get_backup_info/delete_backup/restore and the
        # incremental marker all read self._backup_path; previously a backup
        # written to a non-default config.backup_path was invisible to them.
        self._backup_path = config.backup_path

        backup_id = self._generate_backup_id(config.backup_type)
        backup_dir = Path(config.backup_path) / backup_id
        backup_dir.mkdir(parents=True, exist_ok=True)

        try:
            # Determine which tables to back up: caller-specified list, or
            # every table reported by the connector's schema.
            if config.tables:
                tables = config.tables
            else:
                schema = await connector.get_schema()
                tables = [t["name"] for t in schema.get("tables", [])]

            if not tables:
                return BackupResult(
                    success=False,
                    message="数据库中没有表可备份",
                    backup_id=backup_id,
                    backup_path=str(backup_dir),
                    tables_backed_up=[],
                    total_records=0,
                    backup_size=0,
                    backup_time=datetime.now().isoformat()
                )

            total_records = 0
            backed_up_tables = []

            # Cutoff timestamp; only relevant for incremental backups.
            last_backup_time = None
            if config.backup_type == "incremental":
                last_backup_time = self._get_last_backup_time()

            for table in tables:
                # Dump table structure.
                if config.include_schema:
                    schema_file = backup_dir / f"{table}_schema.json"
                    schema_data = await self._get_table_schema(connector, table)
                    with open(schema_file, 'w', encoding='utf-8') as f:
                        json.dump(schema_data, f, ensure_ascii=False, indent=2)

                # Dump table rows.
                if config.include_data:
                    data_file = backup_dir / f"{table}_data.json"
                    count = await self._backup_table_data(
                        connector, table, data_file, config.backup_type, last_backup_time
                    )
                    total_records += count

                backed_up_tables.append(table)

            # Write backup metadata describing what was captured.
            metadata = {
                "backup_id": backup_id,
                "backup_type": config.backup_type,
                "db_type": connector.db_type,
                "db_name": db_config.database,
                "tables": backed_up_tables,
                "record_count": total_records,
                "backup_time": datetime.now().isoformat(),
                "created_at": datetime.now().isoformat()
            }
            with open(backup_dir / "metadata.json", 'w', encoding='utf-8') as f:
                json.dump(metadata, f, ensure_ascii=False, indent=2)

            # Record this run as the new incremental cutoff.
            self._update_last_backup_time()

            # Total size of every file in the backup directory.
            backup_size = sum(f.stat().st_size for f in backup_dir.rglob('*') if f.is_file())

            return BackupResult(
                success=True,
                message=f"备份成功,共备份 {len(backed_up_tables)} 个表,{total_records} 条记录",
                backup_id=backup_id,
                backup_path=str(backup_dir),
                tables_backed_up=backed_up_tables,
                total_records=total_records,
                backup_size=backup_size,
                backup_time=datetime.now().isoformat()
            )

        except Exception as e:
            # Remove the partially written backup so failed runs leave no
            # half-complete directories behind.
            if backup_dir.exists():
                shutil.rmtree(backup_dir)
            return BackupResult(
                success=False,
                message=f"备份失败: {str(e)}",
                backup_id=backup_id,
                backup_path=str(backup_dir),
                tables_backed_up=[],
                total_records=0,
                backup_size=0,
                backup_time=datetime.now().isoformat()
            )

    async def _get_table_schema(self, connector: BaseConnector, table_name: str) -> Dict:
        """Return the schema entry for *table_name*, or an empty stub if the
        connector does not report it."""
        schema = await connector.get_schema()
        for table in schema.get("tables", []):
            if table["name"] == table_name:
                return table
        return {"name": table_name, "columns": []}

    async def _backup_table_data(self, connector: BaseConnector, table_name: str,
                                 output_file: Path, backup_type: str,
                                 last_backup_time: Optional[str]) -> int:
        """Dump a table's rows to *output_file* as JSON; returns the row count.

        For incremental backups with a known cutoff, only rows changed since
        the cutoff are fetched; tables without a timestamp column fall back to
        a full dump (see _get_incremental_data).
        """
        if backup_type == "incremental" and last_backup_time:
            # Incremental: only rows modified since the last backup.
            result = await self._get_incremental_data(connector, table_name, last_backup_time)
        else:
            # Full: every row.
            # NOTE(security): the table name is interpolated into SQL. Names
            # from the connector's schema are trusted, but config.tables is
            # caller-supplied — validate upstream before accepting it.
            result = await connector.execute(f"SELECT * FROM {table_name}")

        # Replace values json can't serialize with serializable equivalents.
        def convert_row(row):
            converted = []
            for item in row:
                try:
                    json.dumps(item)
                    converted.append(item)
                except (TypeError, ValueError):
                    converted.append(_json_serializable(item))
            return converted

        # Persist columns + converted rows + count.
        data = {
            "columns": result.columns,
            "rows": [convert_row(row) for row in result.rows],
            "row_count": result.row_count
        }
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

        return result.row_count

    async def _get_incremental_data(self, connector: BaseConnector, table_name: str,
                                    last_backup_time: str) -> QueryResult:
        """Fetch rows added/modified since *last_backup_time*.

        Looks for a conventional timestamp column; if none exists, degrades
        to a full SELECT of the table.
        """
        # Candidate timestamp column names, checked in priority order.
        timestamp_fields = ["updated_at", "update_time", "modify_time", "create_time", "created_at"]

        schema = await connector.get_schema()
        table_info = None
        for table in schema.get("tables", []):
            if table["name"] == table_name:
                table_info = table
                break

        if table_info:
            # Use the first matching timestamp column for the cutoff filter.
            for field in timestamp_fields:
                for col in table_info.get("columns", []):
                    if col["name"].lower() == field:
                        # NOTE(security): identifier and cutoff are string-
                        # interpolated; the cutoff comes from our own marker
                        # file, but parameterized queries would be safer.
                        query = f"SELECT * FROM {table_name} WHERE {field} > '{last_backup_time}'"
                        return await connector.execute(query)

        # No timestamp column found: fall back to a full dump.
        return await connector.execute(f"SELECT * FROM {table_name}")

    def _generate_backup_id(self, backup_type: str) -> str:
        """Return a unique backup ID of the form '<type>_<YYYYmmdd_HHMMSS>'."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        return f"{backup_type}_{timestamp}"

    def _get_last_backup_time(self) -> Optional[str]:
        """Read the last-backup timestamp from the marker file, if present."""
        marker_file = Path(self._backup_path) / self._incremental_marker
        if marker_file.exists():
            with open(marker_file, 'r', encoding='utf-8') as f:
                return f.read().strip()
        return None

    def _update_last_backup_time(self):
        """Write 'now' to the marker file as the new incremental cutoff."""
        marker_file = Path(self._backup_path) / self._incremental_marker
        marker_file.parent.mkdir(parents=True, exist_ok=True)
        with open(marker_file, 'w', encoding='utf-8') as f:
            f.write(datetime.now().isoformat())

    def list_backups(self) -> List[BackupInfo]:
        """List all backups under the backup root, newest first.

        Directories without a readable metadata.json are skipped (best-effort).
        """
        backups = []
        backup_dir = Path(self._backup_path)

        if not backup_dir.exists():
            return backups

        for item in backup_dir.iterdir():
            if item.is_dir():
                metadata_file = item / "metadata.json"
                if metadata_file.exists():
                    try:
                        with open(metadata_file, 'r', encoding='utf-8') as f:
                            metadata = json.load(f)

                        # Compute the backup's total on-disk size.
                        file_size = sum(f.stat().st_size for f in item.rglob('*') if f.is_file())

                        backups.append(BackupInfo(
                            backup_id=metadata.get("backup_id", item.name),
                            backup_type=metadata.get("backup_type", "full"),
                            db_type=metadata.get("db_type", ""),
                            db_name=metadata.get("db_name", ""),
                            tables=metadata.get("tables", []),
                            record_count=metadata.get("record_count", 0),
                            backup_time=metadata.get("backup_time", ""),
                            file_size=file_size
                        ))
                    except Exception:
                        # Deliberately best-effort: a corrupt metadata file
                        # must not prevent listing the other backups.
                        pass

        # Newest first.
        backups.sort(key=lambda x: x.backup_time, reverse=True)
        return backups

    def get_backup_info(self, backup_id: str) -> Optional[BackupInfo]:
        """Return details of one backup, or None if missing/unreadable."""
        backup_dir = Path(self._backup_path) / backup_id
        metadata_file = backup_dir / "metadata.json"

        if not metadata_file.exists():
            return None

        try:
            with open(metadata_file, 'r', encoding='utf-8') as f:
                metadata = json.load(f)

            file_size = sum(f.stat().st_size for f in backup_dir.rglob('*') if f.is_file())

            return BackupInfo(
                backup_id=metadata.get("backup_id", backup_id),
                backup_type=metadata.get("backup_type", "full"),
                db_type=metadata.get("db_type", ""),
                db_name=metadata.get("db_name", ""),
                tables=metadata.get("tables", []),
                record_count=metadata.get("record_count", 0),
                backup_time=metadata.get("backup_time", ""),
                file_size=file_size
            )
        except Exception:
            return None

    def delete_backup(self, backup_id: str) -> bool:
        """Delete the named backup directory; return True if it existed."""
        backup_dir = Path(self._backup_path) / backup_id
        if backup_dir.exists() and backup_dir.is_dir():
            shutil.rmtree(backup_dir)
            return True
        return False

    async def restore(self, backup_id: str, restore_schema: bool = False,
                      restore_data: bool = True, tables: Optional[List[str]] = None) -> RestoreResult:
        """Restore the database from a backup.

        Args:
            backup_id: ID of the backup to restore from.
            restore_schema: Recreate table structure (CAUTION: drops existing
                tables before recreating them).
            restore_data: Re-insert backed-up rows.
            tables: Restrict restore to these tables; None restores every
                table present in the backup.

        Returns:
            RestoreResult describing what was restored.
        """
        backup_dir = Path(self._backup_path) / backup_id
        metadata_file = backup_dir / "metadata.json"

        if not metadata_file.exists():
            return RestoreResult(
                success=False,
                message="备份文件不存在",
                backup_id=backup_id,
                tables_restored=[],
                total_records=0
            )

        try:
            with open(metadata_file, 'r', encoding='utf-8') as f:
                metadata = json.load(f)

            db_type = metadata.get("db_type", "")
            backup_tables = metadata.get("tables", [])

            # Intersect the request with what the backup actually contains.
            if tables:
                tables_to_restore = [t for t in tables if t in backup_tables]
            else:
                tables_to_restore = backup_tables

            db_manager = get_db_manager()
            connector = await db_manager.get_connector()

            total_records = 0
            restored_tables = []

            for table_name in tables_to_restore:
                # Recreate table structure (drops any existing table).
                if restore_schema:
                    schema_file = backup_dir / f"{table_name}_schema.json"
                    if schema_file.exists():
                        await self._restore_table_schema(connector, table_name, schema_file)

                # Re-insert the backed-up rows.
                if restore_data:
                    data_file = backup_dir / f"{table_name}_data.json"
                    if data_file.exists():
                        count = await self._restore_table_data(connector, table_name, data_file, db_type)
                        total_records += count
                restored_tables.append(table_name)

            return RestoreResult(
                success=True,
                message=f"恢复成功,共恢复 {len(restored_tables)} 个表,{total_records} 条记录",
                backup_id=backup_id,
                tables_restored=restored_tables,
                total_records=total_records
            )

        except Exception as e:
            return RestoreResult(
                success=False,
                message=f"恢复失败: {str(e)}",
                backup_id=backup_id,
                tables_restored=[],
                total_records=0
            )

    async def _restore_table_schema(self, connector: BaseConnector, table_name: str, schema_file: Path):
        """Recreate *table_name* from a backed-up schema dump (drops it first)."""
        with open(schema_file, 'r', encoding='utf-8') as f:
            schema_data = json.load(f)

        columns = schema_data.get("columns", [])
        if not columns:
            return

        # Build CREATE TABLE column definitions from the dumped schema.
        col_defs = []
        for col in columns:
            col_name = col.get("name", "")
            col_type = col.get("type", "VARCHAR(255)")
            nullable = col.get("nullable", True)
            key = col.get("key", "")

            definition = f"{col_name} {col_type}"
            if not nullable:
                definition += " NOT NULL"
            if key == "PRI":
                definition += " PRIMARY KEY"

            col_defs.append(definition)

        # Drop the existing table, if any (best-effort).
        try:
            await connector.execute(f"DROP TABLE IF EXISTS {table_name}")
        except Exception:
            pass

        # Recreate the table.
        create_sql = f"CREATE TABLE {table_name} ({', '.join(col_defs)})"
        await connector.execute(create_sql)

    async def _restore_table_data(self, connector: BaseConnector, table_name: str,
                                  data_file: Path, db_type: str) -> int:
        """Re-insert a table's backed-up rows; returns the number inserted.

        Inserts in batches of 500; a failed batch is retried row-by-row so
        one bad row does not abort the whole table.
        """
        with open(data_file, 'r', encoding='utf-8') as f:
            data = json.load(f)

        columns = data.get("columns", [])
        rows = data.get("rows", [])

        if not columns or not rows:
            return 0

        # Pick the identifier quoting style for the target engine.
        if db_type == "mysql":
            quote = "`"
        elif db_type == "postgresql":
            quote = '"'
        elif db_type == "sqlserver":
            quote = "[]"  # sentinel: bracket-style quoting
        else:
            quote = '"'

        def quote_ident(name: str) -> str:
            if quote == "[]":
                return f"[{name}]"
            return f"{quote}{name}{quote}"

        quoted_columns = [quote_ident(c) for c in columns]

        def escape_value(val) -> str:
            # NOTE(security): values are escaped by doubling single quotes;
            # parameterized inserts would be preferable if the connector
            # supported them here.
            if val is None:
                return "NULL"
            if isinstance(val, bool):
                return "1" if val else "0"
            if isinstance(val, (int, float)):
                return str(val)
            if isinstance(val, (datetime, date)):
                return f"'{val.isoformat()}'"
            s = str(val)
            s = s.replace("'", "''")
            return f"'{s}'"

        # Batch inserts, at most 500 rows per statement.
        batch_size = 500
        total_inserted = 0

        for i in range(0, len(rows), batch_size):
            batch = rows[i:i + batch_size]
            values_clauses = []

            for row in batch:
                converted = self._convert_data_types(row, columns)
                escaped = [escape_value(v) for v in converted]
                values_clauses.append(f"({', '.join(escaped)})")

            insert_sql = f"INSERT INTO {table_name} ({', '.join(quoted_columns)}) VALUES {', '.join(values_clauses)}"

            try:
                await connector.execute(insert_sql)
                total_inserted += len(batch)
            except Exception:
                # Batch failed: retry row-by-row, skipping rows that still fail.
                for row in batch:
                    try:
                        converted = self._convert_data_types(row, columns)
                        escaped = [escape_value(v) for v in converted]
                        insert_sql = f"INSERT INTO {table_name} ({', '.join(quoted_columns)}) VALUES ({', '.join(escaped)})"
                        await connector.execute(insert_sql)
                        total_inserted += 1
                    except Exception:
                        continue

        return total_inserted

    def _convert_data_types(self, row: list, columns: list) -> list:
        """Convert JSON-decoded values back to richer Python types.

        Strings matching common ISO-8601 layouts are parsed back to datetime;
        everything else passes through unchanged. NOTE: this heuristically
        converts ANY matching string, even for non-date columns.
        """
        converted = []
        for i, item in enumerate(row):
            if item is None:
                converted.append(None)
            elif isinstance(item, str):
                # Try the ISO-like formats produced by the backup dump.
                for fmt in ["%Y-%m-%dT%H:%M:%S", "%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%d"]:
                    try:
                        converted.append(datetime.strptime(item, fmt))
                        break
                    except ValueError:
                        continue
                else:
                    # No format matched: keep the original string.
                    converted.append(item)
            else:
                converted.append(item)
        return converted
558
+
559
+
560
# Process-wide singleton instance, created lazily.
_backup_manager = None


def get_backup_manager() -> BackupManager:
    """Return the shared BackupManager, creating it on first use."""
    global _backup_manager
    manager = _backup_manager
    if manager is None:
        manager = BackupManager()
        _backup_manager = manager
    return manager
@@ -0,0 +1 @@
1
+ """数据库连接器实现"""