nevernet-sql-diff 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nevernet_sql_diff-0.1.0/PKG-INFO +165 -0
- nevernet_sql_diff-0.1.0/README.md +137 -0
- nevernet_sql_diff-0.1.0/migration/__init__.py +17 -0
- nevernet_sql_diff-0.1.0/migration/comparator.py +239 -0
- nevernet_sql_diff-0.1.0/migration/parser.py +280 -0
- nevernet_sql_diff-0.1.0/migration/schema_models.py +50 -0
- nevernet_sql_diff-0.1.0/nevernet_sql_diff.egg-info/PKG-INFO +165 -0
- nevernet_sql_diff-0.1.0/nevernet_sql_diff.egg-info/SOURCES.txt +13 -0
- nevernet_sql_diff-0.1.0/nevernet_sql_diff.egg-info/dependency_links.txt +1 -0
- nevernet_sql_diff-0.1.0/nevernet_sql_diff.egg-info/entry_points.txt +2 -0
- nevernet_sql_diff-0.1.0/nevernet_sql_diff.egg-info/requires.txt +1 -0
- nevernet_sql_diff-0.1.0/nevernet_sql_diff.egg-info/top_level.txt +1 -0
- nevernet_sql_diff-0.1.0/setup.cfg +4 -0
- nevernet_sql_diff-0.1.0/setup.py +38 -0
- nevernet_sql_diff-0.1.0/tests/test_schema.py +113 -0
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: nevernet-sql-diff
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: SQL Schema Diff and Migration Generator
|
|
5
|
+
Home-page: https://github.com/nevernet/SchemaDiff
|
|
6
|
+
Author: NeverNet
|
|
7
|
+
Author-email: contact@nevernet.com
|
|
8
|
+
Classifier: Development Status :: 3 - Alpha
|
|
9
|
+
Classifier: Intended Audience :: Developers
|
|
10
|
+
Classifier: Programming Language :: Python :: 3
|
|
11
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
16
|
+
Requires-Python: >=3.8
|
|
17
|
+
Description-Content-Type: text/markdown
|
|
18
|
+
Requires-Dist: sqlglot>=20.0.0
|
|
19
|
+
Dynamic: author
|
|
20
|
+
Dynamic: author-email
|
|
21
|
+
Dynamic: classifier
|
|
22
|
+
Dynamic: description
|
|
23
|
+
Dynamic: description-content-type
|
|
24
|
+
Dynamic: home-page
|
|
25
|
+
Dynamic: requires-dist
|
|
26
|
+
Dynamic: requires-python
|
|
27
|
+
Dynamic: summary
|
|
28
|
+
|
|
29
|
+
# SchemaDiff
|
|
30
|
+
|
|
31
|
+
SQL Schema 对比工具 - 比较两个 SQL 文件或目录,生成 Migration 脚本。
|
|
32
|
+
|
|
33
|
+
## 功能特性
|
|
34
|
+
|
|
35
|
+
- 🔍 **Schema 对比** - 比较两个数据库 schema 的差异
|
|
36
|
+
- 📝 **Migration 生成** - 自动生成 ALTER 语句
|
|
37
|
+
- 📁 **目录支持** - 支持目录级别的批量对比
|
|
38
|
+
- 🗄️ **多方言支持** - MySQL / PostgreSQL
|
|
39
|
+
|
|
40
|
+
## 安装
|
|
41
|
+
|
|
42
|
+
### 从 PyPI 安装 (推荐)
|
|
43
|
+
|
|
44
|
+
```bash
|
|
45
|
+
pip install nevernet-sql-diff
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
### 从源码安装
|
|
49
|
+
|
|
50
|
+
```bash
|
|
51
|
+
git clone https://github.com/nevernet/SchemaDiff.git
|
|
52
|
+
cd SchemaDiff
|
|
53
|
+
pip install -e .
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
### 开发模式安装
|
|
57
|
+
|
|
58
|
+
```bash
|
|
59
|
+
pip install -e .[dev]
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
## 使用方法
|
|
63
|
+
|
|
64
|
+
### 命令行
|
|
65
|
+
|
|
66
|
+
```bash
|
|
67
|
+
# 对比两个 SQL 文件
|
|
68
|
+
schemadiff users_v1.sql users_v2.sql
|
|
69
|
+
|
|
70
|
+
# 对比两个目录
|
|
71
|
+
schemadiff backup/20260301 SQL
|
|
72
|
+
|
|
73
|
+
# 输出到文件
|
|
74
|
+
schemadiff old.sql new.sql -o migration.sql
|
|
75
|
+
|
|
76
|
+
# 指定方言
|
|
77
|
+
schemadiff old.sql new.sql -d postgres
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
### Python 模块
|
|
81
|
+
|
|
82
|
+
```python
|
|
83
|
+
from migration import parse_sql_file, SchemaDiff
|
|
84
|
+
|
|
85
|
+
# 解析 SQL
|
|
86
|
+
source = parse_sql_file("users_v1.sql", "mysql")
|
|
87
|
+
target = parse_sql_file("users_v2.sql", "mysql")
|
|
88
|
+
|
|
89
|
+
# 对比
|
|
90
|
+
diff = SchemaDiff(source, target)
|
|
91
|
+
changes = diff.compare()
|
|
92
|
+
|
|
93
|
+
# 生成迁移脚本
|
|
94
|
+
migration = diff.generate_migration()
|
|
95
|
+
print(migration)
|
|
96
|
+
```
|
|
97
|
+
|
|
98
|
+
## 发布到 PyPI
|
|
99
|
+
|
|
100
|
+
### 1. 安装发布工具
|
|
101
|
+
|
|
102
|
+
```bash
|
|
103
|
+
pip install build twine
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
### 2. 构建包
|
|
107
|
+
|
|
108
|
+
```bash
|
|
109
|
+
python -m build
|
|
110
|
+
```
|
|
111
|
+
|
|
112
|
+
### 3. 发布到 PyPI
|
|
113
|
+
|
|
114
|
+
```bash
|
|
115
|
+
twine upload dist/*
|
|
116
|
+
```
|
|
117
|
+
|
|
118
|
+
或者使用测试 PyPI:
|
|
119
|
+
|
|
120
|
+
```bash
|
|
121
|
+
twine upload --repository testpypi dist/*
|
|
122
|
+
```
|
|
123
|
+
|
|
124
|
+
### 4. 版本管理
|
|
125
|
+
|
|
126
|
+
更新版本号请修改:
|
|
127
|
+
- `setup.py` 中的 `version`
|
|
128
|
+
- Git tag: `git tag v0.1.0 && git push --tags`
|
|
129
|
+
|
|
130
|
+
## 测试样本
|
|
131
|
+
|
|
132
|
+
```bash
|
|
133
|
+
# 单文件对比
|
|
134
|
+
./run.sh SQL/users_v1.sql SQL/users_v2.sql
|
|
135
|
+
|
|
136
|
+
# 目录对比
|
|
137
|
+
./run.sh backup/20260301 SQL
|
|
138
|
+
|
|
139
|
+
# 输出到文件
|
|
140
|
+
./run.sh SQL/users_v1.sql SQL/users_v2.sql -o migration.sql
|
|
141
|
+
```
|
|
142
|
+
|
|
143
|
+
## 支持的 SQL 特性
|
|
144
|
+
|
|
145
|
+
| 特性 | 状态 |
|
|
146
|
+
|------|------|
|
|
147
|
+
| CREATE TABLE | ✅ |
|
|
148
|
+
| ADD COLUMN | ✅ |
|
|
149
|
+
| DROP COLUMN | ✅ |
|
|
150
|
+
| ALTER COLUMN | ✅ |
|
|
151
|
+
| PRIMARY KEY | ✅ |
|
|
152
|
+
| FOREIGN KEY | ✅ |
|
|
153
|
+
| UNIQUE INDEX | ✅ |
|
|
154
|
+
| DEFAULT VALUE | ✅ |
|
|
155
|
+
| AUTO_INCREMENT | ✅ |
|
|
156
|
+
| COMMENT | ✅ |
|
|
157
|
+
|
|
158
|
+
## 技术栈
|
|
159
|
+
|
|
160
|
+
- Python 3.8+
|
|
161
|
+
- sqlglot (SQL 解析)
|
|
162
|
+
|
|
163
|
+
## 许可证
|
|
164
|
+
|
|
165
|
+
MIT
|
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
# SchemaDiff
|
|
2
|
+
|
|
3
|
+
SQL Schema 对比工具 - 比较两个 SQL 文件或目录,生成 Migration 脚本。
|
|
4
|
+
|
|
5
|
+
## 功能特性
|
|
6
|
+
|
|
7
|
+
- 🔍 **Schema 对比** - 比较两个数据库 schema 的差异
|
|
8
|
+
- 📝 **Migration 生成** - 自动生成 ALTER 语句
|
|
9
|
+
- 📁 **目录支持** - 支持目录级别的批量对比
|
|
10
|
+
- 🗄️ **多方言支持** - MySQL / PostgreSQL
|
|
11
|
+
|
|
12
|
+
## 安装
|
|
13
|
+
|
|
14
|
+
### 从 PyPI 安装 (推荐)
|
|
15
|
+
|
|
16
|
+
```bash
|
|
17
|
+
pip install nevernet-sql-diff
|
|
18
|
+
```
|
|
19
|
+
|
|
20
|
+
### 从源码安装
|
|
21
|
+
|
|
22
|
+
```bash
|
|
23
|
+
git clone https://github.com/nevernet/SchemaDiff.git
|
|
24
|
+
cd SchemaDiff
|
|
25
|
+
pip install -e .
|
|
26
|
+
```
|
|
27
|
+
|
|
28
|
+
### 开发模式安装
|
|
29
|
+
|
|
30
|
+
```bash
|
|
31
|
+
pip install -e .[dev]
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
## 使用方法
|
|
35
|
+
|
|
36
|
+
### 命令行
|
|
37
|
+
|
|
38
|
+
```bash
|
|
39
|
+
# 对比两个 SQL 文件
|
|
40
|
+
schemadiff users_v1.sql users_v2.sql
|
|
41
|
+
|
|
42
|
+
# 对比两个目录
|
|
43
|
+
schemadiff backup/20260301 SQL
|
|
44
|
+
|
|
45
|
+
# 输出到文件
|
|
46
|
+
schemadiff old.sql new.sql -o migration.sql
|
|
47
|
+
|
|
48
|
+
# 指定方言
|
|
49
|
+
schemadiff old.sql new.sql -d postgres
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
### Python 模块
|
|
53
|
+
|
|
54
|
+
```python
|
|
55
|
+
from migration import parse_sql_file, SchemaDiff
|
|
56
|
+
|
|
57
|
+
# 解析 SQL
|
|
58
|
+
source = parse_sql_file("users_v1.sql", "mysql")
|
|
59
|
+
target = parse_sql_file("users_v2.sql", "mysql")
|
|
60
|
+
|
|
61
|
+
# 对比
|
|
62
|
+
diff = SchemaDiff(source, target)
|
|
63
|
+
changes = diff.compare()
|
|
64
|
+
|
|
65
|
+
# 生成迁移脚本
|
|
66
|
+
migration = diff.generate_migration()
|
|
67
|
+
print(migration)
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
## 发布到 PyPI
|
|
71
|
+
|
|
72
|
+
### 1. 安装发布工具
|
|
73
|
+
|
|
74
|
+
```bash
|
|
75
|
+
pip install build twine
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
### 2. 构建包
|
|
79
|
+
|
|
80
|
+
```bash
|
|
81
|
+
python -m build
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
### 3. 发布到 PyPI
|
|
85
|
+
|
|
86
|
+
```bash
|
|
87
|
+
twine upload dist/*
|
|
88
|
+
```
|
|
89
|
+
|
|
90
|
+
或者使用测试 PyPI:
|
|
91
|
+
|
|
92
|
+
```bash
|
|
93
|
+
twine upload --repository testpypi dist/*
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
### 4. 版本管理
|
|
97
|
+
|
|
98
|
+
更新版本号请修改:
|
|
99
|
+
- `setup.py` 中的 `version`
|
|
100
|
+
- Git tag: `git tag v0.1.0 && git push --tags`
|
|
101
|
+
|
|
102
|
+
## 测试样本
|
|
103
|
+
|
|
104
|
+
```bash
|
|
105
|
+
# 单文件对比
|
|
106
|
+
./run.sh SQL/users_v1.sql SQL/users_v2.sql
|
|
107
|
+
|
|
108
|
+
# 目录对比
|
|
109
|
+
./run.sh backup/20260301 SQL
|
|
110
|
+
|
|
111
|
+
# 输出到文件
|
|
112
|
+
./run.sh SQL/users_v1.sql SQL/users_v2.sql -o migration.sql
|
|
113
|
+
```
|
|
114
|
+
|
|
115
|
+
## 支持的 SQL 特性
|
|
116
|
+
|
|
117
|
+
| 特性 | 状态 |
|
|
118
|
+
|------|------|
|
|
119
|
+
| CREATE TABLE | ✅ |
|
|
120
|
+
| ADD COLUMN | ✅ |
|
|
121
|
+
| DROP COLUMN | ✅ |
|
|
122
|
+
| ALTER COLUMN | ✅ |
|
|
123
|
+
| PRIMARY KEY | ✅ |
|
|
124
|
+
| FOREIGN KEY | ✅ |
|
|
125
|
+
| UNIQUE INDEX | ✅ |
|
|
126
|
+
| DEFAULT VALUE | ✅ |
|
|
127
|
+
| AUTO_INCREMENT | ✅ |
|
|
128
|
+
| COMMENT | ✅ |
|
|
129
|
+
|
|
130
|
+
## 技术栈
|
|
131
|
+
|
|
132
|
+
- Python 3.8+
|
|
133
|
+
- sqlglot (SQL 解析)
|
|
134
|
+
|
|
135
|
+
## 许可证
|
|
136
|
+
|
|
137
|
+
MIT
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
"""
|
|
2
|
+
SchemaDiff - SQL Schema Diff and Migration Generator
|
|
3
|
+
"""
|
|
4
|
+
from .schema_models import DatabaseSchema, Table, Column, Index, ForeignKey
|
|
5
|
+
from .parser import parse_sql_file, parse_sql_text
|
|
6
|
+
from .comparator import SchemaDiff
|
|
7
|
+
|
|
8
|
+
__all__ = [
|
|
9
|
+
"DatabaseSchema",
|
|
10
|
+
"Table",
|
|
11
|
+
"Column",
|
|
12
|
+
"Index",
|
|
13
|
+
"ForeignKey",
|
|
14
|
+
"parse_sql_file",
|
|
15
|
+
"parse_sql_text",
|
|
16
|
+
"SchemaDiff",
|
|
17
|
+
]
|
|
@@ -0,0 +1,239 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Schema Comparator for SchemaDiff.
|
|
3
|
+
Compares two DatabaseSchema objects and generates migration scripts.
|
|
4
|
+
"""
|
|
5
|
+
from typing import List, Dict, Any, Set
|
|
6
|
+
from .schema_models import DatabaseSchema, Table, Column, Index, ForeignKey
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class SchemaDiff:
    """Compare two database schemas and generate migration changes.

    ``source`` is the current state and ``target`` the desired state; the
    generated migration script transforms source into target.  Usage::

        diff = SchemaDiff(source_schema, target_schema)
        changes = diff.compare()
        sql = diff.generate_migration()
    """

    def __init__(self, source: "DatabaseSchema", target: "DatabaseSchema"):
        self.source = source
        self.target = target
        # Flat list of change records produced by compare().  Every record
        # carries a "type" key (create_table, drop_table, add_column, ...)
        # plus type-specific payload keys.
        self.changes: List[Dict[str, Any]] = []

    def compare(self) -> List[Dict[str, Any]]:
        """Diff the two schemas; return (and store) the list of changes."""
        self.changes = []

        # Tables present only in the target must be created.
        for table_name, table in self.target.tables.items():
            if table_name not in self.source.tables:
                self.changes.append({
                    "type": "create_table",
                    "table": table_name,
                    "definition": table,
                })

        # Tables present only in the source must be dropped.
        for table_name in self.source.tables:
            if table_name not in self.target.tables:
                self.changes.append({
                    "type": "drop_table",
                    "table": table_name,
                })

        # Tables present on both sides are compared member by member.
        for table_name in set(self.source.tables.keys()) & set(self.target.tables.keys()):
            self._compare_table(table_name)

        return self.changes

    def _compare_table(self, table_name: str):
        """Compare columns, indexes, and foreign keys of one shared table."""
        source_table = self.source.tables[table_name]
        target_table = self.target.tables[table_name]

        self._compare_columns(table_name, source_table, target_table)
        self._compare_indexes(table_name, source_table, target_table)
        self._compare_foreign_keys(table_name, source_table, target_table)

    def _compare_columns(self, table_name: str, source_table: "Table", target_table: "Table"):
        """Record added, dropped, and modified columns for one table."""
        source_cols = set(source_table.columns.keys())
        target_cols = set(target_table.columns.keys())

        for col_name in target_cols - source_cols:
            self.changes.append({
                "type": "add_column",
                "table": table_name,
                "column": col_name,
                "definition": target_table.columns[col_name],
            })

        for col_name in source_cols - target_cols:
            self.changes.append({
                "type": "drop_column",
                "table": table_name,
                "column": col_name,
            })

        for col_name in source_cols & target_cols:
            self._compare_column(table_name, col_name,
                                 source_table.columns[col_name],
                                 target_table.columns[col_name])

    def _compare_column(self, table_name: str, col_name: str,
                        source_col: "Column", target_col: "Column"):
        """Record an alter_column change if any column attribute differs."""
        # Map of attribute -> new value, for every attribute that changed.
        changes = {}

        if source_col.data_type != target_col.data_type:
            changes["type"] = target_col.data_type
        if source_col.nullable != target_col.nullable:
            changes["nullable"] = target_col.nullable
        if source_col.default != target_col.default:
            changes["default"] = target_col.default
        if source_col.comment != target_col.comment:
            changes["comment"] = target_col.comment

        if changes:
            self.changes.append({
                "type": "alter_column",
                "table": table_name,
                "column": col_name,
                "old": source_col,
                "new": target_col,
                "changes": changes,
            })

    def _compare_indexes(self, table_name: str, source_table: "Table", target_table: "Table"):
        """Record added and dropped indexes for one table (matched by name)."""
        source_idx = set(source_table.indexes.keys())
        target_idx = set(target_table.indexes.keys())

        for idx_name in target_idx - source_idx:
            self.changes.append({
                "type": "add_index",
                "table": table_name,
                "index": idx_name,
                "definition": target_table.indexes[idx_name],
            })

        for idx_name in source_idx - target_idx:
            self.changes.append({
                "type": "drop_index",
                "table": table_name,
                "index": idx_name,
            })

    def _compare_foreign_keys(self, table_name: str, source_table: "Table", target_table: "Table"):
        """Record added and dropped foreign keys for one table (matched by name)."""
        source_fk = set(source_table.fks.keys())
        target_fk = set(target_table.fks.keys())

        for fk_name in target_fk - source_fk:
            self.changes.append({
                "type": "add_fk",
                "table": table_name,
                "fk": fk_name,
                "definition": target_table.fks[fk_name],
            })

        for fk_name in source_fk - target_fk:
            self.changes.append({
                "type": "drop_fk",
                "table": table_name,
                "fk": fk_name,
            })

    def generate_migration(self) -> str:
        """Render the collected changes as a SQL migration script.

        Returns "-- No changes detected" when compare() found nothing.
        All DROP statements are emitted before any CREATE/ADD/ALTER so that
        replaced objects are removed first.  Statement syntax targets MySQL
        (backtick quoting, MODIFY COLUMN).
        """
        if not self.changes:
            return "-- No changes detected"

        from datetime import datetime  # local import; replaces __import__ hack

        operations = self._order_operations()

        statements = [
            "-- Migration generated by SchemaDiff",
            "-- Generated at: " + str(datetime.now()),
            "",
            "BEGIN;",
        ]

        # DROP operations first (dependents go away before replacements).
        for op in operations:
            rendered = self._render_drop(op)
            if rendered:
                statements.append(rendered)

        # CREATE / ADD / ALTER operations second.
        for op in operations:
            rendered = self._render_add(op)
            if rendered:
                statements.append(rendered)

        statements.append("COMMIT;")
        return "\n".join(statements)

    def _render_drop(self, op: Dict[str, Any]) -> str:
        """Render a drop_* operation as SQL; "" for non-drop operations."""
        kind = op["type"]
        if kind == "drop_table":
            return f"DROP TABLE IF EXISTS `{op['table']}`;"
        if kind == "drop_column":
            return f"ALTER TABLE `{op['table']}` DROP COLUMN `{op['column']}`;"
        if kind == "drop_index":
            return f"DROP INDEX `{op['index']}` ON `{op['table']}`;"
        if kind == "drop_fk":
            return f"ALTER TABLE `{op['table']}` DROP FOREIGN KEY `{op['fk']}`;"
        return ""

    def _render_add(self, op: Dict[str, Any]) -> str:
        """Render a create/add/alter operation as SQL; "" for drops."""
        kind = op["type"]
        if kind == "create_table":
            # Full CREATE TABLE rendering is not implemented yet.
            return f"-- CREATE TABLE {op['table']} (not fully implemented)"
        if kind == "add_column":
            col = op["definition"]
            return self._render_column_sql(op["table"], col.name, col, "ADD")
        if kind == "alter_column":
            return self._render_column_sql(op["table"], op["column"], op["new"], "MODIFY")
        if kind == "add_index":
            idx = op["definition"]
            unique = "UNIQUE " if idx.unique else ""
            cols = ", ".join(f"`{c}`" for c in idx.columns)
            return f"CREATE {unique}INDEX `{idx.name}` ON `{op['table']}` ({cols});"
        if kind == "add_fk":
            fk = op["definition"]
            cols = ", ".join(f"`{c}`" for c in fk.columns)
            ref_cols = ", ".join(f"`{c}`" for c in fk.ref_columns)
            stmt = (f"ALTER TABLE `{op['table']}` ADD CONSTRAINT `{fk.name}` "
                    f"FOREIGN KEY ({cols}) REFERENCES `{fk.ref_table}` ({ref_cols})")
            if fk.on_delete:
                stmt += f" ON DELETE {fk.on_delete}"
            return stmt + ";"
        return ""

    def _render_column_sql(self, table: str, column: str, col: "Column", verb: str) -> str:
        """Render an ALTER TABLE ... {verb} COLUMN statement for *col*."""
        stmt = f"ALTER TABLE `{table}` {verb} COLUMN `{column}` {col.data_type}"
        if not col.nullable:
            stmt += " NOT NULL"
        # Bug fix: compare against None rather than truthiness, so an
        # empty-string default is still emitted.
        if col.default is not None:
            stmt += f" DEFAULT {col.default}"
        return stmt + ";"

    def _order_operations(self) -> List[Dict[str, Any]]:
        """Partition operations into drops (reversed) followed by adds.

        NOTE: this is a heuristic ordering, not a true topological sort of
        FK dependencies; reversing the drop list only approximates
        dependency order for changes discovered in creation order.
        """
        drops = [op for op in self.changes if op["type"].startswith("drop")]
        adds = [op for op in self.changes if not op["type"].startswith("drop")]
        drops.reverse()
        return drops + adds
|
|
@@ -0,0 +1,280 @@
|
|
|
1
|
+
"""
|
|
2
|
+
SQL Parser for SchemaDiff.
|
|
3
|
+
Supports MySQL and PostgreSQL dialects.
|
|
4
|
+
"""
|
|
5
|
+
import sqlglot
|
|
6
|
+
from sqlglot import exp
|
|
7
|
+
from typing import Optional
|
|
8
|
+
|
|
9
|
+
from .schema_models import (
|
|
10
|
+
DatabaseSchema, Table, Column, Index, ForeignKey
|
|
11
|
+
)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def parse_sql_file(file_path: str, dialect: str = "mysql") -> DatabaseSchema:
    """Read *file_path* as UTF-8 and parse its SQL into a DatabaseSchema."""
    with open(file_path, "r", encoding="utf-8") as handle:
        contents = handle.read()
    return parse_sql_text(contents, dialect)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def parse_sql_text(sql_text: str, dialect: str = "mysql") -> DatabaseSchema:
    """Parse raw SQL text and build a DatabaseSchema from it.

    Only CREATE TABLE and CREATE INDEX statements contribute to the result;
    every other statement kind is ignored.
    """
    schema = DatabaseSchema()

    for statement in sqlglot.parse(sql_text, dialect=dialect):
        # sqlglot may yield None for empty fragments; skip those and any
        # non-CREATE statement in one guard.
        if statement is None or not isinstance(statement, exp.Create):
            continue

        if statement.kind == "TABLE":
            parsed_table = _parse_create_table(statement, dialect)
            if parsed_table:
                schema.tables[parsed_table.name] = parsed_table
        elif statement.kind == "INDEX":
            parsed_index = _parse_index_expr(statement, dialect)
            if parsed_index:
                owner = _find_table_for_index(schema, parsed_index)
                # Attach only when an owning table can be located.
                if owner and owner in schema.tables:
                    schema.tables[owner].indexes[parsed_index.name] = parsed_index

    return schema
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def _parse_create_table(stmt: exp.Create, dialect: str) -> Optional[Table]:
    """Parse a CREATE TABLE statement into a Table, or None if unnamed.

    The table name lives on ``stmt.this``, but the exact nesting varies
    (a Schema node wrapping a Table node vs. a bare named node), so both
    shapes are probed.
    """
    table_name = None
    if hasattr(stmt.this, 'this'):
        table_name = stmt.this.this.name if hasattr(stmt.this.this, "name") else stmt.this.this
    elif hasattr(stmt.this, 'name'):
        table_name = stmt.this.name

    if not table_name:
        return None

    table = Table(name=table_name)

    # Column definitions and table-level constraints both arrive as
    # expressions of the wrapping Schema node.
    expressions = getattr(stmt.this, 'expressions', None) or []

    for item in expressions:
        if isinstance(item, exp.ColumnDef):
            column = _parse_column_def(item, dialect)
            if column:
                table.columns[column.name] = column
        elif isinstance(item, exp.ForeignKey):
            fk = _parse_foreign_key(item, dialect)
            if fk:
                table.fks[fk.name] = fk
        elif isinstance(item, (exp.Constraint,
                               exp.PrimaryKeyColumnConstraint,
                               exp.UniqueColumnConstraint,
                               exp.IndexColumnConstraint)):
            # All table-level constraints share one handler.  The original
            # code repeated this call in four separate branches plus one
            # unreachable duplicate elif for UNIQUE/INDEX constraints.
            _parse_constraint(item, table, dialect)

    return table
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def _parse_column_def(col_def: exp.ColumnDef, dialect: str) -> Optional[Column]:
    """Parse a column definition into a Column, or None if it has no name."""
    name = col_def.name
    if not name:
        return None

    # Render the data type back to SQL (e.g. "VARCHAR(255)") in the given
    # dialect; fall back to a sentinel when sqlglot provides no type node.
    kind = col_def.args.get("kind")
    data_type = kind.sql(dialect=dialect).upper() if kind else "UNKNOWN"

    # One pass over the column constraints collects nullability, default,
    # and auto-increment (the original code looped over the same list three
    # separate times).
    nullable = True
    default = None
    auto_increment = False
    for constraint in col_def.constraints or []:
        if isinstance(constraint, exp.NotNullColumnConstraint):
            nullable = False
        elif isinstance(constraint, exp.DefaultColumnConstraint):
            # Keep the default as a rendered SQL fragment, not a Python value.
            default = constraint.this.sql(dialect=dialect) if constraint.this else None
        elif isinstance(constraint, exp.AutoIncrementColumnConstraint):
            auto_increment = True

    return Column(
        name=name,
        data_type=data_type,
        nullable=nullable,
        default=default,
        auto_increment=auto_increment,
    )
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def _parse_constraint(constraint: exp.Expression, table: Table, dialect: str):
    """Parse one table-level constraint and record it on *table* in place.

    Handles four node shapes produced by sqlglot for CREATE TABLE bodies:
    ``IndexColumnConstraint`` (plain INDEX), ``PrimaryKeyColumnConstraint``,
    ``UniqueColumnConstraint`` (modelled as a unique Index), and
    ``ForeignKey``.  Any other node type is silently ignored.  Returns None.
    """
    # Handle IndexColumnConstraint (non-unique INDEX in CREATE TABLE)
    if isinstance(constraint, exp.IndexColumnConstraint):
        idx_name = None
        cols = []
        if constraint.this:
            # constraint.this carries the index identifier when the INDEX is named.
            idx_name = constraint.this.name if hasattr(constraint.this, 'name') else None
        if hasattr(constraint, 'expressions') and constraint.expressions:
            # Indexed columns may arrive as wrapped nodes (c.this), as bare
            # identifiers, or as something else entirely; str() is the last resort.
            for c in constraint.expressions:
                if hasattr(c, 'this') and hasattr(c.this, 'name'):
                    cols.append(c.this.name)
                elif hasattr(c, 'name'):
                    cols.append(c.name)
                else:
                    cols.append(str(c))
        # An anonymous or column-less INDEX is dropped silently.
        if idx_name and cols:
            idx = Index(name=idx_name, columns=cols, unique=False)
            table.indexes[idx.name] = idx
        return

    if isinstance(constraint, exp.PrimaryKeyColumnConstraint):
        # PRIMARY KEY (col1, col2) - table-level primary key
        cols = []
        if constraint.this and hasattr(constraint.this, 'expressions'):
            cols = [c.name for c in constraint.this.expressions]
        table.primary_key = cols

    elif isinstance(constraint, exp.UniqueColumnConstraint):
        # UNIQUE (col) or UNIQUE INDEX idx_name (col) - table-level unique
        # constraint.this is Schema: this=idx_name, expressions=[col1, col2]
        idx_name = None
        cols = []
        if constraint.this:
            # Check if this is a Schema with index name
            if hasattr(constraint.this, 'this') and hasattr(constraint.this.this, 'name'):
                idx_name = constraint.this.this.name
            # Check if this is a list of columns (not a Schema)
            elif hasattr(constraint.this, '__iter__'):
                for c in constraint.this:
                    if hasattr(c, 'name'):
                        cols.append(c.name)
                    else:
                        cols.append(str(c))
            # Schema.expressions contains the column list
            # NOTE(review): when present, this deliberately overwrites any
            # columns collected by the __iter__ branch above.
            if hasattr(constraint.this, 'expressions') and constraint.this.expressions:
                cols = []
                for c in constraint.this.expressions:
                    if hasattr(c, 'name'):
                        cols.append(c.name)
                    else:
                        cols.append(str(c))
        # Generate index name if not provided
        # NOTE(review): the synthesized name uses only the first column, so two
        # anonymous UNIQUE constraints starting with the same column would collide.
        if not idx_name and cols:
            idx_name = f"uk_{cols[0]}"
        if idx_name and cols:
            idx = Index(name=idx_name, columns=cols, unique=True)
            table.indexes[idx.name] = idx

    elif isinstance(constraint, exp.ForeignKey):
        fk = _parse_foreign_key(constraint, dialect)
        if fk:
            table.fks[fk.name] = fk
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
def _parse_foreign_key(fk_expr: exp.ForeignKey, dialect: str) -> Optional[ForeignKey]:
    """Parse a ForeignKey expression, or None if it has no REFERENCES clause.

    Unnamed constraints get the placeholder name "fk_auto"; note that two
    unnamed FKs on the same table would then collide in ``table.fks``.
    """
    name = "fk_auto"
    if fk_expr.args.get("name"):
        name = str(fk_expr.args.get("name"))

    def _identifier(node) -> str:
        # Prefer the node's .name; fall back to its rendered string form.
        return node.name if hasattr(node, "name") else str(node)

    columns = [_identifier(c) for c in (fk_expr.expressions or [])]

    ref = fk_expr.args.get("reference")
    if not ref:
        return None

    # ref is a Reference node: ref.args['this'] is a Schema whose .this is
    # the referenced Table and whose .expressions are the referenced column
    # identifiers.  The original code fetched that Schema twice; extract once.
    ref_table = None
    ref_columns = []
    ref_schema = ref.args.get('this') if hasattr(ref, 'args') else None
    if ref_schema is not None:
        if hasattr(ref_schema, 'this') and hasattr(ref_schema.this, 'name'):
            ref_table = ref_schema.this.name
        elif hasattr(ref_schema, 'this'):
            ref_table = ref_schema.this
        if getattr(ref_schema, 'expressions', None):
            ref_columns = [_identifier(c) for c in ref_schema.expressions]

    on_delete = None
    ondel = fk_expr.args.get("ondelete")
    if ondel:
        on_delete = str(ondel)

    return ForeignKey(
        name=name,
        columns=columns,
        ref_table=ref_table,
        ref_columns=ref_columns,
        on_delete=on_delete,
    )
|
|
251
|
+
|
|
252
|
+
|
|
253
|
+
def _parse_index_expr(stmt: exp.Create, dialect: str) -> Optional[Index]:
    """Parse a CREATE INDEX statement into an Index, or None if malformed."""
    node = stmt.this
    if not isinstance(node, exp.Index):
        return None

    # Collect the indexed column names; other expression kinds are skipped.
    cols = []
    for member in node.expressions or []:
        if isinstance(member, (exp.IndexColumn, exp.Column)):
            cols.append(member.name)

    return Index(
        name=node.name or "idx_auto",
        columns=cols,
        unique=bool(node.unique),
        index_type=node.kind,
    )
|
|
272
|
+
|
|
273
|
+
|
|
274
|
+
def _find_table_for_index(schema: DatabaseSchema, index: Index) -> Optional[str]:
    """Heuristically locate the table an index belongs to.

    Returns the name of the first table that defines any of the index's
    columns, or None when no table matches.  This is a best-effort guess:
    a CREATE INDEX statement parsed here carries no explicit table link.
    """
    for candidate_name, candidate in schema.tables.items():
        if any(column in candidate.columns for column in index.columns):
            return candidate_name
    return None
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
from dataclasses import dataclass, field
|
|
2
|
+
from typing import Optional, Dict, List
|
|
3
|
+
from collections import OrderedDict
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
@dataclass
class Column:
    """A single table column as extracted from a CREATE TABLE statement."""
    name: str
    # SQL type rendered in the source dialect, e.g. "INT" or "VARCHAR(255)".
    data_type: str
    nullable: bool = True
    # Default value as a rendered SQL fragment, not a Python value.
    default: Optional[str] = None
    auto_increment: bool = False
    # Generated-column metadata; shape unspecified and not populated by the
    # current parser -- TODO confirm intended schema.
    generated: Optional[dict] = None
    comment: Optional[str] = None
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@dataclass
class Index:
    """A table index (UNIQUE constraints are modelled as unique indexes)."""
    name: str
    columns: List[str]
    unique: bool = False
    # Index method/kind reported by the parser; None if unspecified.
    index_type: Optional[str] = None
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
@dataclass
class ForeignKey:
    """A FOREIGN KEY constraint from one table to another."""
    name: str
    # Local columns participating in the constraint.
    columns: List[str]
    ref_table: str
    # Columns on the referenced table, positionally matching *columns*.
    ref_columns: List[str]
    # Referential actions as raw SQL keywords (e.g. "CASCADE"); None if unset.
    on_delete: Optional[str] = None
    on_update: Optional[str] = None
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
@dataclass
class Table:
    """A parsed table: columns, indexes, primary key, and foreign keys."""
    name: str
    # Owning schema/database name when qualified; None otherwise.
    schema: Optional[str] = None
    # Column name -> Column, preserving declaration order.
    columns: "OrderedDict[str, Column]" = field(default_factory=OrderedDict)
    # Index name -> Index.
    indexes: Dict[str, Index] = field(default_factory=dict)
    primary_key: Optional[List[str]] = None
    # Foreign-key name -> ForeignKey.
    fks: Dict[str, ForeignKey] = field(default_factory=dict)
    # Storage engine, charset, and table comment -- not populated by the
    # current parser; TODO confirm whether any caller sets them.
    engine: Optional[str] = None
    charset: Optional[str] = None
    comment: Optional[str] = None
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
@dataclass
class DatabaseSchema:
    """Top-level container mapping table name -> Table for one schema dump."""
    tables: Dict[str, Table] = field(default_factory=dict)
|
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: nevernet-sql-diff
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: SQL Schema Diff and Migration Generator
|
|
5
|
+
Home-page: https://github.com/nevernet/SchemaDiff
|
|
6
|
+
Author: NeverNet
|
|
7
|
+
Author-email: contact@nevernet.com
|
|
8
|
+
Classifier: Development Status :: 3 - Alpha
|
|
9
|
+
Classifier: Intended Audience :: Developers
|
|
10
|
+
Classifier: Programming Language :: Python :: 3
|
|
11
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
16
|
+
Requires-Python: >=3.8
|
|
17
|
+
Description-Content-Type: text/markdown
|
|
18
|
+
Requires-Dist: sqlglot>=20.0.0
|
|
19
|
+
Dynamic: author
|
|
20
|
+
Dynamic: author-email
|
|
21
|
+
Dynamic: classifier
|
|
22
|
+
Dynamic: description
|
|
23
|
+
Dynamic: description-content-type
|
|
24
|
+
Dynamic: home-page
|
|
25
|
+
Dynamic: requires-dist
|
|
26
|
+
Dynamic: requires-python
|
|
27
|
+
Dynamic: summary
|
|
28
|
+
|
|
29
|
+
# SchemaDiff
|
|
30
|
+
|
|
31
|
+
SQL Schema 对比工具 - 比较两个 SQL 文件或目录,生成 Migration 脚本。
|
|
32
|
+
|
|
33
|
+
## 功能特性
|
|
34
|
+
|
|
35
|
+
- 🔍 **Schema 对比** - 比较两个数据库 schema 的差异
|
|
36
|
+
- 📝 **Migration 生成** - 自动生成 ALTER 语句
|
|
37
|
+
- 📁 **目录支持** - 支持目录级别的批量对比
|
|
38
|
+
- 🗄️ **多方言支持** - MySQL / PostgreSQL
|
|
39
|
+
|
|
40
|
+
## 安装
|
|
41
|
+
|
|
42
|
+
### 从 PyPI 安装 (推荐)
|
|
43
|
+
|
|
44
|
+
```bash
|
|
45
|
+
pip install nevernet-sql-diff
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
### 从源码安装
|
|
49
|
+
|
|
50
|
+
```bash
|
|
51
|
+
git clone https://github.com/nevernet/SchemaDiff.git
|
|
52
|
+
cd SchemaDiff
|
|
53
|
+
pip install -e .
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
### 开发模式安装
|
|
57
|
+
|
|
58
|
+
```bash
|
|
59
|
+
pip install -e .[dev]
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
## 使用方法
|
|
63
|
+
|
|
64
|
+
### 命令行
|
|
65
|
+
|
|
66
|
+
```bash
|
|
67
|
+
# 对比两个 SQL 文件
nevernet-sql-diff users_v1.sql users_v2.sql

# 对比两个目录
nevernet-sql-diff backup/20260301 SQL

# 输出到文件
nevernet-sql-diff old.sql new.sql -o migration.sql

# 指定方言
nevernet-sql-diff old.sql new.sql -d postgres
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
### Python 模块
|
|
81
|
+
|
|
82
|
+
```python
|
|
83
|
+
from migration import parse_sql_file, SchemaDiff
|
|
84
|
+
|
|
85
|
+
# 解析 SQL
|
|
86
|
+
source = parse_sql_file("users_v1.sql", "mysql")
|
|
87
|
+
target = parse_sql_file("users_v2.sql", "mysql")
|
|
88
|
+
|
|
89
|
+
# 对比
|
|
90
|
+
diff = SchemaDiff(source, target)
|
|
91
|
+
changes = diff.compare()
|
|
92
|
+
|
|
93
|
+
# 生成迁移脚本
|
|
94
|
+
migration = diff.generate_migration()
|
|
95
|
+
print(migration)
|
|
96
|
+
```
|
|
97
|
+
|
|
98
|
+
## 发布到 PyPI
|
|
99
|
+
|
|
100
|
+
### 1. 安装发布工具
|
|
101
|
+
|
|
102
|
+
```bash
|
|
103
|
+
pip install build twine
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
### 2. 构建包
|
|
107
|
+
|
|
108
|
+
```bash
|
|
109
|
+
python -m build
|
|
110
|
+
```
|
|
111
|
+
|
|
112
|
+
### 3. 发布到 PyPI
|
|
113
|
+
|
|
114
|
+
```bash
|
|
115
|
+
twine upload dist/*
|
|
116
|
+
```
|
|
117
|
+
|
|
118
|
+
或者使用测试 PyPI:
|
|
119
|
+
|
|
120
|
+
```bash
|
|
121
|
+
twine upload --repository testpypi dist/*
|
|
122
|
+
```
|
|
123
|
+
|
|
124
|
+
### 4. 版本管理
|
|
125
|
+
|
|
126
|
+
更新版本号请修改:
|
|
127
|
+
- `setup.py` 中的 `version`
|
|
128
|
+
- Git tag: `git tag v0.1.0 && git push --tags`
|
|
129
|
+
|
|
130
|
+
## 测试样本
|
|
131
|
+
|
|
132
|
+
```bash
|
|
133
|
+
# 单文件对比
|
|
134
|
+
./run.sh SQL/users_v1.sql SQL/users_v2.sql
|
|
135
|
+
|
|
136
|
+
# 目录对比
|
|
137
|
+
./run.sh backup/20260301 SQL
|
|
138
|
+
|
|
139
|
+
# 输出到文件
|
|
140
|
+
./run.sh SQL/users_v1.sql SQL/users_v2.sql -o migration.sql
|
|
141
|
+
```
|
|
142
|
+
|
|
143
|
+
## 支持的 SQL 特性
|
|
144
|
+
|
|
145
|
+
| 特性 | 状态 |
|
|
146
|
+
|------|------|
|
|
147
|
+
| CREATE TABLE | ✅ |
|
|
148
|
+
| ADD COLUMN | ✅ |
|
|
149
|
+
| DROP COLUMN | ✅ |
|
|
150
|
+
| ALTER COLUMN | ✅ |
|
|
151
|
+
| PRIMARY KEY | ✅ |
|
|
152
|
+
| FOREIGN KEY | ✅ |
|
|
153
|
+
| UNIQUE INDEX | ✅ |
|
|
154
|
+
| DEFAULT VALUE | ✅ |
|
|
155
|
+
| AUTO_INCREMENT | ✅ |
|
|
156
|
+
| COMMENT | ✅ |
|
|
157
|
+
|
|
158
|
+
## 技术栈
|
|
159
|
+
|
|
160
|
+
- Python 3.8+
|
|
161
|
+
- sqlglot (SQL 解析)
|
|
162
|
+
|
|
163
|
+
## 许可证
|
|
164
|
+
|
|
165
|
+
MIT
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
README.md
|
|
2
|
+
setup.py
|
|
3
|
+
migration/__init__.py
|
|
4
|
+
migration/comparator.py
|
|
5
|
+
migration/parser.py
|
|
6
|
+
migration/schema_models.py
|
|
7
|
+
nevernet_sql_diff.egg-info/PKG-INFO
|
|
8
|
+
nevernet_sql_diff.egg-info/SOURCES.txt
|
|
9
|
+
nevernet_sql_diff.egg-info/dependency_links.txt
|
|
10
|
+
nevernet_sql_diff.egg-info/entry_points.txt
|
|
11
|
+
nevernet_sql_diff.egg-info/requires.txt
|
|
12
|
+
nevernet_sql_diff.egg-info/top_level.txt
|
|
13
|
+
tests/test_schema.py
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
sqlglot>=20.0.0
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
migration
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
#!/usr/bin/env python3
"""
SchemaDiff - SQL Schema Diff & Migration Generator

Packaging script for the nevernet-sql-diff distribution.
"""

from pathlib import Path

from setuptools import setup, find_packages

# Read the long description via pathlib so the file handle is closed
# deterministically (the original used a bare open() that was never closed).
LONG_DESCRIPTION = Path(__file__).with_name("README.md").read_text(encoding="utf-8")

setup(
    name="nevernet-sql-diff",
    version="0.1.0",
    author="NeverNet",
    author_email="contact@nevernet.com",
    description="SQL Schema Diff and Migration Generator",
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    url="https://github.com/nevernet/SchemaDiff",
    packages=find_packages(),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: 3.12",
    ],
    python_requires=">=3.8",
    install_requires=[
        # Fixed dependency name: the parser is built on sqlglot (see README
        # tech stack); "sqlglat" was a typo that broke clean installs.
        "sqlglot>=20.0.0",
    ],
    entry_points={
        "console_scripts": [
            # NOTE(review): "main" is not covered by find_packages(); confirm
            # main.py actually ships in the distribution or this script
            # will raise ImportError at runtime.
            "nevernet-sql-diff=main:main",
        ],
    },
    include_package_data=True,
)
|
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Unit tests for SchemaDiff
|
|
3
|
+
"""
|
|
4
|
+
import unittest
|
|
5
|
+
import os
|
|
6
|
+
import sys
|
|
7
|
+
|
|
8
|
+
# Add parent directory to path
|
|
9
|
+
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
|
10
|
+
|
|
11
|
+
from migration import (
|
|
12
|
+
parse_sql_file,
|
|
13
|
+
DatabaseSchema,
|
|
14
|
+
Table,
|
|
15
|
+
Column,
|
|
16
|
+
ForeignKey,
|
|
17
|
+
SchemaDiff
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class TestSchemaParser(unittest.TestCase):
    """Test SQL parsing functionality.

    Sample SQL files are resolved relative to this test file, so the suite
    passes regardless of the current working directory (the original used
    bare "SQL/..." paths that only worked when run from the repo root).
    """

    @staticmethod
    def _sql_path(filename):
        """Return the absolute path of a sample file under <repo root>/SQL."""
        repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        return os.path.join(repo_root, "SQL", filename)

    def test_parse_users_v1(self):
        """users_v1.sql parses into one table with id/name/email columns."""
        schema = parse_sql_file(self._sql_path("users_v1.sql"), "mysql")
        self.assertEqual(len(schema.tables), 1)

        # The table-key format may include a schema qualifier, so take the
        # single table by position instead of hard-coding the key.
        users = next(iter(schema.tables.values()))
        self.assertIsNotNone(users)
        self.assertEqual(len(users.columns), 3)

        col_names = [c.name for c in users.columns.values()]
        self.assertIn("id", col_names)
        self.assertIn("name", col_names)
        self.assertIn("email", col_names)

    def test_parse_users_v2(self):
        """users_v2.sql adds phone/created_at for a total of 5 columns."""
        schema = parse_sql_file(self._sql_path("users_v2.sql"), "mysql")
        self.assertEqual(len(schema.tables), 1)

        users = next(iter(schema.tables.values()))
        self.assertEqual(len(users.columns), 5)

        col_names = [c.name for c in users.columns.values()]
        self.assertIn("phone", col_names)
        self.assertIn("created_at", col_names)

    def test_parse_orders_with_fk(self):
        """orders.sql (with a foreign key) parses with at least 4 columns."""
        schema = parse_sql_file(self._sql_path("orders.sql"), "mysql")
        self.assertEqual(len(schema.tables), 1)

        orders = next(iter(schema.tables.values()))
        self.assertGreaterEqual(len(orders.columns), 4)
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
class TestSchemaDiff(unittest.TestCase):
    """Test schema comparison functionality.

    Sample SQL files are resolved relative to this test file, so the suite
    passes regardless of the current working directory (the original used
    bare "SQL/..." paths that only worked when run from the repo root).
    """

    @staticmethod
    def _sql_path(filename):
        """Return the absolute path of a sample file under <repo root>/SQL."""
        repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        return os.path.join(repo_root, "SQL", filename)

    def _diff_v1_v2(self):
        """Build a SchemaDiff from users_v1.sql -> users_v2.sql."""
        source = parse_sql_file(self._sql_path("users_v1.sql"), "mysql")
        target = parse_sql_file(self._sql_path("users_v2.sql"), "mysql")
        return SchemaDiff(source, target)

    def test_detect_column_addition(self):
        """v1 -> v2 should report at least the two added columns."""
        changes = self._diff_v1_v2().compare()
        # Added columns: phone, created_at
        self.assertGreaterEqual(len(changes), 2)

    def test_generate_migration(self):
        """Migration output contains ALTER TABLE ... ADD COLUMN statements."""
        diff = self._diff_v1_v2()
        diff.compare()  # generate_migration() requires compare() first
        migration = diff.generate_migration()

        self.assertIn("ALTER TABLE", migration)
        self.assertIn("ADD COLUMN", migration)
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
class TestSchemaModels(unittest.TestCase):
    """Test schema model classes"""

    def test_column_creation(self):
        """A Column keeps its name and type, and defaults to nullable."""
        column = Column("id", "INT")
        self.assertEqual(column.name, "id")
        self.assertEqual(column.data_type, "INT")
        self.assertTrue(column.nullable)

    def test_table_columns_dict(self):
        """Columns inserted into a Table are retrievable by name."""
        users = Table("users")
        for col_name, col_type in (("id", "INT"), ("name", "VARCHAR(100)")):
            users.columns[col_name] = Column(col_name, col_type)

        self.assertIn("id", users.columns)
        self.assertIn("name", users.columns)
        self.assertEqual(len(users.columns), 2)
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
# Allow running the suite directly: `python tests/test_schema.py`.
if __name__ == "__main__":
    unittest.main()
|