tablemaster 2.1.2.tar.gz → 2.1.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. {tablemaster-2.1.2 → tablemaster-2.1.3}/PKG-INFO +1 -1
  2. {tablemaster-2.1.2 → tablemaster-2.1.3}/pyproject.toml +1 -1
  3. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/feishu.py +14 -14
  4. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/gspread.py +46 -46
  5. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/schema/dialects/base.py +19 -0
  6. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/schema/dialects/mysql.py +31 -2
  7. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/schema/dialects/postgresql.py +38 -2
  8. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/schema/diff.py +36 -3
  9. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/schema/models.py +2 -0
  10. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster.egg-info/PKG-INFO +1 -1
  11. {tablemaster-2.1.2 → tablemaster-2.1.3}/tests/test_schema_core.py +112 -0
  12. {tablemaster-2.1.2 → tablemaster-2.1.3}/LICENSE +0 -0
  13. {tablemaster-2.1.2 → tablemaster-2.1.3}/README.md +0 -0
  14. {tablemaster-2.1.2 → tablemaster-2.1.3}/setup.cfg +0 -0
  15. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/__init__.py +0 -0
  16. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/__main__.py +0 -0
  17. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/cli.py +0 -0
  18. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/config.py +0 -0
  19. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/database.py +0 -0
  20. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/local.py +0 -0
  21. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/schema/__init__.py +0 -0
  22. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/schema/apply.py +0 -0
  23. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/schema/dialects/__init__.py +0 -0
  24. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/schema/dialects/tidb.py +0 -0
  25. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/schema/init.py +0 -0
  26. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/schema/introspect.py +0 -0
  27. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/schema/loader.py +0 -0
  28. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/schema/plan.py +0 -0
  29. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/schema/pull.py +0 -0
  30. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/sync.py +0 -0
  31. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster/utils.py +0 -0
  32. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster.egg-info/SOURCES.txt +0 -0
  33. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster.egg-info/dependency_links.txt +0 -0
  34. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster.egg-info/entry_points.txt +0 -0
  35. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster.egg-info/requires.txt +0 -0
  36. {tablemaster-2.1.2 → tablemaster-2.1.3}/tablemaster.egg-info/top_level.txt +0 -0
  37. {tablemaster-2.1.2 → tablemaster-2.1.3}/tests/test_error_visibility.py +0 -0
--- tablemaster-2.1.2/PKG-INFO
+++ tablemaster-2.1.3/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tablemaster
-Version: 2.1.2
+Version: 2.1.3
 Summary: tablemaster is a Python toolkit for moving and managing tabular data across databases, Feishu/Lark, Google Sheets, and local files with one consistent API.
 Author-email: Livid <livid.su@gmail.com>
 Project-URL: Homepage, https://github.com/ilivid/tablemaster
--- tablemaster-2.1.2/pyproject.toml
+++ tablemaster-2.1.3/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "tablemaster"
-version = "2.1.2"
+version = "2.1.3"
 description = "tablemaster is a Python toolkit for moving and managing tabular data across databases, Feishu/Lark, Google Sheets, and local files with one consistent API."
 readme = "README.md"
 requires-python = ">=3.9"
--- tablemaster-2.1.2/tablemaster/feishu.py
+++ tablemaster-2.1.3/tablemaster/feishu.py
@@ -178,10 +178,10 @@ def fs_write_df(sheet_address, df, feishu_cfg, loc='A1', clear_sheet=True):
             if clear_resp.json().get('code') == 0:
                 logger.info('sheet cleared')
             else:
-                raise RuntimeError(f"failed to clear sheet: {clear_resp.json().get('msg')}")
+                raise RuntimeError(f"failed to clear sheet: {clear_resp.json().get('msg')}")
         except Exception as e:
-            logger.exception('failed to clear sheet: %s', e)
-            raise
+            logger.exception('failed to clear sheet: %s', e)
+            raise
 
     # Handle DataFrame data types
     df_copy = df.copy()
@@ -306,7 +306,7 @@ def fs_write_base(sheet_address, df, feishu_cfg, clear_table=False):
     existing_fields = _get_bitable_fields(app_token, table_id, header)
 
     if not existing_fields:
-        raise ValueError('could not fetch table fields or table has no fields')
+        raise ValueError('could not fetch table fields or table has no fields')
 
     logger.info('table has %s fields', len(existing_fields))
 
@@ -323,7 +323,7 @@ def fs_write_base(sheet_address, df, feishu_cfg, clear_table=False):
             logger.warning('skip column: %s', field)
 
     if not valid_fields:
-        raise ValueError('no valid fields to write, all dataframe columns are missing in bitable')
+        raise ValueError('no valid fields to write, all dataframe columns are missing in bitable')
 
     logger.info('will write %s valid fields', len(valid_fields))
 
@@ -359,9 +359,9 @@ def fs_write_base(sheet_address, df, feishu_cfg, clear_table=False):
                 _request_with_retry("post", delete_url, headers=header, json_data=delete_data)
                 logger.info('deleted %s records', len(record_ids))
 
-        except Exception as e:
-            logger.exception('failed to clear table: %s', e)
-            raise
+        except Exception as e:
+            logger.exception('failed to clear table: %s', e)
+            raise
 
     # Process the DataFrame - keep only valid fields
     df_copy = df[list(valid_fields)].copy()
@@ -444,7 +444,7 @@ def fs_write_base(sheet_address, df, feishu_cfg, clear_table=False):
                     str_val = str(value)
                     if str_val and str_val != 'None' and str_val != 'nan':
                         fields[col] = str_val
-            except Exception:
+            except Exception:
                 if col not in skipped_cols:
                     skipped_cols.add(col)
                 continue
@@ -457,7 +457,7 @@ def fs_write_base(sheet_address, df, feishu_cfg, clear_table=False):
     # Batch write (at most 500 records per request)
     batch_size = 500
    all_responses = []
-    failed_batches = []
+    failed_batches = []
 
    for i in range(0, len(records), batch_size):
        batch = records[i:i + batch_size]
@@ -474,11 +474,11 @@ def fs_write_base(sheet_address, df, feishu_cfg, clear_table=False):
                 logger.info('batch %s wrote %s records', i // batch_size + 1, len(batch))
             else:
                 logger.error('failed to write batch: %s', response.get('msg', 'Unknown error'))
-                failed_batches.append((i // batch_size + 1, response.get('msg', 'Unknown error')))
+                failed_batches.append((i // batch_size + 1, response.get('msg', 'Unknown error')))
 
         except Exception as e:
             logger.exception('failed to write batch: %s', e)
-            failed_batches.append((i // batch_size + 1, str(e)))
+            failed_batches.append((i // batch_size + 1, str(e)))
 
     logger.info('write summary total records: %s', len(records))
     logger.info('write summary fields written: %s', len(valid_fields))
@@ -486,8 +486,8 @@ def fs_write_base(sheet_address, df, feishu_cfg, clear_table=False):
     logger.info('write summary fields skipped: %s', len(missing_fields))
     for field in sorted(missing_fields):
         logger.info('skip field: %s', field)
-    if failed_batches:
-        raise RuntimeError(f'bitable write failed for {len(failed_batches)} batch(es): {failed_batches}')
+    if failed_batches:
+        raise RuntimeError(f'bitable write failed for {len(failed_batches)} batch(es): {failed_batches}')
     logger.info('data is written')
 
     return all_responses
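The last three hunks frame the write path's batching contract, unchanged in substance here: records go out at most 500 per request, per-batch failures are collected in failed_batches rather than aborting mid-stream, and a RuntimeError is raised only after the summary is logged. Reduced to a self-contained sketch (the send callable and integer records are placeholders, not the package's API):

    def write_in_batches(records, send, batch_size=500):
        # Collect per-batch failures instead of stopping at the first one,
        # then fail loudly once the whole run is accounted for.
        failed_batches = []
        for i in range(0, len(records), batch_size):
            batch = records[i:i + batch_size]
            try:
                send(batch)
            except Exception as e:
                failed_batches.append((i // batch_size + 1, str(e)))
        if failed_batches:
            raise RuntimeError(f'write failed for {len(failed_batches)} batch(es): {failed_batches}')

    write_in_batches(list(range(1200)), send=lambda batch: None)  # three batches, all succeed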
--- tablemaster-2.1.2/tablemaster/gspread.py
+++ tablemaster-2.1.3/tablemaster/gspread.py
@@ -1,15 +1,15 @@
-import gspread
-import pandas as pd
+import gspread
+import pandas as pd
 import re
 import warnings
 import logging
 from functools import lru_cache
 
 logger = logging.getLogger(__name__)
-
-def _is_google_sheet_id(s):
-    return len(s) > 40 and ' ' not in s
-
+
+def _is_google_sheet_id(s):
+    return len(s) > 40 and ' ' not in s
+
 
 def _is_cell_loc(value):
     return isinstance(value, str) and re.match(r'^[A-Za-z]+[1-9]\d*$', value.strip()) is not None
@@ -49,37 +49,37 @@ def gs_read_df(address, cfg=None, service_account_path=None):
     logger.info('reading google sheets')
     sa_path = _resolve_service_account_path(cfg, service_account_path)
     gc = _get_gspread_client(sa_path)
-
+
     spreadsheet_identifier = address[0]
     worksheet_name = address[1]
-
-    try:
-        if _is_google_sheet_id(spreadsheet_identifier):
+
+    try:
+        if _is_google_sheet_id(spreadsheet_identifier):
             logger.info('opening sheet by ID: %s', spreadsheet_identifier)
-            sh = gc.open_by_key(spreadsheet_identifier)
-        else:
+            sh = gc.open_by_key(spreadsheet_identifier)
+        else:
             logger.info('opening sheet by name: %s', spreadsheet_identifier)
-            sh = gc.open(spreadsheet_identifier)
-
-        wks = sh.worksheet(worksheet_name)
-        df = pd.DataFrame(wks.get_all_records())
+            sh = gc.open(spreadsheet_identifier)
+
+        wks = sh.worksheet(worksheet_name)
+        df = pd.DataFrame(wks.get_all_records())
         logger.info('google sheets read success')
         logger.debug('google sheets preview: %s', df.head())
-        return df
-
-    except gspread.exceptions.SpreadsheetNotFound:
+        return df
+
+    except gspread.exceptions.SpreadsheetNotFound:
         message = f"spreadsheet '{spreadsheet_identifier}' not found"
         logger.error(message)
         raise ValueError(message)
-    except gspread.exceptions.WorksheetNotFound:
+    except gspread.exceptions.WorksheetNotFound:
         message = f"worksheet '{worksheet_name}' not found in spreadsheet"
         logger.error(message)
         raise ValueError(message)
-    except Exception as e:
+    except Exception as e:
         logger.exception('an unexpected error occurred: %s', e)
         raise
-
-
+
+
 def gs_write_df(address, df, cfg=None, loc='A1', service_account_path=None):
     if isinstance(cfg, str) and _is_cell_loc(cfg):
         _warn_deprecated('Passing loc as the third positional argument is deprecated; use keyword loc=...')
@@ -91,44 +91,44 @@ def gs_write_df(address, df, cfg=None, loc='A1', service_account_path=None):
     logger.info('writing google sheets')
     sa_path = _resolve_service_account_path(cfg, service_account_path)
     gc = _get_gspread_client(sa_path)
-
+
     spreadsheet_identifier = address[0]
     worksheet_name = address[1]
 
-    is_id = _is_google_sheet_id(spreadsheet_identifier)
-
-    try:
-        if is_id:
+    is_id = _is_google_sheet_id(spreadsheet_identifier)
+
+    try:
+        if is_id:
             logger.info('opening sheet by ID: %s', spreadsheet_identifier)
-            sh = gc.open_by_key(spreadsheet_identifier)
-        else:
+            sh = gc.open_by_key(spreadsheet_identifier)
+        else:
             logger.info('opening sheet by name: %s', spreadsheet_identifier)
-            sh = gc.open(spreadsheet_identifier)
-
-    except gspread.exceptions.SpreadsheetNotFound:
-        if is_id:
+            sh = gc.open(spreadsheet_identifier)
+
+    except gspread.exceptions.SpreadsheetNotFound:
+        if is_id:
             message = f"spreadsheet ID '{spreadsheet_identifier}' not found, cannot create with specific ID"
             logger.error(message)
             raise ValueError(message)
-        else:
+        else:
             logger.info("spreadsheet '%s' not found, creating one", spreadsheet_identifier)
-            sh = gc.create(spreadsheet_identifier)
-
-    try:
-        wks = sh.worksheet(worksheet_name)
-    except gspread.exceptions.WorksheetNotFound:
+            sh = gc.create(spreadsheet_identifier)
+
+    try:
+        wks = sh.worksheet(worksheet_name)
+    except gspread.exceptions.WorksheetNotFound:
         logger.info('worksheet "%s" not found, creating one', worksheet_name)
-        wks = sh.add_worksheet(title=worksheet_name, rows="100", cols="20")
+        wks = sh.add_worksheet(title=worksheet_name, rows="100", cols="20")
 
-    try:
-        wks.clear()
+    try:
+        wks.clear()
         df_copy = df.copy()
         non_float_int_columns = df_copy.select_dtypes(exclude=['float64', 'int64']).columns
-        for col in non_float_int_columns:
+        for col in non_float_int_columns:
             df_copy[col] = df_copy[col].astype(str)
         wks.update(loc, ([df_copy.columns.values.tolist()] + df_copy.values.tolist()))
-
+
         logger.info('data is written')
-    except Exception as e:
+    except Exception as e:
         logger.exception('failed to update worksheet: %s', e)
         raise
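The context lines above also show the address contract shared by both helpers: a two-item sequence of (spreadsheet title or ID, worksheet name), where a string longer than 40 characters with no spaces is treated as an ID per _is_google_sheet_id. A usage sketch with placeholder names (the spreadsheet, worksheet, and credential path are illustrative only):

    from tablemaster.gspread import gs_read_df, gs_write_df

    address = ('My Spreadsheet', 'Sheet1')  # a 40+ char ID would be opened via open_by_key instead
    df = gs_read_df(address, service_account_path='service_account.json')
    gs_write_df(address, df, loc='A1', service_account_path='service_account.json')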
--- tablemaster-2.1.2/tablemaster/schema/dialects/base.py
+++ tablemaster-2.1.3/tablemaster/schema/dialects/base.py
@@ -90,3 +90,22 @@ class BaseDialect(ABC):
     @abstractmethod
     def gen_drop_index(self, table: str, index_name: str, schema_name: str | None = None) -> str:
         pass
+
+    @abstractmethod
+    def gen_drop_primary_key(
+        self,
+        table: str,
+        primary_key_name: str | None = None,
+        schema_name: str | None = None,
+    ) -> str:
+        pass
+
+    @abstractmethod
+    def gen_add_primary_key(
+        self,
+        table: str,
+        columns: list[str],
+        primary_key_name: str | None = None,
+        schema_name: str | None = None,
+    ) -> str:
+        pass
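Every concrete dialect must now implement both generators. As a shape-only illustration, here are standalone functions with the same signatures (hypothetical naming and no identifier quoting; real dialects implement these as BaseDialect methods, as the next two files show):

    from __future__ import annotations  # lets the | annotation syntax run on Python 3.9

    def gen_drop_primary_key(table: str, primary_key_name: str | None = None,
                             schema_name: str | None = None) -> str:
        # Hypothetical fallback to the conventional <table>_pkey name.
        name = primary_key_name or f'{table}_pkey'
        return f'ALTER TABLE {table} DROP CONSTRAINT {name}'

    def gen_add_primary_key(table: str, columns: list[str],
                            primary_key_name: str | None = None,
                            schema_name: str | None = None) -> str:
        return f'ALTER TABLE {table} ADD PRIMARY KEY ({", ".join(columns)})'

    print(gen_drop_primary_key('orders'))          # ALTER TABLE orders DROP CONSTRAINT orders_pkey
    print(gen_add_primary_key('orders', ['id']))   # ALTER TABLE orders ADD PRIMARY KEY (id)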
--- tablemaster-2.1.2/tablemaster/schema/dialects/mysql.py
+++ tablemaster-2.1.3/tablemaster/schema/dialects/mysql.py
@@ -57,7 +57,9 @@ class MySQLDialect(BaseDialect):
         results: list[ActualTable] = []
         for name in names:
             columns = []
-            pk_cols = set(inspector.get_pk_constraint(name, schema=database).get('constrained_columns') or [])
+            pk_constraint = inspector.get_pk_constraint(name, schema=database) or {}
+            pk_column_list = list(pk_constraint.get('constrained_columns') or [])
+            pk_cols = set(pk_column_list)
             for col in inspector.get_columns(name, schema=database):
                 columns.append(
                     ActualColumn(
@@ -83,7 +85,16 @@ class MySQLDialect(BaseDialect):
                 table_comment = (inspector.get_table_comment(name, schema=database) or {}).get('text')
             except Exception:
                 table_comment = None
-            results.append(ActualTable(table=name, columns=columns, indexes=indexes, comment=table_comment))
+            results.append(
+                ActualTable(
+                    table=name,
+                    columns=columns,
+                    indexes=indexes,
+                    comment=table_comment,
+                    primary_key_columns=pk_column_list,
+                    primary_key_name=pk_constraint.get('name'),
+                )
+            )
         return results
 
     def _qualified_table(self, table: str, schema_name: str | None = None) -> str:
@@ -190,3 +201,21 @@ class MySQLDialect(BaseDialect):
 
     def gen_drop_index(self, table: str, index_name: str, schema_name: str | None = None) -> str:
         return f'DROP INDEX {_quote(index_name)} ON {self._qualified_table(table, schema_name)}'
+
+    def gen_drop_primary_key(
+        self,
+        table: str,
+        primary_key_name: str | None = None,
+        schema_name: str | None = None,
+    ) -> str:
+        return f'ALTER TABLE {self._qualified_table(table, schema_name)} DROP PRIMARY KEY'
+
+    def gen_add_primary_key(
+        self,
+        table: str,
+        columns: list[str],
+        primary_key_name: str | None = None,
+        schema_name: str | None = None,
+    ) -> str:
+        cols_sql = ', '.join(_quote(c) for c in columns)
+        return f'ALTER TABLE {self._qualified_table(table, schema_name)} ADD PRIMARY KEY ({cols_sql})'
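A quick look at the DDL the new MySQL generators produce; constructing MySQLDialect() with no arguments mirrors the tests below, while the backtick quoting in the output comments assumes _quote follows MySQL convention:

    from tablemaster.schema.dialects.mysql import MySQLDialect

    dialect = MySQLDialect()
    # MySQL addresses the primary key positionally, so primary_key_name is accepted but unused:
    print(dialect.gen_drop_primary_key('orders'))
    # ALTER TABLE `orders` DROP PRIMARY KEY
    print(dialect.gen_add_primary_key('orders', ['amazon_order_id', 'amazon_order_item_code']))
    # ALTER TABLE `orders` ADD PRIMARY KEY (`amazon_order_id`, `amazon_order_item_code`)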
--- tablemaster-2.1.2/tablemaster/schema/dialects/postgresql.py
+++ tablemaster-2.1.3/tablemaster/schema/dialects/postgresql.py
@@ -60,7 +60,9 @@ class PostgreSQLDialect(BaseDialect):
         results: list[ActualTable] = []
         for name in names:
             columns = []
-            pk_cols = set(inspector.get_pk_constraint(name, schema=schema).get('constrained_columns') or [])
+            pk_constraint = inspector.get_pk_constraint(name, schema=schema) or {}
+            pk_column_list = list(pk_constraint.get('constrained_columns') or [])
+            pk_cols = set(pk_column_list)
             for col in inspector.get_columns(name, schema=schema):
                 columns.append(
                     ActualColumn(
@@ -87,7 +89,15 @@ class PostgreSQLDialect(BaseDialect):
             except Exception:
                 table_comment = None
             results.append(
-                ActualTable(table=name, columns=columns, indexes=indexes, comment=table_comment, schema_name=schema)
+                ActualTable(
+                    table=name,
+                    columns=columns,
+                    indexes=indexes,
+                    comment=table_comment,
+                    schema_name=schema,
+                    primary_key_columns=pk_column_list,
+                    primary_key_name=pk_constraint.get('name'),
+                )
             )
         return results
 
@@ -189,3 +199,29 @@ class PostgreSQLDialect(BaseDialect):
     def gen_drop_index(self, table: str, index_name: str, schema_name: str | None = None) -> str:
         schema = schema_name or 'public'
         return f'DROP INDEX {_quote(schema)}.{_quote(index_name)}'
+
+    def gen_drop_primary_key(
+        self,
+        table: str,
+        primary_key_name: str | None = None,
+        schema_name: str | None = None,
+    ) -> str:
+        constraint_name = primary_key_name or f'{table}_pkey'
+        return (
+            f'ALTER TABLE {self._qualified_table(table, schema_name)} '
+            f'DROP CONSTRAINT {_quote(constraint_name)}'
+        )
+
+    def gen_add_primary_key(
+        self,
+        table: str,
+        columns: list[str],
+        primary_key_name: str | None = None,
+        schema_name: str | None = None,
+    ) -> str:
+        constraint_name = primary_key_name or f'{table}_pkey'
+        cols_sql = ', '.join(_quote(c) for c in columns)
+        return (
+            f'ALTER TABLE {self._qualified_table(table, schema_name)} '
+            f'ADD CONSTRAINT {_quote(constraint_name)} PRIMARY KEY ({cols_sql})'
+        )
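The PostgreSQL implementations work through named constraints instead, falling back to the server's <table>_pkey naming convention when introspection supplies no name. A sketch mirroring the test assertions below (how _qualified_table renders the table reference is outside this diff, so those parts of the output are elided):

    from tablemaster.schema.dialects.postgresql import PostgreSQLDialect

    dialect = PostgreSQLDialect()
    print(dialect.gen_drop_primary_key('orders'))
    # ALTER TABLE ... DROP CONSTRAINT "orders_pkey"
    print(dialect.gen_add_primary_key('orders', ['amazon_order_id', 'amazon_order_item_code']))
    # ALTER TABLE ... ADD CONSTRAINT "orders_pkey" PRIMARY KEY ("amazon_order_id", "amazon_order_item_code")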
--- tablemaster-2.1.2/tablemaster/schema/diff.py
+++ tablemaster-2.1.3/tablemaster/schema/diff.py
@@ -94,6 +94,23 @@ def generate_plan(
 
     desired_cols = {c.name: c for c in table_def.columns}
     actual_cols = {c.name: c for c in current.columns}
+    desired_pk = [c.name for c in table_def.columns if c.primary_key]
+    actual_pk = list(current.primary_key_columns or [c.name for c in current.columns if c.primary_key])
+    primary_key_changed = desired_pk != actual_pk
+
+    if primary_key_changed and actual_pk:
+        plan.actions.append(
+            _action(
+                'DROP_PRIMARY_KEY',
+                table_name,
+                ddl=dialect.gen_drop_primary_key(
+                    table_name,
+                    primary_key_name=current.primary_key_name,
+                    schema_name=table_def.schema_name,
+                ),
+                detail={'old': actual_pk, 'new': desired_pk},
+            )
+        )
 
     for col_name, desired_col in desired_cols.items():
         actual_col = actual_cols.get(col_name)
@@ -126,7 +143,8 @@ def generate_plan(
             )
         )
 
-        if bool(desired_col.nullable) != bool(actual_col.nullable):
+        desired_nullable = bool(desired_col.nullable) and not bool(desired_col.primary_key)
+        if desired_nullable != bool(actual_col.nullable):
             plan.actions.append(
                 _action(
                     'ALTER_COLUMN_NULLABLE',
@@ -135,11 +153,11 @@ def generate_plan(
                     ddl=dialect.gen_alter_column_nullable(
                         table_name,
                         col_name,
-                        desired_col.nullable,
+                        desired_nullable,
                         col_type=dialect.map_type(desired_col.type),
                         schema_name=table_def.schema_name,
                     ),
-                    detail={'old': actual_col.nullable, 'new': desired_col.nullable},
+                    detail={'old': actual_col.nullable, 'new': desired_nullable},
                 )
             )
 
@@ -190,6 +208,21 @@ def generate_plan(
             )
         )
 
+    if primary_key_changed and desired_pk:
+        plan.actions.append(
+            _action(
+                'ADD_PRIMARY_KEY',
+                table_name,
+                ddl=dialect.gen_add_primary_key(
+                    table_name,
+                    desired_pk,
+                    primary_key_name=current.primary_key_name,
+                    schema_name=table_def.schema_name,
+                ),
+                detail={'old': actual_pk, 'new': desired_pk},
+            )
+        )
+
     desired_indexes = {idx.name: idx for idx in table_def.indexes}
     actual_indexes = {idx.name: idx for idx in current.indexes}
 
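Net effect of these four hunks: when the desired and introspected key columns differ, the plan brackets the column work with a drop/add pair (DROP_PRIMARY_KEY is appended before the column loop runs, ADD_PRIMARY_KEY after it), and any column marked primary_key is treated as NOT NULL regardless of its declared nullable flag, so the nullability fix lands before the key is re-added. With desired/actual built as in the tests below, the resulting order looks like:

    plan = generate_plan('mydb', desired, actual, MySQLDialect())
    print([a.action for a in plan.actions])
    # e.g. ['DROP_PRIMARY_KEY', 'ALTER_COLUMN_NULLABLE', 'ADD_PRIMARY_KEY']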
--- tablemaster-2.1.2/tablemaster/schema/models.py
+++ tablemaster-2.1.3/tablemaster/schema/models.py
@@ -50,6 +50,8 @@ class ActualTable:
     indexes: list[IndexDef]
     comment: Optional[str] = None
     schema_name: Optional[str] = None
+    primary_key_columns: list[str] = field(default_factory=list)
+    primary_key_name: Optional[str] = None
 
 
 @dataclass
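Both new ActualTable fields carry safe defaults (an empty list and None), so existing constructor calls are unaffected; a minimal construction mirroring the tests below:

    from tablemaster.schema.models import ActualColumn, ActualTable

    table = ActualTable(
        table='orders',
        columns=[
            ActualColumn(name='id', type='BIGINT', nullable=False,
                         default=None, comment=None, primary_key=True),
        ],
        indexes=[],
        primary_key_columns=['id'],   # introspected key columns, in order
        primary_key_name='orders_pkey',
    )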
--- tablemaster-2.1.2/tablemaster.egg-info/PKG-INFO
+++ tablemaster-2.1.3/tablemaster.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tablemaster
-Version: 2.1.2
+Version: 2.1.3
 Summary: tablemaster is a Python toolkit for moving and managing tabular data across databases, Feishu/Lark, Google Sheets, and local files with one consistent API.
 Author-email: Livid <livid.su@gmail.com>
 Project-URL: Homepage, https://github.com/ilivid/tablemaster
--- tablemaster-2.1.2/tests/test_schema_core.py
+++ tablemaster-2.1.3/tests/test_schema_core.py
@@ -3,6 +3,7 @@ from tempfile import TemporaryDirectory
 import unittest
 
 from tablemaster.schema.dialects.mysql import MySQLDialect
+from tablemaster.schema.dialects.postgresql import PostgreSQLDialect
 from tablemaster.schema.diff import generate_plan
 from tablemaster.schema.loader import load_schema_definitions
 from tablemaster.schema.models import ActualColumn, ActualTable
@@ -132,6 +133,117 @@ class SchemaCoreTests(unittest.TestCase):
         self.assertEqual('订单:主表', loaded[0].comment)
         self.assertEqual('主键:业务单号', loaded[0].columns[0].comment)
 
+    def test_diff_rebuilds_composite_primary_key_for_postgresql(self):
+        with TemporaryDirectory() as td:
+            root = Path(td)
+            schema_dir = root / 'schema' / 'mydb'
+            schema_dir.mkdir(parents=True, exist_ok=True)
+            (schema_dir / 'orders.yaml').write_text(
+                '\n'.join(
+                    [
+                        'table: orders',
+                        'columns:',
+                        '  - name: amazon_order_id',
+                        '    type: VARCHAR(50)',
+                        '    primary_key: true',
+                        '    nullable: false',
+                        '  - name: amazon_order_item_code',
+                        '    type: VARCHAR(50)',
+                        '    primary_key: true',
+                        '    nullable: false',
+                    ]
+                ),
+                encoding='utf-8',
+            )
+            desired = load_schema_definitions(connection='mydb', root_dir=root / 'schema')
+            actual = [
+                ActualTable(
+                    table='orders',
+                    columns=[
+                        ActualColumn(
+                            name='amazon_order_id',
+                            type='VARCHAR(50)',
+                            nullable=False,
+                            default=None,
+                            comment=None,
+                            primary_key=True,
+                        ),
+                        ActualColumn(
+                            name='amazon_order_item_code',
+                            type='VARCHAR(50)',
+                            nullable=False,
+                            default=None,
+                            comment=None,
+                            primary_key=False,
+                        ),
+                    ],
+                    indexes=[],
+                    primary_key_columns=['amazon_order_id'],
+                    primary_key_name='orders_pkey',
+                )
+            ]
+            plan = generate_plan('mydb', desired, actual, PostgreSQLDialect())
+            actions = [a.action for a in plan.actions]
+            self.assertIn('DROP_PRIMARY_KEY', actions)
+            self.assertIn('ADD_PRIMARY_KEY', actions)
+            self.assertTrue(
+                any(
+                    'DROP CONSTRAINT "orders_pkey"' in a.ddl
+                    for a in plan.actions
+                    if a.action == 'DROP_PRIMARY_KEY'
+                )
+            )
+            self.assertTrue(
+                any(
+                    'PRIMARY KEY ("amazon_order_id", "amazon_order_item_code")' in a.ddl
+                    for a in plan.actions
+                    if a.action == 'ADD_PRIMARY_KEY'
+                )
+            )
+
+    def test_diff_primary_key_implies_not_null(self):
+        with TemporaryDirectory() as td:
+            root = Path(td)
+            schema_dir = root / 'schema' / 'mydb'
+            schema_dir.mkdir(parents=True, exist_ok=True)
+            (schema_dir / 'orders.yaml').write_text(
+                '\n'.join(
+                    [
+                        'table: orders',
+                        'columns:',
+                        '  - name: id',
+                        '    type: BIGINT',
+                        '    primary_key: true',
+                    ]
+                ),
+                encoding='utf-8',
+            )
+            desired = load_schema_definitions(connection='mydb', root_dir=root / 'schema')
+            actual = [
+                ActualTable(
+                    table='orders',
+                    columns=[
+                        ActualColumn(
+                            name='id',
+                            type='BIGINT',
+                            nullable=True,
+                            default=None,
+                            comment=None,
+                            primary_key=False,
+                        )
+                    ],
+                    indexes=[],
+                )
+            ]
+            plan = generate_plan('mydb', desired, actual, MySQLDialect())
+            self.assertTrue(
+                any(
+                    a.action == 'ALTER_COLUMN_NULLABLE' and a.column == 'id' and 'NOT NULL' in a.ddl
+                    for a in plan.actions
+                )
+            )
+            self.assertTrue(any(a.action == 'ADD_PRIMARY_KEY' for a in plan.actions))
+
 
 if __name__ == '__main__':
     unittest.main()
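Because the module ends with unittest.main(), the new tests can be run directly (assuming tablemaster is importable in the environment):

    python tests/test_schema_core.py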