@raft-hlc-sync-protocol/raft-sync-lib 1.0.4 → 1.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -118,6 +118,12 @@ const engine = new SyncEngine({
118
118
  console.log(`Shard ${shardId}: leader=${leaderId} local=${isLocal}`);
119
119
  },
120
120
 
121
+ // Required: any node can be elected as Leader and must handle proxied write requests
122
+ onExecuteProxyRequest: async (payload) => {
123
+ // Execute the write request locally and return the result
124
+ return await yourBusinessLogic(payload);
125
+ },
126
+
121
127
  // Optional callbacks
122
128
  onWriteCompleted: () => engine.notifyLocalWrite(),
123
129
  onError: (ctx, err) => console.error(`[sync] ${ctx}:`, err.message),
@@ -126,6 +132,7 @@ const engine = new SyncEngine({
126
132
  engine.registerTable('users', {
127
133
  keyColumns: ['user_id'],
128
134
  dataColumns: ['user_id', 'name', 'email', '_hlc'], // must include _hlc
135
+ // registerTable validates: keyColumns non-empty, dataColumns non-empty, '_hlc' in dataColumns
129
136
  validator: (row) => { if (!row.user_id) throw new Error('missing user_id'); },
130
137
  });
131
138
 
@@ -161,35 +168,22 @@ setInterval(() => engine.tickCleanup(), 3600_000); // Cleanup old logs every 1
161
168
 
162
169
  ### Step 5: Write data
163
170
 
164
- With `dialect` set and `initTriggers()` called, all databases work the same way — triggers automatically log changes:
171
+ > **Important:** All writes (INSERT/UPDATE/DELETE) to registered tables **must** go through `engine.write()` or be manually wrapped with `engine.beginManualTransaction()` / `commitManualTransaction()`. Do NOT write directly via `db.run(...)` outside of these wrappers — the engine needs to manage transactions and 2PC coordination to ensure data is correctly synced across nodes.
165
172
 
166
- ```js
167
- const ts = engine.hlc.tick();
168
- db.run('INSERT INTO users (user_id, name, _hlc) VALUES (?, ?, ?)',
169
- ['u1', 'Alice', ts]);
170
- engine.notifyLocalWrite(); // pushes to peers
171
- ```
173
+ **Recommended: `engine.write()` (high-level API)**
172
174
 
173
- **Without triggers** (manual logChange fallback):
175
+ The engine handles the entire lifecycle automatically: BEGIN → execute → trigger logs → 2PC prepare/ack → COMMIT → broadcast commit → notify peers.
174
176
 
175
177
  ```js
176
- import { logChange } from 'sync-lib';
177
-
178
- const ts = engine.hlc.tick();
179
- db.run('INSERT INTO users (user_id, name, _hlc) VALUES (?, ?, ?)',
180
- ['u1', 'Alice', ts]);
181
- logChange(db, 'users', 'INSERT',
182
- JSON.stringify({ user_id: 'u1' }),
183
- JSON.stringify({ user_id: 'u1', name: 'Alice', _hlc: ts }),
184
- ts);
185
- engine.notifyLocalWrite();
178
+ const result = await engine.write(async (db) => {
179
+ const ts = engine.hlc.tick();
180
+ db.run('INSERT INTO users (user_id, name, _hlc) VALUES (?, ?, ?)',
181
+ ['u1', 'Alice', ts]);
182
+ return { userId: 'u1' }; // return value is passed through
183
+ }, userId); // shardKey for routing
186
184
  ```
187
185
 
188
- ---
189
-
190
- ## 2PC: Strong Consistency Writes
191
-
192
- When you need atomic multi-node writes:
186
+ **Low-level API (for fine-grained control)**
193
187
 
194
188
  ```js
195
189
  const writeId = crypto.randomUUID();
@@ -296,19 +290,39 @@ const db = {
296
290
  },
297
291
 
298
292
  // Optional: inject PG triggers so initTriggers() works
293
+ // Note: must include HLC validation (same as built-in dialect)
299
294
  triggersSQL(tableName, def) {
300
- const keyExpr = def.keyColumns.map(c => `'${c}', NEW.${c}`).join(', ');
295
+ const keyExprNew = def.keyColumns.map(c => `'${c}', NEW.${c}`).join(', ');
296
+ const keyExprOld = def.keyColumns.map(c => `'${c}', OLD.${c}`).join(', ');
301
297
  const dataExpr = def.dataColumns.map(c => `'${c}', NEW.${c}`).join(', ');
302
298
  return [
299
+ // HLC validation: enforce valid, monotonically increasing _hlc
300
+ `CREATE OR REPLACE FUNCTION _sync_trg_${tableName}_hlc_fn() RETURNS TRIGGER AS $$
301
+ BEGIN
302
+ IF NEW._hlc IS NULL OR NEW._hlc = '' OR NEW._hlc = '0' THEN
303
+ RAISE EXCEPTION 'sync-lib: _hlc is required for % on ${tableName}', TG_OP;
304
+ END IF;
305
+ IF TG_OP = 'UPDATE' AND NEW._hlc <= OLD._hlc THEN
306
+ RAISE EXCEPTION 'sync-lib: _hlc must advance on UPDATE on ${tableName}';
307
+ END IF;
308
+ RETURN NEW;
309
+ END; $$ LANGUAGE plpgsql`,
310
+ `DROP TRIGGER IF EXISTS _sync_trg_${tableName}_hlc ON ${tableName}`,
311
+ `CREATE TRIGGER _sync_trg_${tableName}_hlc
312
+ BEFORE INSERT OR UPDATE ON ${tableName}
313
+ FOR EACH ROW EXECUTE FUNCTION _sync_trg_${tableName}_hlc_fn()`,
314
+ // Change logging
303
315
  `CREATE OR REPLACE FUNCTION _sync_trg_${tableName}_fn() RETURNS TRIGGER AS $$
304
316
  BEGIN
305
- INSERT INTO _sync_log (table_name, operation, row_key, row_data, _hlc)
306
- VALUES ('${tableName}', TG_OP,
307
- json_build_object(${keyExpr})::text,
308
- CASE WHEN TG_OP = 'DELETE' THEN NULL
309
- ELSE json_build_object(${dataExpr})::text END,
310
- COALESCE(NEW._hlc, OLD._hlc));
311
- RETURN COALESCE(NEW, OLD);
317
+ IF TG_OP = 'DELETE' THEN
318
+ INSERT INTO _sync_log (table_name, operation, row_key, row_data, _hlc)
319
+ VALUES ('${tableName}', 'DELETE', json_build_object(${keyExprOld})::text, NULL, OLD._hlc);
320
+ RETURN OLD;
321
+ ELSE
322
+ INSERT INTO _sync_log (table_name, operation, row_key, row_data, _hlc)
323
+ VALUES ('${tableName}', TG_OP, json_build_object(${keyExprNew})::text, json_build_object(${dataExpr})::text, NEW._hlc);
324
+ RETURN NEW;
325
+ END IF;
312
326
  END; $$ LANGUAGE plpgsql`,
313
327
  `DROP TRIGGER IF EXISTS _sync_trg_${tableName} ON ${tableName}`,
314
328
  `CREATE TRIGGER _sync_trg_${tableName}
@@ -318,6 +332,8 @@ const db = {
318
332
  },
319
333
  dropTriggersSQL(tableName) {
320
334
  return [
335
+ `DROP TRIGGER IF EXISTS _sync_trg_${tableName}_hlc ON ${tableName}`,
336
+ `DROP FUNCTION IF EXISTS _sync_trg_${tableName}_hlc_fn`,
321
337
  `DROP TRIGGER IF EXISTS _sync_trg_${tableName} ON ${tableName}`,
322
338
  `DROP FUNCTION IF EXISTS _sync_trg_${tableName}_fn`,
323
339
  ];
@@ -380,27 +396,49 @@ const db = {
380
396
  },
381
397
 
382
398
  // Optional: inject MySQL triggers so initTriggers() works
399
+ // Note: must include HLC validation (same as built-in dialect)
383
400
  triggersSQL(tableName, def) {
384
401
  const keyExpr = def.keyColumns.map(c => `'${c}', NEW.${c}`).join(', ');
385
402
  const dataExpr = def.dataColumns.map(c => `'${c}', NEW.${c}`).join(', ');
386
403
  const delKeyExpr = def.keyColumns.map(c => `'${c}', OLD.${c}`).join(', ');
387
404
  return [
405
+ // HLC validation: enforce valid, monotonically increasing _hlc
406
+ `CREATE TRIGGER _sync_trg_${tableName}_hlc_insert
407
+ BEFORE INSERT ON ${tableName} FOR EACH ROW
408
+ BEGIN
409
+ IF NEW._hlc IS NULL OR NEW._hlc = '' OR NEW._hlc = '0' THEN
410
+ SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = 'sync-lib: _hlc is required for INSERT on ${tableName}';
411
+ END IF;
412
+ END`,
413
+ `CREATE TRIGGER _sync_trg_${tableName}_hlc_update
414
+ BEFORE UPDATE ON ${tableName} FOR EACH ROW
415
+ BEGIN
416
+ IF NEW._hlc IS NULL OR NEW._hlc = '' OR NEW._hlc = '0' THEN
417
+ SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = 'sync-lib: _hlc is required for UPDATE on ${tableName}';
418
+ END IF;
419
+ IF NEW._hlc <= OLD._hlc THEN
420
+ SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = 'sync-lib: _hlc must advance on UPDATE on ${tableName}';
421
+ END IF;
422
+ END`,
423
+ // Change logging
388
424
  `CREATE TRIGGER _sync_trg_${tableName}_insert
389
- AFTER INSERT ON ${tableName} FOR EACH ROW
390
- INSERT INTO _sync_log (table_name, operation, row_key, row_data, _hlc)
391
- VALUES ('${tableName}', 'INSERT', JSON_OBJECT(${keyExpr}), JSON_OBJECT(${dataExpr}), NEW._hlc)`,
425
+ AFTER INSERT ON ${tableName} FOR EACH ROW
426
+ INSERT INTO _sync_log (table_name, operation, row_key, row_data, _hlc)
427
+ VALUES ('${tableName}', 'INSERT', JSON_OBJECT(${keyExpr}), JSON_OBJECT(${dataExpr}), NEW._hlc)`,
392
428
  `CREATE TRIGGER _sync_trg_${tableName}_update
393
- AFTER UPDATE ON ${tableName} FOR EACH ROW
394
- INSERT INTO _sync_log (table_name, operation, row_key, row_data, _hlc)
395
- VALUES ('${tableName}', 'UPDATE', JSON_OBJECT(${keyExpr}), JSON_OBJECT(${dataExpr}), NEW._hlc)`,
429
+ AFTER UPDATE ON ${tableName} FOR EACH ROW
430
+ INSERT INTO _sync_log (table_name, operation, row_key, row_data, _hlc)
431
+ VALUES ('${tableName}', 'UPDATE', JSON_OBJECT(${keyExpr}), JSON_OBJECT(${dataExpr}), NEW._hlc)`,
396
432
  `CREATE TRIGGER _sync_trg_${tableName}_delete
397
- AFTER DELETE ON ${tableName} FOR EACH ROW
398
- INSERT INTO _sync_log (table_name, operation, row_key, row_data, _hlc)
399
- VALUES ('${tableName}', 'DELETE', JSON_OBJECT(${delKeyExpr}), NULL, OLD._hlc)`,
433
+ AFTER DELETE ON ${tableName} FOR EACH ROW
434
+ INSERT INTO _sync_log (table_name, operation, row_key, row_data, _hlc)
435
+ VALUES ('${tableName}', 'DELETE', JSON_OBJECT(${delKeyExpr}), NULL, OLD._hlc)`,
400
436
  ];
401
437
  },
402
438
  dropTriggersSQL(tableName) {
403
439
  return [
440
+ `DROP TRIGGER IF EXISTS _sync_trg_${tableName}_hlc_insert`,
441
+ `DROP TRIGGER IF EXISTS _sync_trg_${tableName}_hlc_update`,
404
442
  `DROP TRIGGER IF EXISTS _sync_trg_${tableName}_insert`,
405
443
  `DROP TRIGGER IF EXISTS _sync_trg_${tableName}_update`,
406
444
  `DROP TRIGGER IF EXISTS _sync_trg_${tableName}_delete`,
@@ -419,7 +457,7 @@ const db = {
419
457
  |--------|-------------|
420
458
  | `registerTable(name, def)` | Register a business table for sync |
421
459
  | `initSchema()` | Create infrastructure tables (`_sync_log`, `_sync_peers`, `_tombstones`) |
422
- | `initTriggers()` | Create SQLite triggers (skip for other DBs) |
460
+ | `initTriggers()` | Create auto-logging triggers (dialect-aware; works for SQLite, PG, MySQL) |
423
461
  | `start()` / `stop()` | Start/stop leader election |
424
462
  | `peerConnected(id, opts)` | Notify: new peer connected |
425
463
  | `receiveMessage(id, raw)` | Notify: message received from peer |
@@ -428,13 +466,14 @@ const db = {
428
466
  | `tickPull()` | Pull changes from all peers |
429
467
  | `tickHeartbeat()` | Check timeouts, send pings |
430
468
  | `tickCleanup()` | Clean old sync_log and tombstones |
431
- | `beginManualTransaction(id)` | Start 2PC transaction |
432
- | `getManualTransactionEntries(id)` | Read sync_log entries from current txn |
433
- | `commitManualTransaction(id)` | Commit 2PC transaction |
434
- | `rollbackManualTransaction(id)` | Rollback 2PC transaction |
435
- | `waitForPrepareAck(writeId, entries, term, shardId)` | 2PC: broadcast prepare, wait for acks |
436
- | `broadcastCommit(writeId)` | 2PC: broadcast commit |
437
- | `broadcastAbort(writeId, reason)` | 2PC: broadcast abort |
469
+ | `write(fn, shardKey?)` | **High-level 2PC API**: execute fn in a 2PC transaction, engine handles everything automatically |
470
+ | `beginManualTransaction(id)` | Low-level: start 2PC transaction |
471
+ | `getManualTransactionEntries(id)` | Low-level: read sync_log entries from current txn |
472
+ | `commitManualTransaction(id)` | Low-level: commit 2PC transaction |
473
+ | `rollbackManualTransaction(id)` | Low-level: rollback 2PC transaction |
474
+ | `waitForPrepareAck(writeId, entries, term, shardId)` | Low-level 2PC: broadcast prepare, wait for acks |
475
+ | `broadcastCommit(writeId)` | Low-level 2PC: broadcast commit |
476
+ | `broadcastAbort(writeId, reason)` | Low-level 2PC: broadcast abort |
438
477
  | `proxyRequest(key, payload)` | Forward write request to leader |
439
478
  | `getShardId(key)` | Get shard ID for a key |
440
479
  | `isLeaderForShard(shardId)` | Check if local node is leader for shard |
@@ -444,19 +483,17 @@ const db = {
444
483
  ### Standalone utilities
445
484
 
446
485
  ```js
447
- import { HLC, DIALECTS, applyDialect, logChange, getInfraSchemaSQL, getTriggersSQL } from 'sync-lib';
486
+ import { HLC, DIALECTS, applyDialect, logChange, applyEntries } from 'sync-lib';
448
487
  ```
449
488
 
450
489
  | Export | Description |
451
490
  |--------|-------------|
452
491
  | `HLC` | Hybrid Logical Clock class |
453
492
  | `LeaderElection` | Leader election state machine |
454
- | `DIALECTS` | Built-in dialect definitions (sqlite, postgresql, mysql) |
493
+ | `DIALECTS` | Built-in dialect definitions (sqlite, postgresql, mysql). Each dialect provides `infraSchemaSQL`, `upsertSQL`, `triggersSQL`, `dropTriggersSQL` |
455
494
  | `applyDialect(db, name)` | Apply a dialect's methods to a db adapter |
456
495
  | `validateDialectMethods(db)` | Validate all required dialect methods exist |
457
496
  | `logChange(db, table, op, key, data, hlc)` | Manually log a change (alternative to triggers) |
458
- | `getInfraSchemaSQL()` | Built-in SQLite DDL for infrastructure tables |
459
- | `getTriggersSQL(table, def)` | Generate SQLite sync triggers |
460
497
  | `applyEntries(db, entries, hlc, registry)` | Apply remote entries idempotently |
461
498
 
462
499
  ---
package/README.zh-CN.md CHANGED
@@ -118,6 +118,12 @@ const engine = new SyncEngine({
118
118
  console.log(`分片 ${shardId}: leader=${leaderId} local=${isLocal}`);
119
119
  },
120
120
 
121
+ // 必需:任何节点都可能当选 Leader,必须能处理 Follower 转发来的写请求
122
+ onExecuteProxyRequest: async (payload) => {
123
+ // 在本地执行写请求并返回结果
124
+ return await yourBusinessLogic(payload);
125
+ },
126
+
121
127
  // 可选回调
122
128
  onWriteCompleted: () => engine.notifyLocalWrite(),
123
129
  onError: (ctx, err) => console.error(`[sync] ${ctx}:`, err.message),
@@ -126,6 +132,7 @@ const engine = new SyncEngine({
126
132
  engine.registerTable('users', {
127
133
  keyColumns: ['user_id'],
128
134
  dataColumns: ['user_id', 'name', 'email', '_hlc'], // 必须包含 _hlc
135
+ // registerTable 会校验:keyColumns 非空、dataColumns 非空、dataColumns 包含 '_hlc'
129
136
  validator: (row) => { if (!row.user_id) throw new Error('缺少 user_id'); },
130
137
  });
131
138
 
@@ -161,35 +168,24 @@ setInterval(() => engine.tickCleanup(), 3600_000); // 每 1 小时清理旧日
161
168
 
162
169
  ### 第 5 步:写入数据
163
170
 
164
- 指定了 `dialect` 并调用 `initTriggers()` 后,所有数据库的写入方式完全一致——触发器自动记录变更:
171
+ > **重要:** 对已注册表的所有写操作(INSERT/UPDATE/DELETE)**必须**通过 `engine.write()` 或手动使用 `engine.beginManualTransaction()` / `commitManualTransaction()` 包裹。**不要**在这些包裹之外直接调用 `db.run(...)` 写入——引擎需要管理事务和 2PC 协调,以确保数据正确同步到其他节点。
165
172
 
166
- ```js
167
- const ts = engine.hlc.tick();
168
- db.run('INSERT INTO users (user_id, name, _hlc) VALUES (?, ?, ?)',
169
- ['u1', 'Alice', ts]);
170
- engine.notifyLocalWrite(); // 推送到对等节点
171
- ```
173
+ **推荐:`engine.write()`(高阶 API)**
172
174
 
173
- **不使用触发器时**(手动 logChange 备选方案):
175
+ 引擎自动处理完整生命周期:BEGIN → 执行 → 触发器记录 → 2PC prepare/ack → COMMIT → 广播 commit → 通知对等节点。
174
176
 
175
177
  ```js
176
- import { logChange } from 'sync-lib';
177
-
178
- const ts = engine.hlc.tick();
179
- db.run('INSERT INTO users (user_id, name, _hlc) VALUES (?, ?, ?)',
180
- ['u1', 'Alice', ts]);
181
- logChange(db, 'users', 'INSERT',
182
- JSON.stringify({ user_id: 'u1' }),
183
- JSON.stringify({ user_id: 'u1', name: 'Alice', _hlc: ts }),
184
- ts);
185
- engine.notifyLocalWrite();
178
+ const result = await engine.write(async (db) => {
179
+ const ts = engine.hlc.tick();
180
+ db.run('INSERT INTO users (user_id, name, _hlc) VALUES (?, ?, ?)',
181
+ ['u1', 'Alice', ts]);
182
+ return { userId: 'u1' }; // 返回值透传给调用方
183
+ }, userId); // shardKey 用于路由
186
184
  ```
187
185
 
188
- ---
186
+ **低阶 API(需要精细控制时)**
189
187
 
190
- ## 2PC:强一致性写入
191
-
192
- 需要原子性多节点写入时:
188
+ 如果需要更精细的控制(如在 prepare 前读取 entries),可使用低阶 API:
193
189
 
194
190
  ```js
195
191
  const writeId = crypto.randomUUID();
@@ -296,19 +292,39 @@ const db = {
296
292
  },
297
293
 
298
294
  // 可选:注入 PG 触发器,使 initTriggers() 自动创建
295
+ // 注意:必须包含 HLC 校验逻辑(与内置方言一致)
299
296
  triggersSQL(tableName, def) {
300
- const keyExpr = def.keyColumns.map(c => `'${c}', NEW.${c}`).join(', ');
297
+ const keyExprNew = def.keyColumns.map(c => `'${c}', NEW.${c}`).join(', ');
298
+ const keyExprOld = def.keyColumns.map(c => `'${c}', OLD.${c}`).join(', ');
301
299
  const dataExpr = def.dataColumns.map(c => `'${c}', NEW.${c}`).join(', ');
302
300
  return [
301
+ // HLC 校验:强制要求 _hlc 有效且单调递增
302
+ `CREATE OR REPLACE FUNCTION _sync_trg_${tableName}_hlc_fn() RETURNS TRIGGER AS $$
303
+ BEGIN
304
+ IF NEW._hlc IS NULL OR NEW._hlc = '' OR NEW._hlc = '0' THEN
305
+ RAISE EXCEPTION 'sync-lib: _hlc is required for % on ${tableName}', TG_OP;
306
+ END IF;
307
+ IF TG_OP = 'UPDATE' AND NEW._hlc <= OLD._hlc THEN
308
+ RAISE EXCEPTION 'sync-lib: _hlc must advance on UPDATE on ${tableName}';
309
+ END IF;
310
+ RETURN NEW;
311
+ END; $$ LANGUAGE plpgsql`,
312
+ `DROP TRIGGER IF EXISTS _sync_trg_${tableName}_hlc ON ${tableName}`,
313
+ `CREATE TRIGGER _sync_trg_${tableName}_hlc
314
+ BEFORE INSERT OR UPDATE ON ${tableName}
315
+ FOR EACH ROW EXECUTE FUNCTION _sync_trg_${tableName}_hlc_fn()`,
316
+ // 变更记录
303
317
  `CREATE OR REPLACE FUNCTION _sync_trg_${tableName}_fn() RETURNS TRIGGER AS $$
304
318
  BEGIN
305
- INSERT INTO _sync_log (table_name, operation, row_key, row_data, _hlc)
306
- VALUES ('${tableName}', TG_OP,
307
- json_build_object(${keyExpr})::text,
308
- CASE WHEN TG_OP = 'DELETE' THEN NULL
309
- ELSE json_build_object(${dataExpr})::text END,
310
- COALESCE(NEW._hlc, OLD._hlc));
311
- RETURN COALESCE(NEW, OLD);
319
+ IF TG_OP = 'DELETE' THEN
320
+ INSERT INTO _sync_log (table_name, operation, row_key, row_data, _hlc)
321
+ VALUES ('${tableName}', 'DELETE', json_build_object(${keyExprOld})::text, NULL, OLD._hlc);
322
+ RETURN OLD;
323
+ ELSE
324
+ INSERT INTO _sync_log (table_name, operation, row_key, row_data, _hlc)
325
+ VALUES ('${tableName}', TG_OP, json_build_object(${keyExprNew})::text, json_build_object(${dataExpr})::text, NEW._hlc);
326
+ RETURN NEW;
327
+ END IF;
312
328
  END; $$ LANGUAGE plpgsql`,
313
329
  `DROP TRIGGER IF EXISTS _sync_trg_${tableName} ON ${tableName}`,
314
330
  `CREATE TRIGGER _sync_trg_${tableName}
@@ -318,6 +334,8 @@ const db = {
318
334
  },
319
335
  dropTriggersSQL(tableName) {
320
336
  return [
337
+ `DROP TRIGGER IF EXISTS _sync_trg_${tableName}_hlc ON ${tableName}`,
338
+ `DROP FUNCTION IF EXISTS _sync_trg_${tableName}_hlc_fn`,
321
339
  `DROP TRIGGER IF EXISTS _sync_trg_${tableName} ON ${tableName}`,
322
340
  `DROP FUNCTION IF EXISTS _sync_trg_${tableName}_fn`,
323
341
  ];
@@ -380,27 +398,49 @@ const db = {
380
398
  },
381
399
 
382
400
  // 可选:注入 MySQL 触发器,使 initTriggers() 自动创建
401
+ // 注意:必须包含 HLC 校验逻辑(与内置方言一致)
383
402
  triggersSQL(tableName, def) {
384
403
  const keyExpr = def.keyColumns.map(c => `'${c}', NEW.${c}`).join(', ');
385
404
  const dataExpr = def.dataColumns.map(c => `'${c}', NEW.${c}`).join(', ');
386
405
  const delKeyExpr = def.keyColumns.map(c => `'${c}', OLD.${c}`).join(', ');
387
406
  return [
407
+ // HLC 校验:强制要求 _hlc 有效且单调递增
408
+ `CREATE TRIGGER _sync_trg_${tableName}_hlc_insert
409
+ BEFORE INSERT ON ${tableName} FOR EACH ROW
410
+ BEGIN
411
+ IF NEW._hlc IS NULL OR NEW._hlc = '' OR NEW._hlc = '0' THEN
412
+ SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = 'sync-lib: _hlc is required for INSERT on ${tableName}';
413
+ END IF;
414
+ END`,
415
+ `CREATE TRIGGER _sync_trg_${tableName}_hlc_update
416
+ BEFORE UPDATE ON ${tableName} FOR EACH ROW
417
+ BEGIN
418
+ IF NEW._hlc IS NULL OR NEW._hlc = '' OR NEW._hlc = '0' THEN
419
+ SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = 'sync-lib: _hlc is required for UPDATE on ${tableName}';
420
+ END IF;
421
+ IF NEW._hlc <= OLD._hlc THEN
422
+ SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = 'sync-lib: _hlc must advance on UPDATE on ${tableName}';
423
+ END IF;
424
+ END`,
425
+ // 变更记录
388
426
  `CREATE TRIGGER _sync_trg_${tableName}_insert
389
- AFTER INSERT ON ${tableName} FOR EACH ROW
390
- INSERT INTO _sync_log (table_name, operation, row_key, row_data, _hlc)
391
- VALUES ('${tableName}', 'INSERT', JSON_OBJECT(${keyExpr}), JSON_OBJECT(${dataExpr}), NEW._hlc)`,
427
+ AFTER INSERT ON ${tableName} FOR EACH ROW
428
+ INSERT INTO _sync_log (table_name, operation, row_key, row_data, _hlc)
429
+ VALUES ('${tableName}', 'INSERT', JSON_OBJECT(${keyExpr}), JSON_OBJECT(${dataExpr}), NEW._hlc)`,
392
430
  `CREATE TRIGGER _sync_trg_${tableName}_update
393
- AFTER UPDATE ON ${tableName} FOR EACH ROW
394
- INSERT INTO _sync_log (table_name, operation, row_key, row_data, _hlc)
395
- VALUES ('${tableName}', 'UPDATE', JSON_OBJECT(${keyExpr}), JSON_OBJECT(${dataExpr}), NEW._hlc)`,
431
+ AFTER UPDATE ON ${tableName} FOR EACH ROW
432
+ INSERT INTO _sync_log (table_name, operation, row_key, row_data, _hlc)
433
+ VALUES ('${tableName}', 'UPDATE', JSON_OBJECT(${keyExpr}), JSON_OBJECT(${dataExpr}), NEW._hlc)`,
396
434
  `CREATE TRIGGER _sync_trg_${tableName}_delete
397
- AFTER DELETE ON ${tableName} FOR EACH ROW
398
- INSERT INTO _sync_log (table_name, operation, row_key, row_data, _hlc)
399
- VALUES ('${tableName}', 'DELETE', JSON_OBJECT(${delKeyExpr}), NULL, OLD._hlc)`,
435
+ AFTER DELETE ON ${tableName} FOR EACH ROW
436
+ INSERT INTO _sync_log (table_name, operation, row_key, row_data, _hlc)
437
+ VALUES ('${tableName}', 'DELETE', JSON_OBJECT(${delKeyExpr}), NULL, OLD._hlc)`,
400
438
  ];
401
439
  },
402
440
  dropTriggersSQL(tableName) {
403
441
  return [
442
+ `DROP TRIGGER IF EXISTS _sync_trg_${tableName}_hlc_insert`,
443
+ `DROP TRIGGER IF EXISTS _sync_trg_${tableName}_hlc_update`,
404
444
  `DROP TRIGGER IF EXISTS _sync_trg_${tableName}_insert`,
405
445
  `DROP TRIGGER IF EXISTS _sync_trg_${tableName}_update`,
406
446
  `DROP TRIGGER IF EXISTS _sync_trg_${tableName}_delete`,
@@ -419,7 +459,7 @@ const db = {
419
459
  |------|------|
420
460
  | `registerTable(name, def)` | 注册业务表用于同步 |
421
461
  | `initSchema()` | 创建基础设施表(`_sync_log`、`_sync_peers`、`_tombstones`) |
422
- | `initTriggers()` | 创建 SQLite 触发器(其他数据库跳过) |
462
+ | `initTriggers()` | 创建同步触发器(方言感知,支持 SQLite、PG、MySQL) |
423
463
  | `start()` / `stop()` | 启动/停止 Leader 选举 |
424
464
  | `peerConnected(id, opts)` | 通知:新的对等节点已连接 |
425
465
  | `receiveMessage(id, raw)` | 通知:收到对等节点消息 |
@@ -428,13 +468,14 @@ const db = {
428
468
  | `tickPull()` | 从所有对等节点拉取变更 |
429
469
  | `tickHeartbeat()` | 检查超时,发送心跳 |
430
470
  | `tickCleanup()` | 清理旧的 sync_log 和墓碑记录 |
431
- | `beginManualTransaction(id)` | 开始 2PC 事务 |
432
- | `getManualTransactionEntries(id)` | 读取当前事务的 sync_log 条目 |
433
- | `commitManualTransaction(id)` | 提交 2PC 事务 |
434
- | `rollbackManualTransaction(id)` | 回滚 2PC 事务 |
435
- | `waitForPrepareAck(writeId, entries, term, shardId)` | 2PC:广播 prepare,等待确认 |
436
- | `broadcastCommit(writeId)` | 2PC:广播 commit |
437
- | `broadcastAbort(writeId, reason)` | 2PC:广播 abort |
471
+ | `write(fn, shardKey?)` | **高阶 2PC API**:在 2PC 事务中执行 fn,引擎自动处理全部流程 |
472
+ | `beginManualTransaction(id)` | 低阶:开始 2PC 事务 |
473
+ | `getManualTransactionEntries(id)` | 低阶:读取当前事务的 sync_log 条目 |
474
+ | `commitManualTransaction(id)` | 低阶:提交 2PC 事务 |
475
+ | `rollbackManualTransaction(id)` | 低阶:回滚 2PC 事务 |
476
+ | `waitForPrepareAck(writeId, entries, term, shardId)` | 低阶 2PC:广播 prepare,等待确认 |
477
+ | `broadcastCommit(writeId)` | 低阶 2PC:广播 commit |
478
+ | `broadcastAbort(writeId, reason)` | 低阶 2PC:广播 abort |
438
479
  | `proxyRequest(key, payload)` | 将写请求转发给 Leader |
439
480
  | `getShardId(key)` | 获取 key 对应的分片 ID |
440
481
  | `isLeaderForShard(shardId)` | 检查本节点是否为该分片的 Leader |
@@ -444,19 +485,17 @@ const db = {
444
485
  ### 独立工具函数
445
486
 
446
487
  ```js
447
- import { HLC, DIALECTS, applyDialect, logChange, getInfraSchemaSQL, getTriggersSQL } from 'sync-lib';
488
+ import { HLC, DIALECTS, applyDialect, logChange, applyEntries } from 'sync-lib';
448
489
  ```
449
490
 
450
491
  | 导出 | 说明 |
451
492
  |------|------|
452
493
  | `HLC` | 混合逻辑时钟类 |
453
494
  | `LeaderElection` | Leader 选举状态机 |
454
- | `DIALECTS` | 内置方言定义(sqlite、postgresql、mysql) |
495
+ | `DIALECTS` | 内置方言定义(sqlite、postgresql、mysql)。每种方言提供 `infraSchemaSQL`、`upsertSQL`、`triggersSQL`、`dropTriggersSQL` |
455
496
  | `applyDialect(db, name)` | 将方言方法应用到 db 适配器 |
456
497
  | `validateDialectMethods(db)` | 校验 db 是否具备所有必需的方言方法 |
457
498
  | `logChange(db, table, op, key, data, hlc)` | 手动记录变更(触发器的替代方案) |
458
- | `getInfraSchemaSQL()` | 内置 SQLite DDL(基础设施表) |
459
- | `getTriggersSQL(table, def)` | 生成 SQLite 同步触发器 |
460
499
  | `applyEntries(db, entries, hlc, registry)` | 幂等地应用远程条目 |
461
500
 
462
501
  ---
package/index.js CHANGED
@@ -39,11 +39,12 @@
39
39
  * onSendToPeer: (peerId, msg) => transport.send(peerId, msg),
40
40
  * onClosePeer: (peerId, reason) => transport.close(peerId),
41
41
  * onLeaderChanged: (shardId, leaderId, isLocal) => { ... },
42
+ * onExecuteProxyRequest: async (payload) => handleRequest(payload),
43
+ * // ⚠️ 必须注册:任何节点都可能当选 Leader,必须能处理 Follower 转发的写请求
42
44
  *
43
45
  * // ── 可选回调 ──
44
46
  * onWriteCompleted: () => engine.notifyLocalWrite(),
45
47
  * onError: (ctx, err) => logger.error(ctx, err),
46
- * onExecuteProxyRequest: (payload) => handleRequest(payload),
47
48
  *
48
49
  * // ── 可选配置(全部有默认值)──
49
50
  * numShards: 16,
@@ -80,19 +81,26 @@
80
81
  * setInterval(() => engine.tickCleanup(), 3600_000);
81
82
  * setInterval(() => engine.tickHeartbeat(), 15_000);
82
83
  *
83
- * // 7. 业务写操作后通知推送
84
- * db.run('INSERT INTO users ...');
85
- * engine.notifyLocalWrite();
86
- *
87
- * // 8. 未使用触发器时的备选方案:手动记录变更
88
- * db.run('INSERT INTO users ...');
89
- * engine.logChange('users', 'INSERT', { user_id: 'u1' }, { user_id: 'u1', name: 'Alice', ... });
90
- * engine.notifyLocalWrite();
84
+ * // 7. 写操作:已注册表的所有增删改必须通过 engine.write() 或手动事务包裹
85
+ * // ⚠️ 不要直接 db.run('INSERT ...') — 引擎需要管理事务和 2PC 同步
86
+ * const result = await engine.write(async (db) => {
87
+ * const ts = engine.hlc.tick();
88
+ * db.run('INSERT INTO users ...', [ts, ...]);
89
+ * return { userId: 'u1' };
90
+ * }, userId);
91
91
  *
92
92
  * ═══════════════════════════════════════════════════════════════════
93
93
  * 2PC 写操作流程(多节点强一致)
94
94
  * ═══════════════════════════════════════════════════════════════════
95
95
  *
96
+ * // 高阶 API(推荐):引擎自动处理 BEGIN/COMMIT/ROLLBACK 和 2PC 协调
97
+ * const result = await engine.write(async (db) => {
98
+ * const ts = engine.hlc.tick();
99
+ * db.run('INSERT INTO users ...', [ts, ...]);
100
+ * return { userId: 'u1' }; // 返回值透传
101
+ * }, userId); // shardKey 用于路由
102
+ *
103
+ * // 低阶 API(需要精细控制时使用)
96
104
  * const writeId = crypto.randomUUID();
97
105
  * engine.beginManualTransaction(writeId);
98
106
  * try {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@raft-hlc-sync-protocol/raft-sync-lib",
3
- "version": "1.0.4",
3
+ "version": "1.0.6",
4
4
  "description": "pure javascript raft and hlc sync protocol",
5
5
  "keywords": [
6
6
  "raft",
package/sync-engine.js CHANGED
@@ -11,9 +11,12 @@
11
11
  * - 不依赖 Node.js 特有 API
12
12
  * - 不持有任何定时器(setTimeout/setInterval)
13
13
  *
14
- * 所有外部能力通过构造函数注入回调接口:
15
- * 1. DatabaseAdapter 数据库操作
16
- * 2. onXXX 回调 — 向外部通知事件 / 请求外部执行动作
14
+ * 所有外部能力通过构造函数注入:
15
+ * 1. dialect — 预定义 SQL 方言('sqlite' | 'postgresql' | 'mysql'),
16
+ * 自动填充 DatabaseAdapter 上的事务、upsert、schema、触发器方法。
17
+ * 不指定时需手动在 db 上实现全部方言方法,否则报错。
18
+ * 2. DatabaseAdapter — 数据库操作(基础查询:run/get/all/exec)
19
+ * 3. onXXX 回调 — 向外部通知事件 / 请求外部执行动作
17
20
  *
18
21
  * 所有周期性任务通过 tick*() 方法暴露,由外部调度:
19
22
  * - tickPull() — 从所有 peer 拉取数据(建议 10s 间隔)
@@ -62,9 +65,12 @@
62
65
  * │ 错误通知。context 是出错的位置字符串。 │
63
66
  * │ │
64
67
  * │ onExecuteProxyRequest(payload) → Promise<object> │
65
- * │ Leader 收到代理请求后的执行回调。
68
+ * │ Leader 收到 Follower 转发的写请求后的执行回调。 │
66
69
  * │ payload / response 格式由外部业务自行定义,对模块透明。 │
67
70
  * │ │
71
+ * │ ⚠️ 多节点部署时必须注册:节点无法控制自己是否当选 Leader, │
72
+ * │ 若当选后未注册此回调,Follower 转发来的请求将返回错误。 │
73
+ * │ │
68
74
  * └────────────────────────────────────────────────────────────┘
69
75
  *
70
76
  * ═══════════════════════════════════════════════════════════════
@@ -104,10 +110,14 @@
104
110
  * setInterval(() => engine.tickCleanup(), 3600_000);
105
111
  * setInterval(() => engine.tickHeartbeat(), 15_000);
106
112
  *
107
- * // 6. 业务写操作后通知推送
108
- * app.post('/api/create', (req, res) => {
109
- * db.run('INSERT INTO ...');
110
- * engine.notifyLocalWrite();
113
+ * // 6. 写操作:已注册表的所有增删改必须通过 engine.write() 或手动事务包裹
114
+ * // ⚠️ 不要直接 db.run('INSERT ...') — 引擎需要管理事务和 2PC 同步
115
+ * app.post('/api/create', async (req, res) => {
116
+ * const result = await engine.write(async (db) => {
117
+ * const ts = engine.hlc.tick();
118
+ * db.run('INSERT INTO users ...', [ts, ...]);
119
+ * return { ok: true };
120
+ * }, userId);
111
121
  * });
112
122
  */
113
123
 
@@ -179,7 +189,12 @@ export class SyncEngine {
179
189
  * ── 可选回调 ──
180
190
  * @param {function(): void} [options.onWriteCompleted] - 同步数据写入完成通知
181
191
  * @param {function(string, Error): void} [options.onError] - 错误通知
182
- * @param {function(object): Promise<object>} [options.onExecuteProxyRequest] - 代理请求执行
192
+ *
193
+ * ── 必须回调 ──
194
+ * @param {function(object): Promise<object>} options.onExecuteProxyRequest - 代理请求执行
195
+ * Leader 收到 Follower 转发的写请求后的执行回调。
196
+ * 节点无法控制自己是否当选 Leader,因此此回调始终必须注册。
197
+ * payload / response 格式由外部业务自行定义,对模块透明。
183
198
  *
184
199
  * ── 方言 ──
185
200
  * @param {string} [options.dialect] - 数据库方言:'sqlite' | 'postgresql' | 'mysql'
@@ -217,7 +232,16 @@ export class SyncEngine {
217
232
  // ===== 可选回调 =====
218
233
  this._onWriteCompleted = options.onWriteCompleted || (() => {});
219
234
  this._onError = options.onError || (() => {});
220
- this._onExecuteProxyRequest = options.onExecuteProxyRequest || null;
235
+
236
+ // ===== 必须回调:onExecuteProxyRequest =====
237
+ // 节点无法控制自己是否当选 Leader,因此此回调始终必须注册。
238
+ if (typeof options.onExecuteProxyRequest !== 'function') {
239
+ throw new Error(
240
+ 'SyncEngine: onExecuteProxyRequest is required. ' +
241
+ 'Any node can be elected as Leader and must be able to handle proxied write requests from Followers.'
242
+ );
243
+ }
244
+ this._onExecuteProxyRequest = options.onExecuteProxyRequest;
221
245
 
222
246
  // ===== 外部配置 =====
223
247
  this._numShards = options.numShards ?? 16;
@@ -278,12 +302,21 @@ export class SyncEngine {
278
302
  * @param {string} name - 表名
279
303
  * @param {object} def
280
304
  * @param {string[]} def.keyColumns - 主键列名数组
281
- * @param {string[]} def.dataColumns - 全部列名数组(含 _hlc)
305
+ * @param {string[]} def.dataColumns - 全部列名数组(必须包含 '_hlc')
282
306
  * @param {string[]} [def.blobColumns] - BLOB 列名(触发器中用 hex 序列化)
283
307
  * @param {function(object): void} [def.validator] - 数据验证,不合法时抛异常
284
308
  * @param {function(object): object} [def.deserializer] - 反序列化(如 hex→Buffer)
285
309
  */
286
310
  registerTable(name, def) {
311
+ if (!def.keyColumns || def.keyColumns.length === 0) {
312
+ throw new Error(`registerTable('${name}'): keyColumns is required and must be non-empty`);
313
+ }
314
+ if (!def.dataColumns || def.dataColumns.length === 0) {
315
+ throw new Error(`registerTable('${name}'): dataColumns is required and must be non-empty`);
316
+ }
317
+ if (!def.dataColumns.includes('_hlc')) {
318
+ throw new Error(`registerTable('${name}'): dataColumns must include '_hlc' column`);
319
+ }
287
320
  this._tableRegistry.set(name, {
288
321
  keyColumns: def.keyColumns,
289
322
  dataColumns: def.dataColumns,
@@ -528,6 +561,71 @@ export class SyncEngine {
528
561
  }
529
562
  }
530
563
 
564
+ // ╔═══════════════════════════════════════╗
565
+ // ║ 2PC 写操作高阶 API ║
566
+ // ╚═══════════════════════════════════════╝
567
+
568
+ /**
569
+ * 在 2PC 事务中执行写操作(高阶 API,自动处理事务开启/提交/回滚)
570
+ *
571
+ * 适用于多节点部署下需要强一致性的写操作。
572
+ * 引擎自动完成:BEGIN → 执行 fn → 广播 prepare → 等待 quorum ack → COMMIT → 广播 commit
573
+ *
574
+ * @param {function(db: DatabaseAdapter): any} fn - 写操作函数,在事务中执行
575
+ * fn 接收 db 适配器,可直接调用 db.run/get/all 执行业务写操作。
576
+ * fn 的返回值会作为 write() 的返回值透传。
577
+ * @param {string} [shardKey] - 分片键(如 userId),用于确定 term 和 shardId
578
+ * @returns {Promise<any>} fn 的返回值
579
+ *
580
+ * @example
581
+ * const result = await engine.write(async (db) => {
582
+ * const ts = engine.hlc.tick();
583
+ * db.run('INSERT INTO users (user_id, name, _hlc) VALUES (?, ?, ?)', ['u1', 'Alice', ts]);
584
+ * return { userId: 'u1' };
585
+ * }, userId);
586
+ */
587
+ async write(fn, shardKey) {
588
+ const writeId = randomUUID();
589
+ const shardId = shardKey ? this.getShardId(shardKey) : 0;
590
+ const term = this._shardElections.get(shardId)?._currentTerm || 0;
591
+
592
+ this.beginManualTransaction(writeId);
593
+ let result;
594
+ let prepareSent = false;
595
+ let committed = false;
596
+ try {
597
+ result = await fn(this.db);
598
+
599
+ const entries = this.getManualTransactionEntries(writeId);
600
+ if (entries.length > 0 && this._getActivePeerIds().length > 0) {
601
+ prepareSent = true;
602
+ await this.waitForPrepareAck(writeId, entries, term, shardId);
603
+
604
+ // 验证 term 未变(防止 prepare 等待期间发生选举导致脑裂)
605
+ const termAfter = this._shardElections.get(shardId)?._currentTerm || 0;
606
+ if (termAfter !== term) {
607
+ throw new Error('2PC prepare rejected: term changed during prepare (was ' + term + ', now ' + termAfter + ')');
608
+ }
609
+ }
610
+
611
+ this.commitManualTransaction(writeId);
612
+ committed = true;
613
+
614
+ if (entries && entries.length > 0) {
615
+ try { this.broadcastCommit(writeId); } catch (_) {}
616
+ }
617
+ } catch (err) {
618
+ if (!committed) {
619
+ this.rollbackManualTransaction(writeId);
620
+ }
621
+ if (prepareSent && !committed) {
622
+ try { this.broadcastAbort(writeId, err.message); } catch (_) {}
623
+ }
624
+ throw err;
625
+ }
626
+ return result;
627
+ }
628
+
531
629
  // ╔═══════════════════════════════════════╗
532
630
  // ║ 手动事务管理(2PC Leader 侧) ║
533
631
  // ╚═══════════════════════════════════════╝
@@ -643,7 +741,12 @@ export class SyncEngine {
643
741
  }
644
742
 
645
743
  if (leaderNodeId === this.nodeId) {
646
- if (!this._onExecuteProxyRequest) throw new Error('onExecuteProxyRequest not registered');
744
+ if (!this._onExecuteProxyRequest) {
745
+ throw new Error(
746
+ 'onExecuteProxyRequest is not registered on this node, but it is the Leader. ' +
747
+ 'In a multi-node deployment, all nodes must register onExecuteProxyRequest.'
748
+ );
749
+ }
647
750
  return this._onExecuteProxyRequest(payload);
648
751
  }
649
752
 
@@ -843,7 +946,15 @@ export class SyncEngine {
843
946
  const { requestId, payload } = msg;
844
947
  try {
845
948
  if (!this._onExecuteProxyRequest) {
846
- this._sendToPeer(peerId, makeProxyRes(requestId, { error: 'proxy not supported' }));
949
+ // onExecuteProxyRequest 未注册:当前节点是 Leader 但无法处理代理请求。
950
+ // 这通常意味着多节点部署时忘记注册此回调。
951
+ const err = new Error(
952
+ 'onExecuteProxyRequest is not registered on this node, but it was elected as Leader. ' +
953
+ 'In a multi-node deployment, all nodes must register onExecuteProxyRequest ' +
954
+ 'because any node can become Leader.'
955
+ );
956
+ this._onError('proxy_exec', err);
957
+ this._sendToPeer(peerId, makeProxyRes(requestId, { error: err.message }));
847
958
  return;
848
959
  }
849
960
  const result = await this._onExecuteProxyRequest(payload);
package/sync-protocol.js CHANGED
@@ -407,7 +407,7 @@ export function cleanTombstones(db, retentionDays = 7) {
407
407
 
408
408
  /**
409
409
  * 手动记录一条数据变更到 _sync_log
410
- * 适用于不支持触发器或不使用 SQLite 的数据库。
410
+ * 适用于不支持触发器的数据库。
411
411
  * 外部业务代码在每次写操作后调用此方法。
412
412
  *
413
413
  * @param {DatabaseAdapter} db