oak-backend-base 3.4.0 → 3.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/AppLoader.d.ts +1 -1
- package/lib/AppLoader.js +16 -23
- package/lib/Synchronizer.d.ts +9 -19
- package/lib/Synchronizer.js +218 -186
- package/package.json +2 -2

package/lib/AppLoader.d.ts
CHANGED

@@ -12,7 +12,7 @@ export declare class AppLoader<ED extends EntityDict & BaseEntityDict, Cxt exten
     private aspectDict;
     private externalDependencies;
     protected dataSubscriber?: DataSubscriber<ED, Cxt>;
-    protected synchronizers?: Synchronizer<ED, Cxt>[];
+    protected synchronizer?: Synchronizer<ED, Cxt>;
     protected contextBuilder: (scene?: string) => (store: DbStore<ED, Cxt>) => Promise<Cxt>;
     private requireSth;
     protected makeContext(cxtStr?: string, headers?: IncomingHttpHeaders): Promise<Cxt>;
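
The only declaration change here: the loader's collection of per-config synchronizers becomes at most one instance. A stand-in sketch of what that means at call sites (structural types replace the real generics; nothing below is from the package itself):

    // TypeScript sketch; getSyncTriggers is the only member shown.
    function getTriggersOld(synchronizers?: { getSyncTriggers(): unknown[] }[]): unknown[] {
        return synchronizers?.flatMap((s) => s.getSyncTriggers()) ?? []; // 3.4.0: optional array
    }
    function getTriggersNew(synchronizer?: { getSyncTriggers(): unknown[] }): unknown[] {
        return synchronizer?.getSyncTriggers() ?? [];                    // 3.4.1: single optional instance
    }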

package/lib/AppLoader.js
CHANGED

@@ -21,7 +21,7 @@ class AppLoader extends types_1.AppLoader {
     aspectDict;
     externalDependencies;
     dataSubscriber;
-    synchronizers;
+    synchronizer;
     contextBuilder;
     requireSth(filePath) {
         const depFilePath = (0, path_1.join)(this.path, filePath);
@@ -104,7 +104,7 @@ class AppLoader extends types_1.AppLoader {
         const syncConfigs = (0, fs_1.existsSync)(syncConfigFile) && require(syncConfigFile).default;
         return {
             dbConfig: dbConfig,
-            syncConfigs,
+            syncConfig: syncConfigs,
         };
     }
     constructor(path, contextBuilder, ns, nsServer) {
@@ -149,20 +149,18 @@ class AppLoader extends types_1.AppLoader {
         adTriggers.forEach((trigger) => this.registerTrigger(trigger));
         checkers.forEach((checker) => this.dbStore.registerChecker(checker));
         adCheckers.forEach((checker) => this.dbStore.registerChecker(checker));
-        if (this.synchronizers) {
-            // syncing data to remote nodes is done through commit triggers
-            this.synchronizers.forEach((synchronizer) => {
-                const syncTriggers = synchronizer.getSyncTriggers();
-                syncTriggers.forEach((trigger) => this.registerTrigger(trigger));
-            });
+        if (this.synchronizer) {
+            // syncing data to remote nodes is done through commit triggers
+            const syncTriggers = this.synchronizer.getSyncTriggers();
+            syncTriggers.forEach((trigger) => this.registerTrigger(trigger));
         }
     }
     async mount(initialize) {
         const { path } = this;
         if (!initialize) {
-            const { syncConfigs } = this.getConfiguration();
-            if (syncConfigs) {
-                this.synchronizers = …
+            const { syncConfig: syncConfig } = this.getConfiguration();
+            if (syncConfig) {
+                this.synchronizer = new Synchronizer_1.default(syncConfig, this.dbStore.getSchema());
             }
             this.initTriggers();
         }
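
Read together, the getConfiguration and mount hunks change the wiring from "a list of sync configs, one Synchronizer each" to "one syncConfig, one Synchronizer". A minimal sketch of the new mount-time branch, with stand-in types for the oak-domain shapes:

    // All types here are placeholders, not the real oak-domain ones.
    type SyncConfig = object;
    type Schema = object;
    class Synchronizer {
        constructor(public config: SyncConfig, public schema: Schema) {}
    }

    interface LoaderParts {
        getConfiguration(): { dbConfig: object; syncConfig?: SyncConfig };
        dbStore: { getSchema(): Schema };
        synchronizer?: Synchronizer;
    }

    // Mirrors the new mount(): only build a Synchronizer when a sync config exists.
    function wireSynchronizer(loader: LoaderParts): void {
        const { syncConfig } = loader.getConfiguration();
        if (syncConfig) {
            loader.synchronizer = new Synchronizer(syncConfig, loader.dbStore.getSchema());
        }
    }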
@@ -275,11 +273,9 @@ class AppLoader extends types_1.AppLoader {
                 transformEndpointItem(router, item);
             }
         }
-        if (this.synchronizers) {
-            this.synchronizers.forEach((synchronizer) => {
-                const syncEp = synchronizer.getSelfEndpoint();
-                transformEndpointItem(syncEp.name, syncEp);
-            });
+        if (this.synchronizer) {
+            const syncEp = this.synchronizer.getSelfEndpoint();
+            transformEndpointItem(syncEp.name, syncEp);
         }
         return endPointRouters;
     }
@@ -399,6 +395,10 @@ class AppLoader extends types_1.AppLoader {
     }
     async execStartRoutines() {
         const routines = this.requireSth('lib/routines/start');
+        if (this.synchronizer) {
+            const routine = this.synchronizer.getSyncRoutine();
+            routines.push(routine);
+        }
         for (const routine of routines) {
             if (routine.hasOwnProperty('entity')) {
                 const start = Date.now();
@@ -428,13 +428,6 @@ class AppLoader extends types_1.AppLoader {
                 }
             }
         }
-        if (this.synchronizers) {
-            this.synchronizers.forEach((synchronizer) => {
-                // this routine handles asynchrony internally
-                const routine = synchronizer.getSyncRoutine();
-                this.execWatcher(routine);
-            });
-        }
     }
     async execRoutine(routine) {
         const context = await this.makeContext();
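
The last two AppLoader.js hunks relocate the sync routine: it is no longer handed to execWatcher, but appended to the start routines, so it runs through the ordinary routine machinery. The collection step, sketched with a stand-in Routine type:

    type Routine = { name: string; routine: (context: unknown) => Promise<object> };

    function collectStartRoutines(
        loadFromDisk: () => Routine[],                    // stands in for requireSth('lib/routines/start')
        synchronizer?: { getSyncRoutine(): Routine },
    ): Routine[] {
        const routines = loadFromDisk();
        if (synchronizer) {
            routines.push(synchronizer.getSyncRoutine()); // the checkpoint routine joins the start routines
        }
        return routines;
    }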

package/lib/Synchronizer.d.ts
CHANGED

@@ -1,4 +1,4 @@
-import { EntityDict, StorageSchema, EndpointItem, SyncConfig, … } from 'oak-domain/lib/types';
+import { EntityDict, StorageSchema, EndpointItem, SyncConfig, FreeRoutine } from 'oak-domain/lib/types';
 import { VolatileTrigger } from 'oak-domain/lib/types/Trigger';
 import { EntityDict as BaseEntityDict } from 'oak-domain/lib/base-app-domain';
 import { BackendRuntimeContext } from 'oak-frontend-base/lib/context/BackendRuntimeContext';
@@ -7,7 +7,7 @@ export default class Synchronizer<ED extends EntityDict & BaseEntityDict, Cxt ex
     private schema;
     private remotePullInfoMap;
     private pullMaxBornAtMap;
-    private …
+    private channelDict;
     private pushAccessMap;
     /**
      * Push opers to a given remote target. Pushing is idempotent, so a failure here must be retried repeatedly
@@ -15,25 +15,15 @@ export default class Synchronizer<ED extends EntityDict & BaseEntityDict, Cxt ex
      * @param retry
      */
     private startChannel;
-    private …
+    private startAllChannel;
+    private pushOperToChannel;
+    private dispatchOperToChannels;
     /**
-     *
-     *
-     * 1) If an oper fails to push, it must stay in the queue, so it is guaranteed to be pushed before opers produced later
-     * 2) When adding an oper to the queue, check for duplicates (a duplicate means an earlier failure); if there is none, append it at the tail
-     *
-     * Strictly speaking this still cannot guarantee that an oper produced earlier is always pushed first, because volatile triggers fire only after the transaction commits; but this case should not arise for now, so assert it when the oper is actually executed. by Xc 20240226
-     */
-    private pushOper;
-    /**
-     * Because the application may be multi-tenant, the selfEncryptInfo under the context has to be determined up front
-     * Since a checkpoint cannot tell apart unfinished oper data from different contexts, the interface can only be designed this way
-     * @param id
+     * To preserve oper push order, all opers that need pushing are read from the database in order and then pushed
+     * Every process handles all of the current opers in order, so nothing arrives out of order; processes coordinate through locks on the database
      * @param context
-     * @param selfEncryptInfo
-     * @returns
      */
-    private synchronizeOpersToRemote;
+    private trySynchronizeOpers;
     private makeCreateOperTrigger;
     constructor(config: SyncConfig<ED, Cxt>, schema: StorageSchema<ED>);
     /**
@@ -41,6 +31,6 @@ export default class Synchronizer<ED extends EntityDict & BaseEntityDict, Cxt ex
      * @returns
      */
     getSyncTriggers(): VolatileTrigger<ED, keyof ED, Cxt>[];
-    getSyncRoutine(): …
+    getSyncRoutine(): FreeRoutine<ED, Cxt>;
     getSelfEndpoint(): EndpointItem<ED, Cxt>;
 }
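
After this change the public surface is three methods, one per integration point. A usage sketch with structural stand-ins for the real generic types:

    interface SynchronizerLike {
        getSyncTriggers(): unknown[];                                                // volatile commit triggers (push side)
        getSyncRoutine(): { name: string; routine(cxt: unknown): Promise<object> };  // FreeRoutine (checkpoint/retry)
        getSelfEndpoint(): { name: string };                                         // HTTP endpoint (pull side)
    }

    function integrate(sync: SynchronizerLike, registerTrigger: (t: unknown) => void) {
        sync.getSyncTriggers().forEach(registerTrigger); // 1. push committed opers
        const routine = sync.getSyncRoutine();           // 2. re-drive unfinished opers at startup
        const endpoint = sync.getSelfEndpoint();         // 3. receive opers pushed by remote nodes
        return { routine, endpoint };
    }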

package/lib/Synchronizer.js
CHANGED

@@ -8,6 +8,7 @@ const path_1 = require("path");
 const lodash_1 = require("oak-domain/lib/utils/lodash");
 const filter_1 = require("oak-domain/lib/store/filter");
 const uuid_1 = require("oak-domain/lib/utils/uuid");
+const lodash_2 = require("lodash");
 const OAK_SYNC_HEADER_ENTITY = 'oak-sync-entity';
 const OAK_SYNC_HEADER_ENTITYID = 'oak-sync-entity-id';
 class Synchronizer {
@@ -15,24 +16,20 @@ class Synchronizer {
     schema;
     remotePullInfoMap = {};
     pullMaxBornAtMap = {};
-    …
+    channelDict = {};
     pushAccessMap = {};
     /**
      * Push opers to a given remote target. Pushing is idempotent, so a failure here must be retried repeatedly
      * @param channel
      * @param retry
      */
-    async startChannel(channel, retry) {
+    async startChannel(context, channel, retry) {
         const { queue, api, selfEncryptInfo, entity, entityId } = channel;
-        channel.queue = [];
-        channel.running = true;
-        channel.nextPushTimestamp = Number.MAX_SAFE_INTEGER;
-        const opers = queue.map(ele => ele.oper);
-        let failedOpers = [];
-        let needRetry = false;
         let json;
         try {
             // todo: encryption
+            const queue = channel.queue;
+            const opers = queue.map(ele => ele.oper);
             console.log('Syncing data to a remote node', api, JSON.stringify(opers));
             const finalApi = (0, path_1.join)(api, selfEncryptInfo.id);
             const res = await fetch(finalApi, {
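
The channel record is also slimmed down: the scheduling state the old code kept (running, nextPushTimestamp, handler, per-request resolve callbacks) disappears, because retries become awaited recursive calls to startChannel instead of timers. The remaining shape, read off the new code (value types are reasonable guesses, not the package's declarations):

    type Channel = {
        api: string;                      // join(url, 'endpoint', endpoint)
        queue: {
            oper: { id: string; $$seq$$: number };
            onSynchronized?: (e: object, context: unknown) => Promise<void>;
        }[];
        entity: string;                   // the remote entity
        entityId: string;
        selfEncryptInfo: { id: string };  // refreshed on every pushOperToChannel call
    };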
@@ -50,156 +47,125 @@ class Synchronizer {
             json = await res.json();
         }
         catch (err) {
+            // the maximum delayed-redo time is 512 seconds
+            const retryDelay = Math.pow(2, Math.min(9, retry)) * 1000;
             console.error('Error during sync push', err);
-            …
-            …
+            console.error(`Retrying in ${retryDelay} ms`);
+            return new Promise((resolve) => {
+                setTimeout(async () => {
+                    await this.startChannel(context, channel, retry + 1);
+                    resolve(undefined);
+                }, retryDelay);
+            });
         }
-        …
-        …
-        …
-        …
-        …
-        …
-        …
-            const { id, error } = failed;
-            console.error('An exception occurred during synchronization', id, error);
-        }
-        for (const req of queue) {
-            if (successIds.includes(req.oper.id)) {
-                req.resolve(undefined);
-            }
-            else {
-                failedOpers.push(req);
-            }
-        }
+        /**
+         * for the returned structure, see this.getSelfEndpoint
+         */
+        const { successIds, failed, redundantIds } = json;
+        if (failed) {
+            const { id, error } = failed;
+            console.error('An exception occurred during synchronization', id, error, retry);
         }
-        …
-        channel…
-        …
-        …
-        …
-        …
-        …
-        …
-        let idx = 0;
-        const now = Date.now();
-        opers.forEach((oper) => {
-            for (; idx < channel.queue.length; idx++) {
-                if (channel.queue[idx].oper.id === oper.oper.id) {
-                    (0, assert_1.default)(false, 'duplicate opers must not occur');
-                    break;
-                }
-                else if (channel.queue[idx].oper.bornAt > oper.oper.bornAt) {
-                    break;
-                }
-            }
-            channel.queue.splice(idx, 0, oper);
-        });
-        const retryWeight = Math.pow(2, Math.min(retry, 10));
-        const nextPushTimestamp = retryWeight * 1000 + now;
-        if (channel.queue.length > 0) {
-            if (channel.running) {
-                if (channel.nextPushTimestamp > nextPushTimestamp) {
-                    channel.nextPushTimestamp = nextPushTimestamp;
-                }
+        const unsuccessfulOpers = queue.filter(ele => !successIds.includes(ele.oper.id) && !redundantIds.includes(ele.oper.id));
+        // before restarting, the triggerData flag of completed opers can be cleared; note that with several remote configs, one oper may have to be pushed to several channels
+        // this may be over-engineered here, and the code has not been tested
+        channel.queue = unsuccessfulOpers;
+        const aliveOperIds = [];
+        for (const k in this.channelDict) {
+            if (this.channelDict[k].queue.length > 0) {
+                aliveOperIds.push(...this.channelDict[k].queue.map(ele => ele.oper.id));
             }
-            …
-            …
-            …
-            …
-            …
+        }
+        const overIds = (0, lodash_1.difference)(successIds, aliveOperIds);
+        if (overIds.length > 0) {
+            await context.operate('oper', {
+                id: await (0, uuid_1.generateNewIdAsync)(),
+                action: 'update',
+                data: {
+                    [types_1.TriggerDataAttribute]: null,
+                    [types_1.TriggerUuidAttribute]: null,
+                },
+                filter: {
+                    id: {
+                        $in: overIds,
                     }
-            channel.handler = setTimeout(async () => {
-                await this.startChannel(channel, retry);
-            }, nextPushTimestamp - now);
-        }
-        else {
-            // the current queue's start time is already earlier than required here, nothing to do
-            (0, assert_1.default)(channel.handler);
             }
+            }, {});
+        }
+        if (successIds.length > 0) {
+            try {
+                await Promise.all(successIds.map((id) => {
+                    const { onSynchronized, oper } = queue.find(ele => ele.oper.id === id);
+                    return onSynchronized && onSynchronized({
+                        action: oper.action,
+                        data: oper.data,
+                        rowIds: (0, filter_1.getRelevantIds)(oper.filter),
+                    }, context);
+                }));
+            }
+            catch (err) {
+                // nothing can be done at this point?
+                console.error('Error during onSynchronized', err);
+                (0, assert_1.default)(false);
             }
         }
-        …
-        …
-        …
+        if (channel.queue.length > 0) {
+            // the maximum delayed-redo time is 512 seconds
+            const retryDelay = Math.pow(2, Math.min(9, retry)) * 1000;
+            console.error(`${channel.queue.length} oper(s) failed to sync; retrying in ${retryDelay} ms`);
+            return new Promise((resolve) => {
+                setTimeout(async () => {
+                    await this.startChannel(context, channel, retry + 1);
+                    resolve(undefined);
+                }, retryDelay);
+            });
         }
     }
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    if (!this.…
+    async startAllChannel(context) {
+        await Promise.all(Object.keys(this.channelDict).map(async (k) => {
+            const channel = this.channelDict[k];
+            if (channel.queue.length > 0) {
+                channel.queue.sort((o1, o2) => o1.oper.$$seq$$ - o2.oper.$$seq$$);
+                return this.startChannel(context, channel, 0);
+            }
+        }));
+    }
+    pushOperToChannel(oper, userId, url, endpoint, remoteEntity, remoteEntityId, selfEncryptInfo, onSynchronized) {
+        if (!this.channelDict[userId]) {
             // this info is cached on the channel; dynamic updates are not supported yet
-            this.…
+            this.channelDict[userId] = {
                 api: (0, path_1.join)(url, 'endpoint', endpoint),
                 queue: [],
                 entity: remoteEntity,
                 entityId: remoteEntityId,
-                nextPushTimestamp: Number.MAX_SAFE_INTEGER,
-                running: false,
                 selfEncryptInfo,
             };
         }
-        …
+        else {
+            // take the chance to refresh the encryption info
+            this.channelDict[userId].selfEncryptInfo = selfEncryptInfo;
+        }
+        const channel = this.channelDict[userId];
         (0, assert_1.default)(channel.api === (0, path_1.join)(url, 'endpoint', endpoint));
         (0, assert_1.default)(channel.entity === remoteEntity);
         (0, assert_1.default)(channel.entityId === remoteEntityId);
-        …
-        …
-        …
-            resolve,
-        }], 0);
+        channel.queue.push({
+            oper,
+            onSynchronized,
         });
-        await promise;
     }
-    /**
-     * Because the application may be multi-tenant, the selfEncryptInfo under the context has to be determined up front
-     * Since a checkpoint cannot tell apart unfinished oper data from different contexts, the interface can only be designed this way
-     * @param id
-     * @param context
-     * @param selfEncryptInfo
-     * @returns
-     */
-    async synchronizeOpersToRemote(id, context, selfEncryptInfo) {
-        const [oper] = await context.select('oper', {
-            data: {
-                id: 1,
-                action: 1,
-                data: 1,
-                targetEntity: 1,
-                operatorId: 1,
-                operEntity$oper: {
-                    $entity: 'operEntity',
-                    data: {
-                        id: 1,
-                        entity: 1,
-                        entityId: 1,
-                    },
-                },
-                bornAt: 1,
-                $$createAt$$: 1,
-                filter: 1,
-            },
-            filter: {
-                id,
-            }
-        }, { dontCollect: true, forUpdate: true });
-        const { operatorId, targetEntity, operEntity$oper: operEntities, action, data } = oper;
-        const entityIds = operEntities.map(ele => ele.entityId);
+    async dispatchOperToChannels(oper, context) {
+        const { operatorId, targetEntity, filter, action, data } = oper;
+        const entityIds = (0, filter_1.getRelevantIds)(filter);
+        (0, assert_1.default)(entityIds.length > 0);
         const pushEntityNodes = this.pushAccessMap[targetEntity];
+        let pushed = false;
         if (pushEntityNodes && pushEntityNodes.length > 0) {
             // each pushEntityNode represents one configured remoteEntity
             await Promise.all(pushEntityNodes.map(async (node) => {
-                const { projection, groupByUsers, getRemotePushInfo: getRemoteAccessInfo, endpoint, actions, onSynchronized } = node;
+                const { projection, groupByUsers, getRemotePushInfo: getRemoteAccessInfo, groupBySelfEntity, endpoint, actions, onSynchronized } = node;
                 // a definition should never be missing actions
                 if (!actions || actions.includes(action)) {
-                    const pushed = [];
                     const rows = await context.select(targetEntity, {
                         data: {
                             id: 1,
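
Both retry sites above use the same backoff: the delay doubles per attempt and is capped at 2^9 seconds. In isolation:

    // Same formula as the diff: 1 s, 2 s, 4 s, ..., capped at 512 s from the 9th retry on.
    function retryDelayMs(retry: number): number {
        return Math.pow(2, Math.min(9, retry)) * 1000;
    }
    // retryDelayMs(0) === 1_000; retryDelayMs(3) === 8_000; retryDelayMs(12) === 512_000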
@@ -213,8 +179,18 @@ class Synchronizer {
                     }, { dontCollect: true, includedDeleted: true });
                     // userId is the remote user to send to, but the user of this operation (its original producer) must be filtered out
                     const userSendDict = groupByUsers(rows);
+                    const selfEntityIdDict = groupBySelfEntity(rows);
+                    const encryptInfoDict = {};
                     const pushToUserIdFn = async (userId) => {
                         const { entity, entityId, rowIds } = userSendDict[userId];
+                        const selfEntityIds = rowIds.map((rowId) => selfEntityIdDict[rowId]);
+                        const uniqSelfEntityIds = (0, lodash_2.uniq)(selfEntityIds);
+                        (0, assert_1.default)(uniqSelfEntityIds.length === 1, 'opers pushed to the same userId cannot relate to several different selfEntity rows');
+                        const selfEntityId = uniqSelfEntityIds[0];
+                        if (!encryptInfoDict[selfEntityId]) {
+                            encryptInfoDict[selfEntityId] = await this.config.self.getSelfEncryptInfo(context, selfEntityId);
+                        }
+                        const selfEncryptInfo = encryptInfoDict[selfEntityId];
                         // the oper to push to the remote node
                         const oper2 = {
                             id: oper.id,
@@ -225,48 +201,96 @@ class Synchronizer {
                                     $in: rowIds,
                                 }
                             },
-                            …
+                            $$seq$$: oper.$$seq$$,
                             targetEntity,
                         };
                         const { url } = await getRemoteAccessInfo(context, {
                             userId,
                             remoteEntityId: entityId,
                         });
-                        …
+                        this.pushOperToChannel(oper2, userId, url, endpoint, entity, entityId, selfEncryptInfo, onSynchronized);
                     };
                     for (const userId in userSendDict) {
                         if (userId !== operatorId) {
-                            …
-                            …
-                        }
-                        if (pushed.length > 0) {
-                            // for a single oper we must wait here for all pushes to return, otherwise it would keep waiting above
-                            await Promise.all(pushed);
-                            if (onSynchronized) {
-                                await onSynchronized({
-                                    action: action,
-                                    data: data,
-                                    rowIds: entityIds,
-                                }, context);
+                            await pushToUserIdFn(userId);
+                            pushed = true;
                         }
                     }
                 }
             }));
-            …
-            …
-            …
-            …
-            …
+        }
+        // if an oper needs no push at all, its configured push path does not match the target row's path (a dynamic pointer)
+        return pushed;
+    }
+    /**
+     * To preserve oper push order, read all opers that need pushing from the database in order and push them
+     * Every process handles all of the current opers in order, so nothing arrives out of order; processes coordinate through locks on the database
+     * @param context
+     */
+    async trySynchronizeOpers(context) {
+        let dirtyOpers = await context.select('oper', {
+            data: {
+                id: 1,
+            },
+            filter: {
+                [types_1.TriggerDataAttribute]: {
+                    $exists: true,
+                },
+            }
+        }, { dontCollect: true });
+        if (dirtyOpers.length > 0) {
+            // this step takes the lock, guaranteeing that only one process does the push; the pusher clears $$triggerData$$ before committing
+            const ids = dirtyOpers.map(ele => ele.id);
+            dirtyOpers = await context.select('oper', {
                 data: {
-                    …
-                    …
+                    id: 1,
+                    action: 1,
+                    data: 1,
+                    targetEntity: 1,
+                    operatorId: 1,
+                    [types_1.TriggerDataAttribute]: 1,
+                    bornAt: 1,
+                    $$createAt$$: 1,
+                    $$seq$$: 1,
+                    filter: 1,
                 },
                 filter: {
-                    id
+                    id: { $in: ids },
                 },
-            }, {});
+            }, { dontCollect: true, forUpdate: true });
+            dirtyOpers = dirtyOpers.filter(ele => !!ele[types_1.TriggerDataAttribute]);
+            if (dirtyOpers.length > 0) {
+                const pushedIds = [];
+                const unpushedIds = [];
+                await Promise.all(dirtyOpers.map(async (oper) => {
+                    const result = await this.dispatchOperToChannels(oper, context);
+                    if (result) {
+                        pushedIds.push(oper.id);
+                    }
+                    else {
+                        unpushedIds.push(oper.id);
+                    }
+                }));
+                if (unpushedIds.length > 0) {
+                    await context.operate('oper', {
+                        id: await (0, uuid_1.generateNewIdAsync)(),
+                        action: 'update',
+                        data: {
+                            [types_1.TriggerDataAttribute]: null,
+                            [types_1.TriggerUuidAttribute]: null,
+                        },
+                        filter: {
+                            id: {
+                                $in: unpushedIds,
+                            }
+                        }
+                    }, {});
+                }
+                if (pushedIds.length > 0) {
+                    await this.startAllChannel(context);
+                }
+            }
         }
-        return 0;
     }
     makeCreateOperTrigger() {
         const { config } = this;
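
The core of trySynchronizeOpers is a two-phase read: a cheap unlocked scan for the ids of opers whose $$triggerData$$ is still set, then a re-select of those ids with forUpdate to take row locks, then a re-filter, because a competing process may have pushed and cleared them while this one waited for the lock. The pattern in miniature (the store interface is a stand-in for context.select on 'oper'):

    type OperRow = { id: string; triggerData?: object };

    interface OperStore {
        selectDirtyIds(): Promise<string[]>;                // select with { $exists: true } on triggerData
        selectForUpdate(ids: string[]): Promise<OperRow[]>; // select with { forUpdate: true }
    }

    async function lockDirtyOpers(store: OperStore): Promise<OperRow[]> {
        const ids = await store.selectDirtyIds();          // phase 1: no lock, ids only
        if (ids.length === 0) {
            return [];
        }
        const locked = await store.selectForUpdate(ids);   // phase 2: SELECT ... FOR UPDATE
        // a competing pusher may have cleared triggerData in the meantime
        return locked.filter((row) => !!row.triggerData);
    }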
@@ -278,21 +302,24 @@ class Synchronizer {
         const pushEntities = [];
         const endpoint2 = (0, path_1.join)(endpoint || 'sync', self.entity);
         for (const def of pushEntityDefs) {
-            const { …
+            const { pathToRemoteEntity, pathToSelfEntity, relationName, recursive, entity, actions, onSynchronized } = def;
             pushEntities.push(entity);
             const relationName2 = relationName || rnRemote;
-            const path2 = pathToUser ? `${…
+            const path2 = pathToUser ? `${pathToRemoteEntity}.${pathToUser}` : pathToRemoteEntity;
+            (0, assert_1.default)(!recursive);
             const { projection, getData } = relationName2 ? (0, relationPath_1.destructRelationPath)(this.schema, entity, path2, {
                 relation: {
                     name: relationName,
                 }
-            }, recursive) : (0, relationPath_1.…
+            }, recursive) : (0, relationPath_1.destructDirectUserPath)(this.schema, entity, path2);
+            const toSelfEntity = (0, relationPath_1.destructDirectPath)(this.schema, entity, pathToSelfEntity);
             const groupByUsers = (rows) => {
                 const userRowDict = {};
                 rows.forEach((row) => {
                     const goals = getData(row);
                     if (goals) {
                         goals.forEach(({ entity, entityId, userId }) => {
+                            (0, assert_1.default)(userId);
                             if (userRowDict[userId]) {
                                 // logically the same userId must relate to the same entity and entityId; this entity/entityId identifies the remote side
                                 (0, assert_1.default)(userRowDict[userId].entity === entity && userRowDict[userId].entityId === entityId);
@@ -310,10 +337,28 @@ class Synchronizer {
                 });
                 return userRowDict;
             };
+            const projectionMerged = (0, lodash_2.merge)(projection, toSelfEntity.projection);
+            const groupBySelfEntity = (rows) => {
+                const selfEntityIdDict = {};
+                for (const row of rows) {
+                    const selfEntityInfo = toSelfEntity.getData(row, pathToSelfEntity);
+                    if (selfEntityInfo) {
+                        const selfEntityIds = selfEntityInfo.map((info) => {
+                            (0, assert_1.default)(info.entity === this.config.self.entity);
+                            return info.data.id;
+                        });
+                        const uniqSelfEntityIds = (0, lodash_2.uniq)(selfEntityIds);
+                        (0, assert_1.default)(uniqSelfEntityIds.length === 1, 'the same row cannot relate to two selfEntity rows');
+                        selfEntityIdDict[row.id] = uniqSelfEntityIds[0];
+                    }
+                }
+                return selfEntityIdDict;
+            };
             if (!this.pushAccessMap[entity]) {
                 this.pushAccessMap[entity] = [{
-                    projection,
+                    projection: projectionMerged,
                     groupByUsers,
+                    groupBySelfEntity,
                     getRemotePushInfo: getPushInfo,
                     endpoint: endpoint2,
                     entity,
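
groupBySelfEntity exists for multi-tenancy: every pushed row must resolve to exactly one row of the configured self entity, because selfEncryptInfo is now looked up per selfEntityId (see encryptInfoDict in the dispatch hunk above) rather than once per context. Its invariant, reduced to a sketch:

    // resolveSelf stands in for toSelfEntity.getData.
    function groupBySelfEntitySketch(
        rows: { id: string }[],
        resolveSelf: (row: { id: string }) => { data: { id: string } }[] | undefined,
    ): Record<string, string> {
        const dict: Record<string, string> = {};
        for (const row of rows) {
            const infos = resolveSelf(row);
            if (infos) {
                const uniqueIds = [...new Set(infos.map((i) => i.data.id))];
                if (uniqueIds.length !== 1) {
                    throw new Error('a row cannot relate to two selfEntity rows');
                }
                dict[row.id] = uniqueIds[0];
            }
        }
        return dict;
    }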
@@ -325,6 +370,7 @@ class Synchronizer {
                 this.pushAccessMap[entity].push({
                     projection,
                     groupByUsers,
+                    groupBySelfEntity,
                     getRemotePushInfo: getPushInfo,
                     endpoint: endpoint2,
                     entity,
@@ -351,9 +397,9 @@ class Synchronizer {
             },
             fn: async ({ ids }, context) => {
                 (0, assert_1.default)(ids.length === 1);
-                …
-                …
-                throw new types_1.…
+                this.trySynchronizeOpers(context);
+                // triggerData is handled internally here, so the triggerExecutor need not handle it
+                throw new types_1.OakMakeSureByMySelfException();
             }
         };
         return createOperTrigger;
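
The rewritten trigger body no longer drives the push itself: it kicks off trySynchronizeOpers without awaiting it and throws types_1.OakMakeSureByMySelfException, which, per the diff's comment, tells the trigger executor that the synchronizer will clear $$triggerData$$ on its own. A sketch of the handler's shape (the real exception class lives in oak-domain's types):

    class OakMakeSureByMySelfException extends Error {}
    async function trySynchronizeOpers(context: unknown): Promise<void> { /* push driver stub */ }

    const fn = async ({ ids }: { ids: string[] }, context: unknown): Promise<never> => {
        if (ids.length !== 1) {
            throw new Error('expected exactly one oper id'); // mirrors the assert in the diff
        }
        void trySynchronizeOpers(context);        // deliberately not awaited
        throw new OakMakeSureByMySelfException(); // executor must not clear triggerData
    };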
@@ -372,26 +418,10 @@ class Synchronizer {
     getSyncRoutine() {
         return {
             name: 'checkpoint routine for sync',
-            entity: 'oper',
-            filter: {
-                [types_1.TriggerDataAttribute]: {
-                    $exists: true,
-                }
-            },
-            projection: {
-                id: 1,
-                [types_1.TriggerDataAttribute]: 1,
-            },
-            fn: async (context, data) => {
-                for (const ele of data) {
-                    const { id, [types_1.TriggerDataAttribute]: triggerData } = ele;
-                    const { cxtStr = '{}' } = triggerData;
-                    await context.initialize(JSON.parse(cxtStr), true);
-                    const selfEncryptInfo = await this.config.self.getSelfEncryptInfo(context);
-                    this.synchronizeOpersToRemote(id, context, selfEncryptInfo);
-                }
+            routine: async (context) => {
+                this.trySynchronizeOpers(context);
                 return {};
-            }
+            },
         };
     }
     getSelfEndpoint() {
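
getSyncRoutine now returns a FreeRoutine, a name plus a plain routine function, instead of the old watcher object with entity/filter/projection/fn; all of the oper selection moved into trySynchronizeOpers. The returned shape, as a sketch:

    async function trySynchronizeOpers(context: unknown): Promise<void> { /* push driver stub */ }

    const syncRoutine = {
        name: 'checkpoint routine for sync',
        routine: async (context: unknown): Promise<object> => {
            void trySynchronizeOpers(context); // not awaited in the diff either
            return {};
        },
    };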
@@ -405,6 +435,7 @@ class Synchronizer {
             const { [OAK_SYNC_HEADER_ENTITY]: meEntity, [OAK_SYNC_HEADER_ENTITYID]: meEntityId } = headers;
             console.log('Received sync data from a remote node', entity, JSON.stringify(body));
             const successIds = [];
+            const redundantIds = [];
             let failed;
             // todo: cache this for now, without considering updates to the sync info itself
             if (!this.remotePullInfoMap[entity]) {
@@ -456,8 +487,8 @@ class Synchronizer {
             }
             let maxBornAt = this.pullMaxBornAtMap[entityId];
             const opers = body;
-            const outdatedOpers = opers.filter(ele => ele.bornAt <= maxBornAt);
-            const freshOpers = opers.filter(ele => ele.bornAt > maxBornAt);
+            const outdatedOpers = opers.filter(ele => ele.$$seq$$ <= maxBornAt);
+            const freshOpers = opers.filter(ele => ele.$$seq$$ > maxBornAt);
             await Promise.all([
                 // pushes cannot be strictly guaranteed to arrive in bornAt order, so as long as there are outdatedOpers, check that they have already been applied
                 (async () => {
@@ -478,13 +509,13 @@ class Synchronizer {
                         // todo: if the remote business logic is strict, any oper that arrives out of order should be unrelated and can just be executed directly by Xc
                         throw new Error(`Missing oper data「${missed}」discovered during sync`);
                     }
-                    …
+                    redundantIds.push(...ids);
                 }
             })(),
             (async () => {
                 for (const freshOper of freshOpers) {
-                    // freshOpers…
-                    const { id, targetEntity, action, data, bornAt, filter } = freshOper;
+                    // freshOpers are produced in $$seq$$ order
+                    const { id, targetEntity, action, data, $$seq$$, filter } = freshOper;
                     const ids = (0, filter_1.getRelevantIds)(filter);
                     (0, assert_1.default)(ids.length > 0);
                     try {
@@ -503,11 +534,11 @@ class Synchronizer {
                                     $in: ids,
                                 },
                             },
-                            bornAt: bornAt,
+                            bornAt: $$seq$$,
                         };
                         await context.operate(targetEntity, operation, {});
                         successIds.push(id);
-                        maxBornAt = bornAt;
+                        maxBornAt = $$seq$$;
                     }
                     catch (err) {
                         console.error(err);
@@ -525,6 +556,7 @@ class Synchronizer {
             return {
                 successIds,
                 failed,
+                redundantIds,
             };
         }
     };
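
On the pull side, the endpoint now keys idempotency off $$seq$$ instead of bornAt: opers at or below the per-remote high-water mark are reported back as redundantIds (so the pushing side can drop them from its queue), while fresh opers are applied and advance the mark. The receive loop in miniature (apply stands in for context.operate on the target entity; the explicit sort is this sketch's addition):

    async function receiveOpers(
        opers: { id: string; $$seq$$: number }[],
        maxSeq: number,
        apply: (oper: { id: string; $$seq$$: number }) => Promise<void>,
    ) {
        const redundantIds = opers.filter((o) => o.$$seq$$ <= maxSeq).map((o) => o.id);
        const fresh = opers
            .filter((o) => o.$$seq$$ > maxSeq)
            .sort((a, b) => a.$$seq$$ - b.$$seq$$);
        const successIds: string[] = [];
        for (const oper of fresh) {
            await apply(oper);          // on failure the real code records `failed` and stops
            successIds.push(oper.id);
            maxSeq = oper.$$seq$$;      // advance the high-water mark
        }
        return { successIds, redundantIds, maxSeq };
    }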

package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
     "name": "oak-backend-base",
-    "version": "3.4.0",
+    "version": "3.4.1",
     "description": "oak-backend-base",
     "main": "lib/index",
     "author": {
@@ -22,7 +22,7 @@
     "node-schedule": "^2.1.0",
     "oak-common-aspect": "^2.3.0",
     "oak-db": "^3.2.0",
-    "oak-domain": "^4.3.0",
+    "oak-domain": "^4.3.1",
     "oak-frontend-base": "^4.3.0",
     "socket.io": "^4.7.2",
     "socket.io-client": "^4.7.2",