oak-backend-base 3.2.3 → 3.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/AppLoader.d.ts +40 -37
- package/lib/AppLoader.js +467 -383
- package/lib/ClusterAppLoader.d.ts +17 -17
- package/lib/ClusterAppLoader.js +142 -142
- package/lib/DataSubscriber.d.ts +18 -18
- package/lib/DataSubscriber.js +158 -158
- package/lib/DbStore.d.ts +20 -20
- package/lib/DbStore.js +122 -122
- package/lib/Synchronizer.d.ts +23 -0
- package/lib/Synchronizer.js +281 -0
- package/lib/cluster/DataSubscriber.d.ts +23 -23
- package/lib/cluster/DataSubscriber.js +83 -83
- package/lib/cluster/env.d.ts +5 -5
- package/lib/cluster/env.js +55 -55
- package/lib/index.d.ts +3 -3
- package/lib/index.js +9 -9
- package/lib/polyfill.d.ts +3 -3
- package/lib/types/Sync.d.ts +15 -0
- package/lib/types/Sync.js +5 -0
- package/package.json +42 -42
|
@@ -0,0 +1,281 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
const relationPath_1 = require("oak-domain/lib/utils/relationPath");
|
|
4
|
+
const console_1 = require("console");
|
|
5
|
+
const lodash_1 = require("oak-domain/lib/utils/lodash");
|
|
6
|
+
const OAK_SYNC_HEADER_ITEM = 'oak-sync-remote-id';
/**
 * Flush one channel's queued opers to its remote api in a single POST.
 * The channel's queue and timer handle are reset up front (before any await)
 * so that opers arriving during the request queue up for the next flush.
 * On success every queued waiter is resolved; on any failure (non-200 status,
 * an `error` field in the response, or a network error) every waiter is
 * rejected with the same error.
 */
async function pushRequestOnChannel(channel, selfEncryptInfo) {
    const { queue: pending, api } = channel;
    // Detach the current batch and re-arm the channel for the next one.
    channel.queue = [];
    channel.lastPushTimestamp = Date.now();
    channel.handler = undefined;
    const payload = pending.map(({ oper }) => oper);
    try {
        // todo encrypt
        const response = await fetch(api, {
            method: 'post',
            headers: {
                'Content-Type': 'application/json',
                [OAK_SYNC_HEADER_ITEM]: selfEncryptInfo.id,
            },
            body: JSON.stringify(payload),
        });
        if (response.status !== 200) {
            throw new Error(`访问api「${api}」的结果不是200。「${response.status}」`);
        }
        const { timestamp, error } = await response.json();
        if (error) {
            throw new Error(`访问api「${api}」的结果出错,是${error}`);
        }
        // Remember the highest timestamp the remote has acknowledged.
        if (!channel.remoteMaxTimestamp || channel.remoteMaxTimestamp < timestamp) {
            channel.remoteMaxTimestamp = timestamp;
        }
        for (const { resolve } of pending) {
            resolve();
        }
    }
    catch (err) {
        for (const { reject } of pending) {
            reject(err);
        }
    }
}
|
|
40
|
+
class Synchronizer {
    config;
    schema;
    selfEncryptInfo;
    // Cache of remote pull info, keyed by entity and then by remote id (see getSelfEndpoint).
    remotePullInfoMap = {};
    // Push channel per target userId: { api, queue, handler?, lastPushTimestamp?, remoteMaxTimestamp? }.
    remotePushChannel = {};
    /**
     * Push a freshly created oper to the remote nodes of the given users.
     * Opers are queued per channel and flushed in one batched POST at most once
     * per second; the returned promise settles when the batch containing this
     * oper has been pushed (rejects if the push failed). An oper whose bornAt is
     * below the channel's remoteMaxTimestamp has already been synchronized and
     * is skipped — this is the local guard against duplicate pushes.
     */
    async pushOper(oper, userIds, getRemoteAccessInfo, endpoint) {
        await Promise.all(userIds.map(async (userId) => {
            if (!this.remotePushChannel[userId]) {
                const { url } = await getRemoteAccessInfo(userId);
                this.remotePushChannel[userId] = {
                    // todo normalize the url
                    api: `${url}/endpoint/${endpoint || 'sync'}`,
                    queue: [],
                };
            }
            const channel = this.remotePushChannel[userId];
            if (channel.remoteMaxTimestamp && oper.bornAt < channel.remoteMaxTimestamp) {
                // Already synchronized to this remote.
                return;
            }
            const waiter = new Promise((resolve, reject) => {
                channel.queue.push({
                    oper,
                    resolve,
                    reject
                });
            });
            if (!channel.handler) {
                channel.handler = setTimeout(async () => {
                    // selfEncryptInfo is loaded asynchronously from the constructor;
                    // the assert documents that it must be ready before the first flush.
                    (0, console_1.assert)(this.selfEncryptInfo);
                    await pushRequestOnChannel(channel, this.selfEncryptInfo);
                }, 1000); // batch pushes: flush at most once per second
            }
            await waiter;
        }));
    }
    // Load this node's own encryption info (its id is sent as the sync identity header).
    async loadPublicKey() {
        this.selfEncryptInfo = await this.config.self.getSelfEncryptInfo();
    }
    /**
     * Build the 'commit' trigger that pushes newly created opers to the remote
     * nodes declared in this.config.remotes.
     */
    makeCreateOperTrigger() {
        const { config } = this;
        const { remotes } = config;
        // Map from entity name to the remote-node access info it must be pushed to.
        const pushAccessMap = {};
        remotes.forEach((remote) => {
            const { getRemotePushInfo, syncEntities, endpoint } = remote;
            const pushEntityDefs = syncEntities.filter(ele => ele.direction === 'push');
            const pushEntities = pushEntityDefs.map(ele => ele.entity);
            pushEntities.forEach((entity) => {
                const def = syncEntities.find(ele => ele.entity === entity);
                const { path, relationName, recursive } = def;
                // Destructure the (relation or direct) path into a projection plus a
                // function extracting the related user rows from a selected row.
                const { projection, getData } = relationName
                    ? (0, relationPath_1.destructRelationPath)(this.schema, entity, path, {
                        relation: {
                            name: relationName,
                        }
                    }, recursive)
                    : (0, relationPath_1.destructDirectPath)(this.schema, entity, path, recursive);
                const getUserIds = (rows) => {
                    const urs = rows.map((row) => getData(row)).flat();
                    return (0, lodash_1.uniq)(urs.map(ele => ele.userId));
                };
                if (!pushAccessMap[entity]) {
                    pushAccessMap[entity] = [];
                }
                pushAccessMap[entity].push({
                    projection,
                    getUserIds,
                    getRemotePushInfo,
                    endpoint,
                });
            });
        });
        const pushEntities = Object.keys(pushAccessMap);
        // For push-related entities, forward the created operation to the remotes on commit.
        const createOperTrigger = {
            name: 'push oper to remote node',
            entity: 'oper',
            action: 'create',
            when: 'commit',
            strict: 'makeSure',
            check: (operation) => {
                const { data } = operation;
                return pushEntities.includes(data.targetEntity);
            },
            fn: async ({ ids }, context) => {
                (0, console_1.assert)(ids.length === 1);
                const [oper] = await context.select('oper', {
                    data: {
                        id: 1,
                        action: 1,
                        data: 1,
                        targetEntity: 1,
                        operatorId: 1,
                        operEntity$oper: {
                            $entity: 'operEntity',
                            data: {
                                id: 1,
                                entity: 1,
                                entityId: 1,
                            },
                        },
                        $$createAt$$: 1,
                    },
                    filter: {
                        id: ids[0],
                    }
                }, { dontCollect: true });
                const { operatorId, targetEntity, operEntity$oper: operEntities } = oper;
                const entityIds = operEntities.map(ele => ele.entityId);
                const pushNodes = pushAccessMap[targetEntity];
                if (pushNodes) {
                    await Promise.all(pushNodes.map(async ({ projection, getUserIds, getRemotePushInfo: getRemoteAccessInfo, endpoint }) => {
                        const rows = await context.select(targetEntity, {
                            data: {
                                id: 1,
                                ...projection,
                            },
                            filter: {
                                id: {
                                    $in: entityIds,
                                },
                            },
                        }, { dontCollect: true });
                        // The users to push to, minus the operator (who originated this oper).
                        const userIds = getUserIds(rows).filter((ele) => ele !== operatorId);
                        if (userIds.length > 0) {
                            await this.pushOper(oper, userIds, getRemoteAccessInfo, endpoint);
                        }
                        return undefined;
                    }));
                    return entityIds.length * pushNodes.length;
                }
                return 0;
            }
        };
        return createOperTrigger;
    }
    constructor(config, schema) {
        this.config = config;
        this.schema = schema;
        // NOTE(review): fire-and-forget — selfEncryptInfo is not guaranteed to be
        // loaded before the first push fires; the assert in pushOper guards this.
        this.loadPublicKey();
    }
    /**
     * Generate the commit triggers defined by the sync configuration.
     * @returns array of triggers
     */
    getSyncTriggers() {
        return [this.makeCreateOperTrigger()];
    }
    // Placeholder: consistency check of incoming remote operations. Not implemented yet.
    async checkOperationConsistent(entity, ids, bornAt) {
    }
    /**
     * The endpoint this node exposes to receive opers pushed from remote nodes.
     * Applies each incoming oper locally and answers with the max bornAt applied,
     * so the remote can advance its remoteMaxTimestamp.
     */
    getSelfEndpoint() {
        return {
            name: this.config.self.endpoint || 'sync',
            method: 'post',
            params: ['entity'],
            fn: async (context, params, headers, req, body) => {
                // body carries the array of opers pushed by the remote node
                const { entity } = params;
                const { [OAK_SYNC_HEADER_ITEM]: id } = headers;
                try {
                    // todo cached for now; updates to the sync info itself are not handled
                    if (!this.remotePullInfoMap[entity]) {
                        this.remotePullInfoMap[entity] = {};
                    }
                    if (!this.remotePullInfoMap[entity][id]) {
                        const { getRemotePullInfo } = this.config.remotes.find(ele => ele.entity === entity);
                        this.remotePullInfoMap[entity][id] = await getRemotePullInfo(id);
                    }
                    const pullInfo = this.remotePullInfoMap[entity][id];
                    const { userId, algorithm, publicKey } = pullInfo;
                    // todo decrypt
                    // Opers whose bornAt does not exceed the max bornAt already applied
                    // for this user are duplicates and are dropped.
                    const [maxHisOper] = await context.select('oper', {
                        data: {
                            id: 1,
                            bornAt: 1,
                        },
                        filter: {
                            operatorId: userId,
                        },
                        sorter: [
                            {
                                $attr: {
                                    bornAt: 1,
                                },
                                $direction: 'desc',
                            },
                        ],
                        indexFrom: 0,
                        count: 1,
                    }, { dontCollect: true });
                    const opers = body;
                    const legalOpers = maxHisOper ? opers.filter(ele => ele.bornAt > maxHisOper.bornAt) : opers;
                    if (legalOpers.length > 0) {
                        for (const oper of legalOpers) {
                            const { id, targetEntity, action, data, bornAt, operEntity$oper: operEntities } = oper;
                            // Fix: target the affected rows by entityId (the id of the row in
                            // targetEntity) — matches how the push side builds entityIds;
                            // ele.id is the operEntity join row's own id, not a target row id.
                            const ids = operEntities.map(ele => ele.entityId);
                            // Fix: await — this async check was previously fire-and-forget.
                            await this.checkOperationConsistent(targetEntity, ids, bornAt);
                            const operation = {
                                id,
                                data,
                                action,
                                filter: {
                                    id: {
                                        $in: ids,
                                    },
                                },
                                bornAt: bornAt,
                            };
                            await context.operate(targetEntity, operation, {});
                        }
                        // legalOpers arrive sorted by bornAt, so the last one is the max.
                        return {
                            timestamp: legalOpers[legalOpers.length - 1].bornAt,
                        };
                    }
                    else {
                        (0, console_1.assert)(maxHisOper);
                        return {
                            timestamp: maxHisOper.bornAt,
                        };
                    }
                }
                catch (err) {
                    // Fix: JSON.stringify(new Error(...)) yields '{}' because Error's
                    // message/stack are non-enumerable — report the message instead.
                    return {
                        error: err instanceof Error ? err.message : JSON.stringify(err),
                    };
                }
            }
        };
    }
}
|
|
281
|
+
exports.default = Synchronizer;
|
|
@@ -1,23 +1,23 @@
|
|
|
1
|
-
import { EntityDict, OperateOption, OpRecord } from 'oak-domain/lib/types';
|
|
2
|
-
import { EntityDict as BaseEntityDict } from 'oak-domain/lib/base-app-domain';
|
|
3
|
-
import { BackendRuntimeContext } from 'oak-frontend-base';
|
|
4
|
-
import { Namespace } from 'socket.io';
|
|
5
|
-
/**
|
|
6
|
-
* 集群行为备忘:
|
|
7
|
-
* 当socket.io通过adapter在集群间通信时,测试行为如下(测试环境为pm2 + cluster-adapter,其它adpater启用时需要再测一次):
|
|
8
|
-
* 1)当client连接到node1并join room1时,只有node1上会有create room事件(room结构本身在结点间并不共享)
|
|
9
|
-
* 2)当某一个node执行 .adapter.to('room1').emit()时,连接到任一结点的client均能收到消息(但使用room可以实现跨结点推包)
|
|
10
|
-
* 3) serverSideEmit执行时如果有callback,而不是所有的接收者都执行callback的话,会抛出一个异常(意味着不需要本结点来判定是否收到全部的返回值了)
|
|
11
|
-
*/
|
|
12
|
-
export default class DataSubscriber<ED extends EntityDict & BaseEntityDict, Context extends BackendRuntimeContext<ED>> {
|
|
13
|
-
private ns;
|
|
14
|
-
private nsServer?;
|
|
15
|
-
private contextBuilder;
|
|
16
|
-
constructor(ns: Namespace, contextBuilder: (scene?: string) => Promise<Context>, nsServer?: Namespace);
|
|
17
|
-
/**
|
|
18
|
-
* 来自外部的socket连接,监听数据变化
|
|
19
|
-
*/
|
|
20
|
-
private startup;
|
|
21
|
-
publishEvent(event: string, records: OpRecord<ED>[], sid?: string): void;
|
|
22
|
-
publishVolatileTrigger(entity: keyof ED, name: string, instanceNumber: string, ids: string[], cxtStr: string, option: OperateOption): void;
|
|
23
|
-
}
|
|
1
|
+
import { EntityDict, OperateOption, OpRecord } from 'oak-domain/lib/types';
import { EntityDict as BaseEntityDict } from 'oak-domain/lib/base-app-domain';
import { BackendRuntimeContext } from 'oak-frontend-base/lib/context/BackendRuntimeContext';
import { Namespace } from 'socket.io';
/**
 * Cluster behavior notes:
 * When socket.io communicates across the cluster through an adapter, the observed
 * behavior is as follows (tested with pm2 + cluster-adapter; retest when other
 * adapters are enabled):
 * 1) When a client connects to node1 and joins room1, only node1 sees the
 *    create-room event (the room structure itself is not shared across nodes).
 * 2) When any node calls .adapter.to('room1').emit(), clients connected to any
 *    node receive the message (so rooms can be used to push packets across nodes).
 * 3) If serverSideEmit is given a callback and not every receiver invokes it, an
 *    exception is thrown (meaning this node does not need to verify that all
 *    return values arrived).
 */
export default class DataSubscriber<ED extends EntityDict & BaseEntityDict, Context extends BackendRuntimeContext<ED>> {
    private ns;
    private nsServer?;
    private contextBuilder;
    constructor(ns: Namespace, contextBuilder: (scene?: string) => Promise<Context>, nsServer?: Namespace);
    /**
     * External socket connections land here to listen for data changes.
     */
    private startup;
    publishEvent(event: string, records: OpRecord<ED>[], sid?: string): void;
    publishVolatileTrigger(entity: keyof ED, name: string, instanceNumber: string, ids: string[], cxtStr: string, option: OperateOption): void;
}
|
|
@@ -1,83 +1,83 @@
|
|
|
1
|
-
"use strict";
|
|
2
|
-
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
-
const env_1 = require("./env");
|
|
4
|
-
const console_1 = require("console");
|
|
5
|
-
/**
|
|
6
|
-
* 集群行为备忘:
|
|
7
|
-
* 当socket.io通过adapter在集群间通信时,测试行为如下(测试环境为pm2 + cluster-adapter,其它adpater启用时需要再测一次):
|
|
8
|
-
* 1)当client连接到node1并join room1时,只有node1上会有create room事件(room结构本身在结点间并不共享)
|
|
9
|
-
* 2)当某一个node执行 .adapter.to('room1').emit()时,连接到任一结点的client均能收到消息(但使用room可以实现跨结点推包)
|
|
10
|
-
* 3) serverSideEmit执行时如果有callback,而不是所有的接收者都执行callback的话,会抛出一个异常(意味着不需要本结点来判定是否收到全部的返回值了)
|
|
11
|
-
*/
|
|
12
|
-
class DataSubscriber {
|
|
13
|
-
ns;
|
|
14
|
-
nsServer;
|
|
15
|
-
contextBuilder;
|
|
16
|
-
constructor(ns, contextBuilder, nsServer) {
|
|
17
|
-
this.ns = ns;
|
|
18
|
-
this.nsServer = nsServer;
|
|
19
|
-
this.contextBuilder = contextBuilder;
|
|
20
|
-
this.startup();
|
|
21
|
-
}
|
|
22
|
-
/**
|
|
23
|
-
* 来自外部的socket连接,监听数据变化
|
|
24
|
-
*/
|
|
25
|
-
startup() {
|
|
26
|
-
this.ns.on('connection', async (socket) => {
|
|
27
|
-
try {
|
|
28
|
-
const { instanceId } = (0, env_1.getClusterInfo)();
|
|
29
|
-
// console.log('on connection', instanceId);
|
|
30
|
-
socket.on('sub', async (events) => {
|
|
31
|
-
events.forEach((event) => socket.join(event));
|
|
32
|
-
});
|
|
33
|
-
socket.on('unsub', (events) => {
|
|
34
|
-
// console.log('instance:', process.env.NODE_APP_INSTANCE, 'on unsub', JSON.stringify(ids));
|
|
35
|
-
events.forEach((id) => {
|
|
36
|
-
socket.leave(id);
|
|
37
|
-
});
|
|
38
|
-
});
|
|
39
|
-
}
|
|
40
|
-
catch (err) {
|
|
41
|
-
socket.emit('error', err.toString());
|
|
42
|
-
}
|
|
43
|
-
});
|
|
44
|
-
if (this.nsServer) {
|
|
45
|
-
this.nsServer.on('connection', async (socket) => {
|
|
46
|
-
try {
|
|
47
|
-
const { instanceId } = (0, env_1.getClusterInfo)();
|
|
48
|
-
console.log('on nsServer connection', instanceId);
|
|
49
|
-
socket.on('sub', async (events) => {
|
|
50
|
-
console.log('on nsServer sub', instanceId, events);
|
|
51
|
-
events.forEach((event) => socket.join(event));
|
|
52
|
-
});
|
|
53
|
-
socket.on('unsub', (events) => {
|
|
54
|
-
// console.log('instance:', process.env.NODE_APP_INSTANCE, 'on unsub', JSON.stringify(ids));
|
|
55
|
-
events.forEach((id) => {
|
|
56
|
-
socket.leave(id);
|
|
57
|
-
});
|
|
58
|
-
});
|
|
59
|
-
}
|
|
60
|
-
catch (err) {
|
|
61
|
-
socket.emit('error', err.toString());
|
|
62
|
-
}
|
|
63
|
-
});
|
|
64
|
-
}
|
|
65
|
-
}
|
|
66
|
-
publishEvent(event, records, sid) {
|
|
67
|
-
const { instanceId } = (0, env_1.getClusterInfo)();
|
|
68
|
-
// console.log('publishEvent', instanceId);
|
|
69
|
-
if (sid) {
|
|
70
|
-
this.ns.to(event).except(sid).emit('data', records);
|
|
71
|
-
}
|
|
72
|
-
else {
|
|
73
|
-
this.ns.to(event).emit('data', records);
|
|
74
|
-
}
|
|
75
|
-
}
|
|
76
|
-
publishVolatileTrigger(entity, name, instanceNumber, ids, cxtStr, option) {
|
|
77
|
-
const { instanceId } = (0, env_1.getClusterInfo)();
|
|
78
|
-
// console.log('publishVolatileTrigger', instanceId, instanceNumber);
|
|
79
|
-
(0, console_1.assert)(this.nsServer);
|
|
80
|
-
this.nsServer.to(`${name}-${instanceNumber}`).emit('data', entity, name, ids, cxtStr, option);
|
|
81
|
-
}
|
|
82
|
-
}
|
|
83
|
-
exports.default = DataSubscriber;
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
const env_1 = require("./env");
|
|
4
|
+
const console_1 = require("console");
|
|
5
|
+
/**
|
|
6
|
+
* 集群行为备忘:
|
|
7
|
+
* 当socket.io通过adapter在集群间通信时,测试行为如下(测试环境为pm2 + cluster-adapter,其它adpater启用时需要再测一次):
|
|
8
|
+
* 1)当client连接到node1并join room1时,只有node1上会有create room事件(room结构本身在结点间并不共享)
|
|
9
|
+
* 2)当某一个node执行 .adapter.to('room1').emit()时,连接到任一结点的client均能收到消息(但使用room可以实现跨结点推包)
|
|
10
|
+
* 3) serverSideEmit执行时如果有callback,而不是所有的接收者都执行callback的话,会抛出一个异常(意味着不需要本结点来判定是否收到全部的返回值了)
|
|
11
|
+
*/
|
|
12
|
+
class DataSubscriber {
|
|
13
|
+
ns;
|
|
14
|
+
nsServer;
|
|
15
|
+
contextBuilder;
|
|
16
|
+
constructor(ns, contextBuilder, nsServer) {
|
|
17
|
+
this.ns = ns;
|
|
18
|
+
this.nsServer = nsServer;
|
|
19
|
+
this.contextBuilder = contextBuilder;
|
|
20
|
+
this.startup();
|
|
21
|
+
}
|
|
22
|
+
/**
|
|
23
|
+
* 来自外部的socket连接,监听数据变化
|
|
24
|
+
*/
|
|
25
|
+
startup() {
|
|
26
|
+
this.ns.on('connection', async (socket) => {
|
|
27
|
+
try {
|
|
28
|
+
const { instanceId } = (0, env_1.getClusterInfo)();
|
|
29
|
+
// console.log('on connection', instanceId);
|
|
30
|
+
socket.on('sub', async (events) => {
|
|
31
|
+
events.forEach((event) => socket.join(event));
|
|
32
|
+
});
|
|
33
|
+
socket.on('unsub', (events) => {
|
|
34
|
+
// console.log('instance:', process.env.NODE_APP_INSTANCE, 'on unsub', JSON.stringify(ids));
|
|
35
|
+
events.forEach((id) => {
|
|
36
|
+
socket.leave(id);
|
|
37
|
+
});
|
|
38
|
+
});
|
|
39
|
+
}
|
|
40
|
+
catch (err) {
|
|
41
|
+
socket.emit('error', err.toString());
|
|
42
|
+
}
|
|
43
|
+
});
|
|
44
|
+
if (this.nsServer) {
|
|
45
|
+
this.nsServer.on('connection', async (socket) => {
|
|
46
|
+
try {
|
|
47
|
+
const { instanceId } = (0, env_1.getClusterInfo)();
|
|
48
|
+
console.log('on nsServer connection', instanceId);
|
|
49
|
+
socket.on('sub', async (events) => {
|
|
50
|
+
console.log('on nsServer sub', instanceId, events);
|
|
51
|
+
events.forEach((event) => socket.join(event));
|
|
52
|
+
});
|
|
53
|
+
socket.on('unsub', (events) => {
|
|
54
|
+
// console.log('instance:', process.env.NODE_APP_INSTANCE, 'on unsub', JSON.stringify(ids));
|
|
55
|
+
events.forEach((id) => {
|
|
56
|
+
socket.leave(id);
|
|
57
|
+
});
|
|
58
|
+
});
|
|
59
|
+
}
|
|
60
|
+
catch (err) {
|
|
61
|
+
socket.emit('error', err.toString());
|
|
62
|
+
}
|
|
63
|
+
});
|
|
64
|
+
}
|
|
65
|
+
}
|
|
66
|
+
publishEvent(event, records, sid) {
|
|
67
|
+
const { instanceId } = (0, env_1.getClusterInfo)();
|
|
68
|
+
// console.log('publishEvent', instanceId);
|
|
69
|
+
if (sid) {
|
|
70
|
+
this.ns.to(event).except(sid).emit('data', records, event);
|
|
71
|
+
}
|
|
72
|
+
else {
|
|
73
|
+
this.ns.to(event).emit('data', records, event);
|
|
74
|
+
}
|
|
75
|
+
}
|
|
76
|
+
publishVolatileTrigger(entity, name, instanceNumber, ids, cxtStr, option) {
|
|
77
|
+
const { instanceId } = (0, env_1.getClusterInfo)();
|
|
78
|
+
// console.log('publishVolatileTrigger', instanceId, instanceNumber);
|
|
79
|
+
(0, console_1.assert)(this.nsServer);
|
|
80
|
+
this.nsServer.to(`${name}-${instanceNumber}`).emit('data', entity, name, ids, cxtStr, option);
|
|
81
|
+
}
|
|
82
|
+
}
|
|
83
|
+
exports.default = DataSubscriber;
|
package/lib/cluster/env.d.ts
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
|
-
import { ClusterInfo } from 'oak-domain/lib/types/Cluster';
|
|
2
|
-
/**
|
|
3
|
-
* 得到当前环境的集群信息
|
|
4
|
-
*/
|
|
5
|
-
export declare function getClusterInfo(): ClusterInfo;
|
|
1
|
+
import { ClusterInfo } from 'oak-domain/lib/types/Cluster';
/**
 * Get the cluster info of the current environment.
 */
export declare function getClusterInfo(): ClusterInfo;
|
package/lib/cluster/env.js
CHANGED
|
@@ -1,55 +1,55 @@
|
|
|
1
|
-
"use strict";
|
|
2
|
-
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
-
exports.getClusterInfo = void 0;
|
|
4
|
-
function getProcessEnvOption(option) {
|
|
5
|
-
if (process.env.hasOwnProperty(option)) {
|
|
6
|
-
return process.env[option];
|
|
7
|
-
}
|
|
8
|
-
const lowerCase = option.toLowerCase();
|
|
9
|
-
if (process.env.hasOwnProperty(lowerCase)) {
|
|
10
|
-
return process.env[lowerCase];
|
|
11
|
-
}
|
|
12
|
-
const upperCase = option.toUpperCase();
|
|
13
|
-
if (process.env.hasOwnProperty(upperCase)) {
|
|
14
|
-
return process.env[upperCase];
|
|
15
|
-
}
|
|
16
|
-
}
|
|
17
|
-
// 初始化判定集群状态,需要在环境变量中注入两个值
|
|
18
|
-
/** pm2注入方法,见:https://pm2.fenxianglu.cn/docs/general/environment-variables
|
|
19
|
-
* apps: [
|
|
20
|
-
{
|
|
21
|
-
name: 'xxx',
|
|
22
|
-
script: "xxxjs",
|
|
23
|
-
instances: "2",
|
|
24
|
-
increment_var: "OAK_INSTANCE_ID",
|
|
25
|
-
env: {
|
|
26
|
-
OAK_INSTANCE_CNT: 9,
|
|
27
|
-
OAK_INSTANCE_ID: 8,
|
|
28
|
-
}
|
|
29
|
-
},
|
|
30
|
-
],
|
|
31
|
-
**/
|
|
32
|
-
function initialize() {
|
|
33
|
-
const instanceIdStr = getProcessEnvOption('OAK_INSTANCE_ID');
|
|
34
|
-
if (instanceIdStr) {
|
|
35
|
-
const usingCluster = true;
|
|
36
|
-
const instanceId = parseInt(instanceIdStr);
|
|
37
|
-
const instanceCount = parseInt(getProcessEnvOption('OAK_INSTANCE_CNT'));
|
|
38
|
-
return {
|
|
39
|
-
usingCluster,
|
|
40
|
-
instanceCount,
|
|
41
|
-
instanceId,
|
|
42
|
-
};
|
|
43
|
-
}
|
|
44
|
-
return {
|
|
45
|
-
usingCluster: false,
|
|
46
|
-
};
|
|
47
|
-
}
|
|
48
|
-
const MyClusterInfo = initialize();
|
|
49
|
-
/**
|
|
50
|
-
* 得到当前环境的集群信息
|
|
51
|
-
*/
|
|
52
|
-
function getClusterInfo() {
|
|
53
|
-
return MyClusterInfo;
|
|
54
|
-
}
|
|
55
|
-
exports.getClusterInfo = getClusterInfo;
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.getClusterInfo = void 0;
|
|
4
|
+
/**
 * Read an option from process.env, trying the name exactly as given, then its
 * lower-case variant, then its upper-case variant.
 * @param {string} option environment variable name
 * @returns {string | undefined} the value, or undefined when none of the variants is set
 */
function getProcessEnvOption(option) {
    if (process.env.hasOwnProperty(option)) {
        return process.env[option];
    }
    const lowerCase = option.toLowerCase();
    if (process.env.hasOwnProperty(lowerCase)) {
        return process.env[lowerCase];
    }
    const upperCase = option.toUpperCase();
    if (process.env.hasOwnProperty(upperCase)) {
        return process.env[upperCase];
    }
}
// Determine the cluster state at startup; two variables must be injected into the environment.
/** pm2 injection method, see: https://pm2.fenxianglu.cn/docs/general/environment-variables
 * apps: [
 {
 name: 'xxx',
 script: "xxxjs",
 instances: "2",
 increment_var: "OAK_INSTANCE_ID",
 env: {
 OAK_INSTANCE_CNT: 9,
 OAK_INSTANCE_ID: 8,
 }
 },
 ],
 **/
function initialize() {
    const instanceIdStr = getProcessEnvOption('OAK_INSTANCE_ID');
    if (instanceIdStr) {
        const usingCluster = true;
        // Fix: always pass an explicit radix to parseInt.
        const instanceId = Number.parseInt(instanceIdStr, 10);
        // NOTE(review): if OAK_INSTANCE_CNT is missing this yields NaN, as before — confirm callers tolerate it.
        const instanceCount = Number.parseInt(getProcessEnvOption('OAK_INSTANCE_CNT'), 10);
        return {
            usingCluster,
            instanceCount,
            instanceId,
        };
    }
    return {
        usingCluster: false,
    };
}
// Computed once at module load; the cluster topology does not change at runtime.
const MyClusterInfo = initialize();
/**
 * Get the cluster info of the current environment.
 */
function getClusterInfo() {
    return MyClusterInfo;
}
|
|
55
|
+
exports.getClusterInfo = getClusterInfo;
|
package/lib/index.d.ts
CHANGED
|
@@ -1,3 +1,3 @@
|
|
|
1
|
-
export { AppLoader } from './AppLoader';
|
|
2
|
-
export { ClusterAppLoader } from './ClusterAppLoader';
|
|
3
|
-
export * from './cluster/env';
|
|
1
|
+
// Public API of oak-backend-base: the app loaders plus the cluster env helpers.
export { AppLoader } from './AppLoader';
export { ClusterAppLoader } from './ClusterAppLoader';
export * from './cluster/env';
|
package/lib/index.js
CHANGED
|
@@ -1,9 +1,9 @@
|
|
|
1
|
-
"use strict";
|
|
2
|
-
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
-
exports.ClusterAppLoader = exports.AppLoader = void 0;
|
|
4
|
-
const tslib_1 = require("tslib");
|
|
5
|
-
var AppLoader_1 = require("./AppLoader");
|
|
6
|
-
Object.defineProperty(exports, "AppLoader", { enumerable: true, get: function () { return AppLoader_1.AppLoader; } });
|
|
7
|
-
var ClusterAppLoader_1 = require("./ClusterAppLoader");
|
|
8
|
-
Object.defineProperty(exports, "ClusterAppLoader", { enumerable: true, get: function () { return ClusterAppLoader_1.ClusterAppLoader; } });
|
|
9
|
-
tslib_1.__exportStar(require("./cluster/env"), exports);
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.ClusterAppLoader = exports.AppLoader = void 0;
|
|
4
|
+
const tslib_1 = require("tslib");
|
|
5
|
+
var AppLoader_1 = require("./AppLoader");
|
|
6
|
+
Object.defineProperty(exports, "AppLoader", { enumerable: true, get: function () { return AppLoader_1.AppLoader; } });
|
|
7
|
+
var ClusterAppLoader_1 = require("./ClusterAppLoader");
|
|
8
|
+
Object.defineProperty(exports, "ClusterAppLoader", { enumerable: true, get: function () { return ClusterAppLoader_1.ClusterAppLoader; } });
|
|
9
|
+
tslib_1.__exportStar(require("./cluster/env"), exports);
|
package/lib/polyfill.d.ts
CHANGED
|
@@ -1,3 +1,3 @@
|
|
|
1
|
-
export type GenerateIdOption = {
|
|
2
|
-
shuffle?: boolean;
|
|
3
|
-
};
|
|
1
|
+
/** Options accepted by the id-generation polyfill. */
export type GenerateIdOption = {
    // NOTE(review): presumably randomizes the generated id's character order — confirm against the implementation.
    shuffle?: boolean;
};
|