antietcd 1.0.0
This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
- package/README.md +501 -0
- package/anticli.js +263 -0
- package/anticluster.js +526 -0
- package/antietcd-app.js +122 -0
- package/antietcd.d.ts +155 -0
- package/antietcd.js +552 -0
- package/antipersistence.js +138 -0
- package/common.js +38 -0
- package/etctree.js +875 -0
- package/package.json +55 -0
- package/stable-stringify.js +78 -0
package/anticluster.js
ADDED
@@ -0,0 +1,526 @@
+// Clustering for AntiEtcd
+// (c) Vitaliy Filippov, 2024
+// License: Mozilla Public License 2.0 or Vitastor Network Public License 1.1
+
+const ws = require('ws');
+
+const TinyRaft = require('tinyraft');
+const { runCallbacks, RequestError } = require('./common.js');
+
+const LEADER_MISMATCH = 'raft leader/term mismatch';
+
+const LEADER_ONLY = 1;
+const NO_WAIT_QUORUM = 2;
+const READ_FROM_FOLLOWER = 4;
+
+class AntiCluster
+{
+    constructor(antietcd)
+    {
+        this.antietcd = antietcd;
+        this.cfg = antietcd.cfg;
+        this.cluster_connections = {};
+        this.last_request_id = 1;
+        this.subrequests = {};
+        this.synced = false;
+        this.wait_sync = [];
+        if (!this.cfg.node_id || !this.cfg.cluster_key)
+        {
+            throw new Error('node_id and cluster_key are required in configuration if cluster is set');
+        }
+        if (!(this.cfg.cluster instanceof Object))
+        {
+            this.cfg.cluster = (''+this.cfg.cluster).trim().split(/[\s,]*,[\s,]*/)
+                .reduce((a, c) => { c = c.split(/\s*=\s*/); a[c[0]] = c[1]; return a; }, {});
+        }
+        this.raft = new TinyRaft({
+            nodes: Object.keys(this.cfg.cluster),
+            nodeId: this.cfg.node_id,
+            heartbeatTimeout: this.cfg.heartbeat_timeout,
+            electionTimeout: this.cfg.election_timeout,
+            leaderPriority: this.cfg.leader_priority||undefined,
+            initialTerm: this.antietcd.stored_term,
+            send: (to, msg) => this._sendRaftMessage(to, msg),
+        });
+        this.raft.on('change', (event) => this._handleRaftChange(event));
+        this.raft.start();
+        // Connect to all nodes and reconnect forever
+        for (const node_id in this.cfg.cluster)
+        {
+            this.connectToNode(node_id);
+        }
+    }
+
+    connectToNode(node_id)
+    {
+        if (node_id != this.cfg.node_id && this.cfg.cluster[node_id] &&
+            (!this.cluster_connections[node_id] || !this.antietcd.clients[this.cluster_connections[node_id]]))
+        {
+            const socket = new ws.WebSocket(this.cfg.cluster[node_id].replace(/^http/, 'ws'), this.antietcd.tls);
+            const client_id = this.antietcd._startWebsocket(socket, () => setTimeout(() => this.connectToNode(node_id), this.cfg.reconnect_interval||1000));
+            this.cluster_connections[node_id] = client_id;
+            socket.on('open', () =>
+            {
+                if (this.antietcd.clients[client_id])
+                {
+                    this.antietcd.clients[client_id].ready = true;
+                    this.antietcd.clients[client_id].raft_node_id = node_id;
+                    this.antietcd.clients[client_id].addr = socket._socket.remoteAddress+':'+socket._socket.remotePort;
+                    socket.send(JSON.stringify({ identify: { key: this.cfg.cluster_key, node_id: this.cfg.node_id } }));
+                    this.raft.start();
+                }
+            });
+        }
+    }
+
+    _peerRequest(client, request, timeout)
+    {
+        const request_id = this.last_request_id++;
+        request.request_id = request_id;
+        client.socket.send(JSON.stringify(request));
+        const req = this.subrequests[request_id] = { client_id: client.id };
+        const promise = new Promise(ok => req.cb = ok);
+        req.timer_id = setTimeout(() => this._completeRequest(null, request_id, { error: 'timeout' }), timeout);
+        return promise;
+    }
+
+    async replicateChange(msg)
+    {
+        if (this.raft.state !== TinyRaft.LEADER)
+        {
+            return;
+        }
+        const mod_revision = this.antietcd.etctree.mod_revision;
+        await this._requestFollowers({ replicate: msg }, this.cfg.replication_timeout||1000);
+        // We have a guarantee that all revisions before mod_revision are applied by followers,
+        // because replication messages are either processed synchronously or serialized in
+        // AntiPersistence against <wait_persist>
+        this.sync_revision = mod_revision;
+        if (this.sync_revision - this.antietcd.etctree.compact_revision > (this.cfg.compact_revisions||1000)*2)
+        {
+            const revision = this.sync_revision - (this.cfg.compact_revisions||1000);
+            await this._requestFollowers({ compact: { revision } }, this.cfg.compact_timeout||1000);
+            this.antietcd.etctree.compact(revision);
+        }
+    }
+
+    _log(msg)
+    {
+        if (this.cfg.log_level > 0)
+        {
+            console.log(msg);
+        }
+    }
+
+    async _requestFollowers(msg, timeout)
+    {
+        msg.term = this.raft.term;
+        const followers = this.raft.followers;
+        for (const follower of followers)
+        {
+            if (follower != this.cfg.node_id)
+            {
+                const client = this._getPeer(follower);
+                if (!client)
+                {
+                    // One of peers is unavailable - immediate failure, request should be retried
+                    this._log('Lost peer connection during replication - restarting election');
+                    this.raft.start();
+                    throw new RequestError(503, 'Peer connection is lost, please retry request');
+                }
+            }
+        }
+        const promises = [];
+        for (const follower of followers)
+        {
+            if (follower != this.cfg.node_id)
+            {
+                const client = this._getPeer(follower);
+                const promise = this._peerRequest(client, msg, timeout);
+                promises.push(promise);
+            }
+        }
+        const results = await Promise.all(promises);
+        let i = 0;
+        for (const follower of followers)
+        {
+            if (follower != this.cfg.node_id)
+            {
+                const result = results[i];
+                if (!result || result.error)
+                {
+                    // One of peers is unavailable - immediate failure, request should be retried
+                    this._log('Replication failed ('+follower+': '+(result ? result.error : 'no result')+') - restarting election');
+                    this.raft.start();
+                    throw new RequestError(503, 'Replication failed, please retry request');
+                }
+                i++;
+            }
+        }
+    }
+
+    _completeRequest(client_id, request_id, result)
+    {
+        const req = this.subrequests[request_id];
+        if (!req || client_id && req.client_id != client_id)
+        {
+            return;
+        }
+        delete this.subrequests[request_id];
+        if (req.timer_id)
+        {
+            clearTimeout(req.timer_id);
+            req.timer_id = null;
+        }
+        req.cb(result);
+    }
+
+    _handleRaftChange(event)
+    {
+        this.antietcd.emit('raftchange', event);
+        this._log(
+            'Raft '+this.cfg.node_id+': '+(event.state == TinyRaft.FOLLOWER ? 'following '+event.leader : event.state)+
+            ', term '+event.term+(event.state == TinyRaft.LEADER ? ', followers: '+event.followers.join(', ') : '')
+        );
+        if (event.state == TinyRaft.LEADER)
+        {
+            // (Re)sync with the new set of followers
+            this._resync(event.followers);
+            this.antietcd.etctree.resume_leases();
+        }
+        else
+        {
+            this.synced = false;
+            this.antietcd.etctree.pause_leases();
+        }
+    }
+
+    _resync(followers)
+    {
+        this.synced = false;
+        if (!this.resync_state)
+        {
+            this.resync_state = {
+                dumps: {},
+                loads: {},
+            };
+        }
+        const seen = {};
+        for (const f of followers)
+        {
+            seen[f] = true;
+            if (f != this.cfg.node_id && !(f in this.resync_state.dumps))
+            {
+                const client = this._getPeer(f);
+                if (client)
+                {
+                    this.resync_state.dumps[f] = null;
+                    this._peerRequest(client, { request: {}, handler: 'dump' }, this.cfg.dump_timeout||5000).then(res =>
+                    {
+                        if (this.resync_state && client.raft_node_id &&
+                            (client.raft_node_id in this.resync_state.dumps))
+                        {
+                            if (res.error)
+                            {
+                                console.error(client.raft_node_id+' dump failed with error: '+res.error);
+                            }
+                            else
+                            {
+                                this._log('Got dump from '+client.raft_node_id+' with stored term '+res.term);
+                            }
+                            this.resync_state.dumps[client.raft_node_id] = res.error ? null : res;
+                            this._continueResync();
+                        }
+                    });
+                }
+            }
+        }
+        for (const f in this.resync_state.dumps)
+        {
+            if (!seen[f])
+            {
+                delete this.resync_state.dumps[f];
+            }
+        }
+        this._continueResync();
+    }
+
+    _continueResync()
+    {
+        if (Object.values(this.resync_state.dumps).filter(d => !d).length > 0)
+        {
+            // Some dump(s) are still pending
+            return;
+        }
+        this.resync_state.dumps[this.cfg.node_id] = { ...this.antietcd.etctree.dump(), term: this.antietcd.stored_term };
+        let max_term = -1, with_max = [];
+        for (const follower in this.resync_state.dumps)
+        {
+            const dump = this.resync_state.dumps[follower];
+            if (dump.term > max_term)
+            {
+                max_term = dump.term;
+                with_max = [ follower ];
+            }
+            else if (dump.term == max_term)
+            {
+                with_max.push(follower);
+            }
+        }
+        if (max_term < 0 || with_max.length == 0)
+        {
+            throw new Error('BUG: no max term during resync');
+        }
+        this._log('Local term '+this.antietcd.stored_term+', max follower term '+max_term+' at nodes '+with_max.join(', '));
+        with_max = with_max.filter(w => w != this.cfg.node_id);
+        // Merge databases of all nodes with maximum term
+        // Force other nodes to replicate the merged DB, throwing away their own states
+        for (let i = 0; i < with_max.length; i++)
+        {
+            const update_only = !(i == 0 && this.antietcd.stored_term != max_term);
+            this._log(update_only ? 'Updating database from node '+with_max[i]+' state' : 'Copying node '+with_max[i]+' state');
+            this.antietcd.etctree.load(this.resync_state.dumps[with_max[i]], update_only);
+        }
+        let wait = 0;
+        const load_request = { term: this.raft.term, load: this.antietcd.etctree.dump() };
+        for (const follower in this.resync_state.dumps)
+        {
+            if (follower != this.cfg.node_id)
+            {
+                const dump = this.resync_state.dumps[follower];
+                if (dump.term <= max_term)
+                {
+                    const client = this._getPeer(follower);
+                    if (!client)
+                    {
+                        this._log('Lost peer connection during resync - restarting election');
+                        this.raft.start();
+                        return;
+                    }
+                    this._log('Copying state to '+follower);
+                    const loadstate = this.resync_state.loads[follower] = {};
+                    wait++;
+                    this._peerRequest(client, load_request, this.cfg.load_timeout||5000).then(res =>
+                    {
+                        loadstate.result = res;
+                        this._finishResync();
+                    });
+                }
+            }
+        }
+        if (!wait)
+        {
+            this._finishResync();
+        }
+    }
+
+    _finishResync()
+    {
+        if (Object.values(this.resync_state.dumps).filter(d => !d).length > 0 ||
+            Object.values(this.resync_state.loads).filter(d => !d.result).length > 0)
+        {
+            return;
+        }
+        // All current peers have copied the database, we can proceed
+        this.antietcd.stored_term = this.raft.term;
+        this.synced = true;
+        runCallbacks(this, 'wait_sync', []);
+        this._log('Synchronized with followers, new term is '+this.raft.term);
+    }
+
+    _isWrite(path, data)
+    {
+        if (path == 'kv_txn')
+        {
+            return ((data.compare && data.compare.length) ||
+                (data.success && data.success.filter(f => f.request_put || f.requestPut || f.request_delete_range || f.requestDeleteRange).length) ||
+                (data.failure && data.failure.filter(f => f.request_put || f.requestPut || f.request_delete_range || f.requestDeleteRange).length));
+        }
+        return path != 'kv_range';
+    }
+
+    async checkRaftState(path, leaderonly, data)
+    {
+        if (!this.raft)
+        {
+            return null;
+        }
+        if (leaderonly == LEADER_ONLY && this.raft.state != TinyRaft.LEADER)
+        {
+            throw new RequestError(503, 'Not leader');
+        }
+        if (leaderonly == NO_WAIT_QUORUM && this.raft.state == TinyRaft.CANDIDATE)
+        {
+            throw new RequestError(503, 'Quorum not available');
+        }
+        if (!this.synced)
+        {
+            // Wait for quorum / initial sync with timeout
+            await new Promise((ok, no) =>
+            {
+                this.wait_sync.push(ok);
+                setTimeout(() =>
+                {
+                    this.wait_sync = this.wait_sync.filter(cb => cb != ok);
+                    no(new RequestError(503, 'Quorum not available'));
+                }, this.cfg.wait_quorum_timeout||30000);
+            });
+        }
+        if (this.raft.state == TinyRaft.FOLLOWER &&
+            (this._isWrite(path, data) || !this.cfg.stale_read && !(leaderonly & READ_FROM_FOLLOWER)))
+        {
+            // Forward to leader
+            return await this._forwardToLeader(path, data);
+        }
+        return null;
+    }
+
+    async _forwardToLeader(handler, data)
+    {
+        const client = this._getPeer(this.raft.leader);
+        if (!client)
+        {
+            throw new RequestError(503, 'Leader is unavailable');
+        }
+        return await this._peerRequest(client, { handler, request: data }, this.cfg.forward_timeout||1000);
+    }
+
+    handleWsMsg(client, msg)
+    {
+        if (msg.raft)
+        {
+            if (client.raft_node_id)
+            {
+                this.raft.onReceive(client.raft_node_id, msg.raft);
+            }
+        }
+        else if (msg.identify)
+        {
+            if (msg.identify.key === this.cfg.cluster_key &&
+                msg.identify.node_id != this.cfg.node_id)
+            {
+                client.raft_node_id = msg.identify.node_id;
+                this._log('Got a connection from '+client.raft_node_id);
+            }
+        }
+        else if (msg.load)
+        {
+            this._handleLoadMsg(client, msg).catch(console.error);
+        }
+        else if (msg.replicate)
+        {
+            this._handleReplicateMsg(client, msg).catch(console.error);
+        }
+        else if (msg.request)
+        {
+            this._handleRequestMsg(client, msg).catch(console.error);
+        }
+        else if (msg.reply)
+        {
+            this._completeRequest(client.id, msg.request_id, msg.reply);
+        }
+        else if (msg.compact)
+        {
+            this._handleCompactMsg(client, msg);
+        }
+    }
+
+    async _handleRequestMsg(client, msg)
+    {
+        try
+        {
+            const res = await this.antietcd.api(msg.handler, msg.request);
+            client.socket.send(JSON.stringify({ request_id: msg.request_id, reply: res }));
+        }
+        catch (e)
+        {
+            console.error(e);
+            client.socket.send(JSON.stringify({ request_id: msg.request_id, reply: { error: e.message } }));
+        }
+    }
+
+    async _handleLoadMsg(client, msg)
+    {
+        if (client.raft_node_id && this.raft.state == TinyRaft.FOLLOWER &&
+            this.raft.leader === client.raft_node_id && this.raft.term == msg.term)
+        {
+            this.antietcd.etctree.load(msg.load);
+            if (this.antietcd.persistence)
+            {
+                await this.antietcd.persistence.persist();
+            }
+            this.antietcd.stored_term = msg.term;
+            this.synced = true;
+            runCallbacks(this, 'wait_sync', []);
+            this._log('Synchronized with leader, new term is '+msg.term);
+            client.socket.send(JSON.stringify({ request_id: msg.request_id, reply: {} }));
+        }
+        else
+        {
+            client.socket.send(JSON.stringify({ request_id: msg.request_id, reply: { error: LEADER_MISMATCH } }));
+        }
+    }
+
+    async _handleReplicateMsg(client, msg)
+    {
+        if (client.raft_node_id && this.raft.state == TinyRaft.FOLLOWER &&
+            this.raft.leader === client.raft_node_id && this.raft.term == msg.term)
+        {
+            await this.antietcd.etctree.apply_replication(msg.replicate);
+            client.socket.send(JSON.stringify({ request_id: msg.request_id, reply: {} }));
+        }
+        else
+        {
+            client.socket.send(JSON.stringify({ request_id: msg.request_id, reply: { error: LEADER_MISMATCH } }));
+        }
+    }
+
+    _handleCompactMsg(client, msg)
+    {
+        if (client.raft_node_id && this.raft.state == TinyRaft.FOLLOWER &&
+            this.raft.leader === client.raft_node_id && this.raft.term == msg.term)
+        {
+            this.antietcd.etctree.compact(msg.compact.revision);
+            this._log('Compacted deletions up to '+msg.compact.revision);
+            client.socket.send(JSON.stringify({ request_id: msg.request_id, reply: {} }));
+        }
+        else
+        {
+            client.socket.send(JSON.stringify({ request_id: msg.request_id, reply: { error: LEADER_MISMATCH } }));
+        }
+    }
+
+    _getPeer(to)
+    {
+        if (to == this.cfg.node_id)
+        {
+            throw new Error('BUG: attempt to get connection to self');
+        }
+        const client_id = this.cluster_connections[to];
+        if (!client_id)
+        {
+            return null;
+        }
+        const client = this.antietcd.clients[client_id];
+        if (!client || !client.ready)
+        {
+            return null;
+        }
+        return client;
+    }
+
+    _sendRaftMessage(to, msg)
+    {
+        const client = this._getPeer(to);
+        if (client)
+        {
+            client.socket.send(JSON.stringify({ raft: msg }));
+        }
+    }
+}
+
+AntiCluster.LEADER_ONLY = LEADER_ONLY;
+AntiCluster.NO_WAIT_QUORUM = NO_WAIT_QUORUM;
+AntiCluster.READ_FROM_FOLLOWER = READ_FROM_FOLLOWER;
+
+module.exports = AntiCluster;
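
A quick illustration of the cluster option format accepted by the constructor above: it can be either an object or a comma-separated id=url string. The following standalone sketch mirrors that parsing step; the parseCluster helper and the sample node URLs are illustrative, not part of the package:

// Turn 'node1=http://localhost:12379,node2=http://localhost:12380'
// into { node1: 'http://localhost:12379', node2: 'http://localhost:12380' },
// mirroring the reduce() in AntiCluster's constructor.
function parseCluster(str)
{
    return (''+str).trim().split(/[\s,]*,[\s,]*/)
        .reduce((a, c) => { c = c.split(/\s*=\s*/); a[c[0]] = c[1]; return a; }, {});
}

console.log(parseCluster('node1=http://localhost:12379, node2=http://localhost:12380'));
// -> { node1: 'http://localhost:12379', node2: 'http://localhost:12380' }
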
package/antietcd-app.js
ADDED
@@ -0,0 +1,122 @@
+#!/usr/bin/env node
+
+// AntiEtcd server application
+// (c) Vitaliy Filippov, 2024
+// License: Mozilla Public License 2.0 or Vitastor Network Public License 1.1
+
+const AntiEtcd = require('./antietcd.js');
+
+const help_text = `Miniature etcd replacement based on TinyRaft
+(c) Vitaliy Filippov, 2024
+License: Mozilla Public License 2.0 or Vitastor Network Public License 1.1
+
+Usage:
+
+${process.argv[0]} ${process.argv[1]} \
+    [--cert ssl.crt] [--key ssl.key] [--port 12379] \
+    [--data data.gz] [--persist-filter ./filter.js] [--persist_interval 500] \
+    [--node_id node1 --cluster_key abcdef --cluster node1=http://localhost:12379,node2=http://localhost:12380,node3=http://localhost:12381] \
+    [other options]
+
+Supported etcd REST APIs:
+
+/v3/kv/txn /v3/kv/put /v3/kv/range /v3/kv/deleterange
+/v3/lease/grant /v3/lease/keepalive /v3/lease/revoke /v3/kv/lease/revoke
+websocket-based watch API (create_request, cancel_request, progress_request)
+
+Options:
+
+HTTP:
+
+--port 2379
+    Listen port
+--cert <cert>
+    Use TLS with this certificate file (PEM format)
+--key <key>
+    Use TLS with this key file (PEM format)
+--ca <ca>
+    Use trusted root certificates from this file.
+    Specify <ca> = <cert> if your certificate is self-signed.
+--client_cert_auth 1
+    Require TLS client certificates signed by <ca> or by default CA to connect.
+--ws_keepalive_interval 30000
+    Client websocket ping (keepalive) interval in milliseconds
+
+Persistence:
+
+--data <filename>
+    Store persistent data in <filename>
+--persist_interval <milliseconds>
+    Persist data on disk after this interval, not immediately after change
+--persist_filter ./filter.js
+    Use persistence filter from ./filter.js (or a module).
+    Persistence filter is a function(cfg) returning function(key, value), run
+    for every change and returning a new value or undefined to skip persistence.
+--compact_revisions 1000
+    Number of previous revisions to keep deletion information in memory
+
+Clustering:
+
+--node_id <id>
+    ID of this cluster node
+--cluster <id1>=<url1>,<id2>=<url2>,...
+    All other cluster nodes
+--cluster_key <key>
+    Shared cluster key for identification
+--election_timeout 5000
+    Raft election timeout
+--heartbeat_timeout 1000
+    Raft leader heartbeat timeout
+--wait_quorum_timeout 30000
+    Timeout for requests to wait for quorum to come up
+--leader_priority <number>
+    Raft leader priority for this node (optional)
+--stale_read 1
+    Allow serving reads from followers. Specify 0 to disallow
+--reconnect_interval 1000
+    Unavailable peer connection retry interval
+--dump_timeout 5000
+    Timeout for dump command in milliseconds
+--load_timeout 5000
+    Timeout for load command in milliseconds
+--forward_timeout 1000
+    Timeout for forwarding requests from follower to leader in milliseconds
+--replication_timeout 1000
+    Timeout for replicating requests from leader to follower in milliseconds
+--compact_timeout 1000
+    Timeout for compaction requests from leader to follower in milliseconds
+`;
+
+function parse()
+{
+    const options = { stale_read: 1 };
+    for (let i = 2; i < process.argv.length; i++)
+    {
+        const arg = process.argv[i].toLowerCase().replace(/^--(.+)$/, (m, m1) => '--'+m1.replace(/-/g, '_'));
+        if (arg === '-h' || arg === '--help')
+        {
+            process.stderr.write(help_text);
+            process.exit();
+        }
+        else if (arg.substr(0, 2) == '--')
+        {
+            options[arg.substr(2)] = process.argv[++i];
+        }
+    }
+    options['stale_read'] = options['stale_read'] === 1 || options['stale_read'] === '1' || options['stale_read'] === 'yes' || options['stale_read'] === 'true';
+    if (options['persist_filter'])
+    {
+        options['persist_filter'] = require(options['persist_filter'])(options);
+    }
+    return options;
+}
+
+const antietcd = new AntiEtcd(parse());
+
+// Set exit hook
+const on_stop_cb = async () => { await antietcd.stop(); process.exit(0); };
+process.on('SIGINT', on_stop_cb);
+process.on('SIGTERM', on_stop_cb);
+process.on('SIGQUIT', on_stop_cb);
+
+antietcd.start().catch(console.error);
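
As documented under --persist_filter above, a filter module exports function(cfg) returning function(key, value), and returning undefined skips persisting a change. A minimal sketch of such a module (the filter.js name and the /config/ key prefix are made-up examples, not part of the package):

// filter.js - persist only keys under /config/; all other changes stay
// in memory but are skipped in the on-disk snapshot.
module.exports = function(cfg)
{
    return function(key, value)
    {
        return key.substr(0, 8) == '/config/' ? value : undefined;
    };
};

It would then be enabled with, for example: node antietcd-app.js --data data.gz --persist_filter ./filter.js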