@yz-social/kdht 0.1.2 → 0.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/ci.yml +27 -0
- package/dht/kbucket.js +2 -2
- package/dht/node.js +67 -90
- package/dht/nodeContacts.js +7 -5
- package/dht/nodeMessages.js +44 -4
- package/dht/nodeProbe.js +240 -47
- package/dht/nodeRefresh.js +6 -4
- package/dht/nodeStorage.js +4 -0
- package/dht/nodeTransports.js +3 -3
- package/dht/nodeUtilities.js +1 -1
- package/package.json +8 -18
- package/portals/node.js +7 -0
- package/spec/bots.js +13 -3
- package/spec/dhtAcceptanceSpec.js +23 -9
- package/spec/dhtImplementation.js +3 -3
- package/spec/dhtInternalsSpec.js +304 -15
- package/spec/dhtKeySpec.js +0 -3
- package/spec/dhtWriteReadSpec.js +85 -0
- package/spec/portal.js +6 -3
- package/transports/contact.js +88 -20
- package/transports/simulations.js +87 -20
- package/transports/webrtc.js +99 -97
- package/spec/dhtWriteRead.js +0 -56
package/spec/dhtInternalsSpec.js
CHANGED
@@ -1,14 +1,15 @@
-import { Node, KBucket,
+import { Node, KBucket, SimulatedContact, Helper } from '../index.js';
 const { describe, it, expect, beforeAll, afterAll, BigInt} = globalThis; // For linters.
 
 describe("DHT internals", function () {
 beforeAll(function () {
-//
-
-
+// Subtle: None of these tests depend on automatic refresh (of buckets or storage), but some
+// of the tests trigger the refresh. By doing this before we start, the nodes will not schedule any refresh.
+// If we failed to do that, then the refreshes would continue to happen after the test, when other
+// tests might be running.
+// Note: Do not fail to set Node.refreshTimeIntervalMS in such other tests that need it.
 Node.stopRefresh();
 });
-
 describe("structure", function () {
 let example;
 beforeAll(async function () {
@@ -32,7 +33,7 @@ describe("DHT internals", function () {
 expect(retrieved).toBeUndefined();
 });
 });
-
+
 describe("report", function () {
 beforeAll(async function () { // Add some data for which we know the expected internal structure.
 example.storeLocally(await Node.key("foo"), 17); // May or may not have already been set to same value, depending on test order.
@@ -64,12 +65,12 @@ describe("DHT internals", function () {
 });
 });
 });
-
+
 describe("operations", function () {
 const one = 1n;
 const two = 2n;
 const three = 3n;
-const max =
+const max = one << BigInt(Node.keySize);
 describe("commonPrefixLength", function () {
 it("is keySize for 0n.", function () {
 expect(Node.commonPrefixLength(Node.zero)).toBe(Node.keySize);
@@ -87,7 +88,7 @@ describe("DHT internals", function () {
 describe("getBucketIndex", function () {
 let node;
 beforeAll(function () {
-node = Node.fromKey(Node.zero);
+node = Node.fromKey(Node.zero);
 });
 it("bucket keySize -1 is farthest.", function () {
 const distance = max - Node.one; // max distance within nTagBits. All bits on.
@@ -112,6 +113,7 @@ describe("DHT internals", function () {
 node = await Node.create();
 });
 function test(bucketIndex) {
+// console.log(`[dhtInternalsSpec:index=${bucketIndex}]`);
 it(`computes random of ${bucketIndex}.`, function () {
 const random = node.ensureBucket(bucketIndex).randomTarget;
 const computedBucket = node.getBucketIndex(random);
@@ -131,7 +133,7 @@ describe("DHT internals", function () {
 const bucket0 = new KBucket(node, 0);
 const bucket10 = new KBucket(node, 10);
 const bucket60 = new KBucket(node, 60);
-const bucket90 = new KBucket(node, 90);
+const bucket90 = new KBucket(node, 90);
 const addTo = async bucket => {
 const key = bucket.randomTarget;
 keys.push(key);
@@ -142,9 +144,9 @@ describe("DHT internals", function () {
 await addTo(bucket60);
 await addTo(bucket90);
 node.routingTable.set(0, bucket0);
-node.routingTable.set(10, bucket10);
+node.routingTable.set(10, bucket10);
 node.routingTable.set(60, bucket60);
-node.routingTable.set(90, bucket90);
+node.routingTable.set(90, bucket90);
 });
 it("is initially empty.", async function () {
 const node = await Node.create();
@@ -170,7 +172,7 @@ describe("DHT internals", function () {
 0: ${node.routingTable.get(0).contacts.map(c => c.key.toString() + 'n').join(', ')}
 10: ${node.routingTable.get(10).contacts.map(c => c.key.toString() + 'n').join(', ')}
 60: ${node.routingTable.get(60).contacts.map(c => c.key.toString() + 'n').join(', ')}
-90: ${node.routingTable.get(90).contacts.map(c => c.key.toString() + 'n').join(', ')}`;
+90: ${node.routingTable.get(90).contacts.map(c => c.key.toString() + 'n').join(', ')}`;
 expect(report).toBe(expected);
 });
 });
@@ -198,8 +200,9 @@ describe("DHT internals", function () {
 node = host.node;
 // These others are all constructed to have distances that increase by one from node.
 for (let i = 1; i <= nOthers; i++) {
-let other = SimulatedContact.fromKey(BigInt(i)
-
+let other = SimulatedContact.fromKey(BigInt(i));
+let ourViewOfIt = node.ensureContact(other);
+await node.addToRoutingTable(ourViewOfIt);
 }
 //node.report();
 }, 20e3);
@@ -250,4 +253,290 @@ describe("DHT internals", function () {
 });
 });
 });
+
+describe("lookup performance with laggy nodes", function() {
+// Test to demonstrate blocking behavior with slow nodes
+// After implementing racing, this will show the improvement
+let network;
+const nNodes = 10;
+
+beforeAll(async function() {
+// Create a small network
+network = [];
+for (let i = 0; i < nNodes; i++) {
+const contact = await SimulatedContact.create(i);
+network.push(contact);
+}
+
+// Make 6 out of 10 nodes laggy with varying delays
+network[4].node.delayMs = 200;
+network[5].node.delayMs = 200;
+network[6].node.delayMs = 400;
+network[7].node.delayMs = 400;
+network[8].node.delayMs = 600;
+network[9].node.delayMs = 600;
+
+console.log('Network setup: nodes 0-3 are fast (40ms), nodes 4-9 are laggy (200/400/600ms)');
+
+// Build network: each node knows about all others
+for (let i = 0; i < nNodes; i++) {
+const node = network[i].node;
+for (let j = 0; j < nNodes; j++) {
+if (i !== j) {
+await node.addToRoutingTable(network[j].clone(node));
+}
+}
+}
+}, 30e3);
+
+afterAll(function() {
+network.forEach(contact => contact.disconnect());
+});
+
+it("completes lookup without blocking on all laggy nodes", async function() {
+// Tests that the continuous-flow algorithm (keeping alpha requests in flight)
+// makes progress without waiting for ALL slow nodes to respond.
+// With 6/10 nodes being laggy (200-600ms), the lookup must contact some slow nodes,
+// but shouldn't need to wait for every single slow response.
+const searcher = network[0].node;
+const targetKey = await Node.key("test-value");
+
+// Perform a findNodes lookup with timing enabled
+const startTime = Date.now();
+const result = await searcher.iterate(targetKey, 'findNodes', Node.k, false, true);
+const elapsed = Date.now() - startTime;
+
+console.log(`Lookup: ${elapsed}ms, ${result.length} nodes found`);
+
+// If we had to wait for ALL 6 slow nodes sequentially, it would take 2400ms+.
+// The continuous flow should allow faster completion by not blocking on stragglers.
+expect(elapsed).toBeLessThan(1500);
+expect(result.length).toBeGreaterThan(0);
+expect(result.length).toBeLessThanOrEqual(Node.k);
+}, 30e3);
+
+it("completes lookup despite some nodes timing out", async function() {
+// Temporarily set some nodes to have delays exceeding the 10s timeout
+const originalDelays = network.map(c => c.node.delayMs);
+network[8].node.delayMs = 12000; // Will timeout
+network[9].node.delayMs = 15000; // Will timeout
+
+const searcher = network[0].node;
+const targetKey = await Node.key("timeout-test");
+
+const startTime = Date.now();
+const result = await searcher.iterate(targetKey, 'findNodes', Node.k, true, true);
+const elapsed = Date.now() - startTime;
+
+// Restore original delays
+network.forEach((c, i) => c.node.delayMs = originalDelays[i]);
+
+console.log(`Timeout test: ${elapsed}ms, ${result.length} nodes found`);
+
+// Should complete around 10s (the timeout), not 15s (waiting for slowest)
+expect(elapsed).toBeGreaterThan(9000); // At least one timeout triggered
+expect(elapsed).toBeLessThan(12000); // Didn't wait for 15s node
+expect(result.length).toBeGreaterThan(0); // Still found some nodes
+}, 20e3);
+
+// Verbose diagnostic test - disabled by default. Change xit to it to enable.
+it("can store and retrieve values with diagnostic tracing", async function() {
+// Enable diagnostic tracing to see store/read details
+Node.diagnosticTrace = true;
+
+const storer = network[0].node;
+const reader = network[3].node; // Different node reads
+const targetKey = await Node.key("diagnostic-test-key");
+const testValue = "diagnostic-test-value";
+
+console.log('\n--- Store/Read Diagnostic Test ---');
+console.log(`Storing "${testValue}" from node ${storer.name}`);
+
+const storeCount = await storer.storeValue(targetKey, testValue);
+console.log(`Store completed: ${storeCount} copies`);
+
+console.log(`\nReading from node ${reader.name}`);
+const retrieved = await reader.locateValue(targetKey);
+
+console.log(`\n--- Store/Read Summary ---`);
+console.log(`Stored to ${storeCount} nodes`);
+console.log(`Retrieved: ${retrieved}`);
+console.log(`Match: ${retrieved === testValue}`);
+
+Node.diagnosticTrace = false; // Clean up
+
+expect(storeCount).toBeGreaterThan(0);
+expect(retrieved).toBe(testValue);
+}, 30e3);
+});
+
+// Randomized store/read tests - disabled by default. Change xdescribe to describe to enable.
+describe("store/read with randomized networks", function() {
+// Track results across all tests for summary
+const testResults = [];
+
+afterAll(function() {
+// Print summary of all test results
+console.log('\n========== STORE/READ TEST SUMMARY ==========');
+console.log(`Total tests: ${testResults.length}`);
+const successes = testResults.filter(r => r.found);
+const failures = testResults.filter(r => !r.found);
+console.log(`Successes: ${successes.length}, Failures: ${failures.length}`);
+
+if (successes.length > 0) {
+const ranks = successes.map(r => r.rank);
+const avgRank = ranks.reduce((a, b) => a + b, 0) / ranks.length;
+const maxRank = Math.max(...ranks);
+const storeCounts = successes.map(r => r.storedToCount);
+const avgStoreCount = storeCounts.reduce((a, b) => a + b, 0) / storeCounts.length;
+console.log(`Average rank of responder: ${avgRank.toFixed(2)} (1 = closest)`);
+console.log(`Worst rank: ${maxRank}`);
+console.log(`Average nodes stored to: ${avgStoreCount.toFixed(1)}`);
+
+// Distribution of ranks
+const rankDist = {};
+ranks.forEach(r => rankDist[r] = (rankDist[r] || 0) + 1);
+console.log('Rank distribution:', rankDist);
+}
+
+if (failures.length > 0) {
+console.log('\nFailed tests:');
+failures.forEach(f => console.log(`  Test ${f.testId}: stored to ${f.storedToCount} nodes, queried ${f.queriedCount} nodes`));
+}
+console.log('==============================================\n');
+});
+
+// Helper to create a randomized network
+async function createRandomNetwork(nNodes, connectivityFactor = 0.5) {
+const network = [];
+for (let i = 0; i < nNodes; i++) {
+const contact = await SimulatedContact.create(i);
+network.push(contact);
+}
+
+// Randomly connect nodes based on connectivity factor
+for (let i = 0; i < nNodes; i++) {
+const node = network[i].node;
+for (let j = 0; j < nNodes; j++) {
+if (i !== j && Math.random() < connectivityFactor) {
+await node.addToRoutingTable(network[j].clone(node));
+}
+}
+}
+
+return network;
+}
+
+// Helper to run a store/read test and track where value was found
+async function runStoreReadTest(testId, network, storerIdx, readerIdx, keyString) {
+const storer = network[storerIdx].node;
+const reader = network[readerIdx].node;
+const targetKey = await Node.key(keyString);
+const testValue = `value-${testId}`;
+
+// Perform store and track recipients (sorted by distance from targetKey)
+const k = storer.constructor.k;
+let helpers = await storer.locateNodes(targetKey, k * 2);
+helpers = [...helpers].sort(Helper.compare); // Ensure sorted by distance
+
+const storedTo = [];
+for (const helper of helpers.slice(0, k)) {
+const stored = await helper.contact.store(targetKey, testValue);
+if (stored) {
+storedTo.push({
+name: helper.name,
+key: helper.key,
+distance: helper.distance
+});
+}
+}
+
+// Now read - iterate now returns { value, responder } when found
+const result = await reader.iterate(targetKey, 'findValue', k, false); // trace=false for cleaner output
+
+const found = Node.isValueResult(result);
+let rank = -1;
+let responderName = 'unknown';
+let responderDistance = null;
+
+if (found && result.responder) {
+// Find the rank of the responder in the storedTo list
+const responderKey = result.responder.key;
+responderName = result.responder.name;
+responderDistance = result.responder.distance;
+
+for (let i = 0; i < storedTo.length; i++) {
+if (storedTo[i].key === responderKey) {
+rank = i + 1;
+break;
+}
+}
+
+// If responder not in storedTo list, it might have gotten the value via cache propagation
+if (rank === -1) {
+responderName = `${result.responder.name} (not in store list!)`;
+rank = storedTo.length + 1; // Worse than any stored node
+}
+}
+
+const testResult = {
+testId,
+found,
+rank: found ? rank : -1,
+responderName,
+responderDistance,
+storedToCount: storedTo.length,
+storedToNames: storedTo.map(s => s.name),
+queriedCount: found ? 0 : (Array.isArray(result) ? result.length : 0),
+value: found ? result.value : undefined
+};
+
+testResults.push(testResult);
+return testResult;
+}
+
+// Generate 20 randomized store/read tests
+for (let testNum = 1; testNum <= 20; testNum++) {
+it(`randomized store/read test ${testNum}`, async function() {
+// Randomize network parameters
+const nNodes = 8 + Math.floor(Math.random() * 8); // 8-15 nodes
+const connectivity = 0.4 + Math.random() * 0.4; // 40-80% connectivity
+
+const network = await createRandomNetwork(nNodes, connectivity);
+
+// Random storer and reader (different nodes)
+const storerIdx = Math.floor(Math.random() * nNodes);
+let readerIdx = Math.floor(Math.random() * nNodes);
+while (readerIdx === storerIdx) {
+readerIdx = Math.floor(Math.random() * nNodes);
+}
+
+const result = await runStoreReadTest(
+testNum,
+network,
+storerIdx,
+readerIdx,
+`test-key-${testNum}-${Date.now()}`
+);
+
+// Log individual test result
+if (result.found) {
+const distStr = result.responderDistance ? ` dist=${String(result.responderDistance).length}digits` : '';
+console.log(`Test ${testNum}: FOUND at rank ${result.rank}/${result.storedToCount} (${result.responderName}${distStr}) - ${nNodes} nodes, ${(connectivity*100).toFixed(0)}% connectivity`);
+if (result.rank > 3) {
+console.log(`  ⚠ WARNING: Value found at rank ${result.rank} (not among top 3 closest to key)`);
+console.log(`  Stored to: ${result.storedToNames.slice(0, 5).join(', ')}${result.storedToNames.length > 5 ? '...' : ''}`);
+}
+} else {
+console.log(`Test ${testNum}: NOT FOUND - stored to ${result.storedToCount}, queried ${result.queriedCount} - ${nNodes} nodes, ${(connectivity*100).toFixed(0)}% connectivity`);
+console.log(`  Stored to: ${result.storedToNames.join(', ')}`);
+}
+
+// Clean up network
+network.forEach(contact => contact.disconnect());
+
+expect(result.found).toBe(true);
+}, 60e3);
+}
+});
 });
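The new "laggy nodes" specs above assume a lookup that keeps a small number of requests in flight and moves on when a straggler is slow, rather than waiting for every reply in a round. A minimal, hypothetical sketch of that pattern follows; continuousFlowLookup, queryOne, alpha, and timeoutMs are illustrative names, not the package's actual iterate() API.

// Hypothetical sketch (not from the package): keep up to `alpha` requests in flight over a
// candidate list, time out individual requests, and stop once k answers have been collected.
async function continuousFlowLookup(candidates, queryOne, { alpha = 3, k = 8, timeoutMs = 10e3 } = {}) {
  const results = [];
  let next = 0;
  const withTimeout = promise =>
    Promise.race([promise, new Promise(resolve => setTimeout(() => resolve(null), timeoutMs))]);
  async function lane() { // One "lane" of the continuous flow.
    while (next < candidates.length && results.length < k) {
      const candidate = candidates[next++];
      const answer = await withTimeout(queryOne(candidate)); // null on timeout; just move on.
      if (answer !== null) results.push(answer);
    }
  }
  // A slow node delays only its own lane, so six laggy nodes never serialize into 2400ms+.
  await Promise.all(Array.from({ length: Math.min(alpha, candidates.length) }, lane));
  return results.slice(0, k);
}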
package/spec/dhtWriteReadSpec.js
CHANGED
@@ -0,0 +1,85 @@
+#!/usr/bin/env npx jasmine
+const { describe, it, expect, beforeAll, afterAll, beforeEach, afterEach } = globalThis; // For linters.
+import process from 'node:process';
+import { spawn, exec } from 'node:child_process';
+import {cpus, availableParallelism } from 'node:os';
+import { v4 as uuidv4 } from 'uuid';
+import { WebContact, Node } from '../index.js';
+import { fileURLToPath } from 'url';
+import path from 'path';
+
+describe("DHT write/read", function () {
+let contact, portalProcess, botProcess;
+const verbose = false;
+const baseURL = 'http://localhost:3000/kdht';
+const logicalCores = availableParallelism();
+console.log(`Model description "${cpus()[0].model}", ${logicalCores} logical cores.`);
+const maxPerCluster = logicalCores / 2; // Why half? Because we have at least two processes.
+const nPortals = maxPerCluster;
+const nBots = maxPerCluster;
+const fixedSpacing = 2; // Between portals.
+const variableSpacing = 5; // Additional random between portals.
+const nWrites = 40;
+const waitBeforeRead = 15e3;
+const thrash = true;
+const showPortals = true;
+const showBots = true;
+
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = path.dirname(__filename);
+const portalSeconds = fixedSpacing * nPortals + 1.5 * variableSpacing;
+const botsMilliseconds = 2 * Node.refreshTimeIntervalMS;
+
+beforeAll(async function () {
+function echo(data) { data = data.slice(0, -1); console.log(data.toString()); }
+
+console.log(new Date(), 'starting', nPortals, 'portals over', portalSeconds, 'seconds');
+portalProcess = spawn('node', [path.resolve(__dirname, 'portal.js'), '--nPortals', nPortals, '--verbose', verbose.toString()]);
+if (showPortals) {
+portalProcess.stdout.on('data', echo);
+portalProcess.stderr.on('data', echo);
+}
+await Node.delay(portalSeconds * 1e3);
+
+if (nBots) {
+for (let launched = 0, round = Math.min(nBots, maxPerCluster); launched < nBots; round = Math.min(nBots - launched, maxPerCluster), launched += round) {
+console.log(new Date(), 'starting', round, 'bots over', botsMilliseconds/1e3, 'seconds');
+botProcess = spawn('node', [path.resolve(__dirname, 'bots.js'), '--nBots', round, '--thrash', thrash.toString(), '--verbose', verbose.toString()]);
+if (showBots) {
+botProcess.stdout.on('data', echo);
+botProcess.stderr.on('data', echo);
+}
+await Node.delay(botsMilliseconds);
+}
+}
+
+contact = await WebContact.create({name: uuidv4(), debug: verbose});
+const bootstrapName = await contact.fetchBootstrap(baseURL);
+const bootstrapContact = await contact.ensureRemoteContact(bootstrapName, baseURL);
+console.log(new Date(), contact.sname, 'joining', bootstrapContact.sname);
+await contact.join(bootstrapContact);
+console.log(new Date(), contact.sname, 'joined');
+for (let index = 0; index < nWrites; index++) {
+const wrote = await contact.storeValue(index, index);
+console.log('Wrote', index);
+}
+if (waitBeforeRead) {
+console.log(new Date(), `Written. Waiting ${waitBeforeRead.toLocaleString()} ms before reading.`);
+await Node.delay(waitBeforeRead);
+}
+console.log(new Date(), 'Reading');
+}, 5e3 * nWrites + (1 + Math.ceil(nBots / maxPerCluster)) * Node.refreshTimeIntervalMS);
+afterAll(async function () {
+contact.disconnect();
+console.log(new Date(), 'killing portals and bots');
+exec('pkill kdht-');
+});
+for (let index = 0; index < nWrites; index++) {
+it(`reads ${index}.`, async function () {
+const read = await contact.node.locateValue(index);
+console.log('read', read);
+expect(read).toBe(index);
+}, 10e3); // Can take longer to re-establish multiple connections.
+}
+});
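The beforeAll above launches bot processes in rounds so the host is not oversubscribed while portals are still settling. A standalone sketch of batching launches into rounds of at most maxPerCluster; launchRound is a hypothetical stand-in for the spawn of bots.js, not the spec's code.

// Hypothetical sketch (not from the package): split nBots into batches of at most maxPerCluster,
// launching one batch per round and pausing between rounds.
async function launchInRounds(nBots, maxPerCluster, launchRound, pauseMs) {
  for (let launched = 0; launched < nBots; ) {
    const round = Math.min(nBots - launched, maxPerCluster); // Size of this batch.
    await launchRound(round); // e.g., spawn('node', ['bots.js', '--nBots', round, ...])
    launched += round;
    await new Promise(resolve => setTimeout(resolve, pauseMs)); // Let the batch join before the next one.
  }
}
// launchInRounds(10, 4, n => console.log('launching', n, 'bots'), 0) launches batches of 4, 4, and 2.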
package/spec/portal.js
CHANGED
@@ -6,19 +6,22 @@ import { launchWriteRead } from './writes.js';
 import express from 'express';
 import logger from 'morgan';
 import path from 'path';
+import {cpus, availableParallelism } from 'node:os';
 import { fileURLToPath } from 'url';
 import yargs from 'yargs';
 import { hideBin } from 'yargs/helpers';
 import { Node } from '../index.js';
 
+const logicalCores = availableParallelism();
+
 // TODO: Allow a remote portal to be specified that this portal will hook with, forming one big network.
 const argv = yargs(hideBin(process.argv))
-.usage(
+.usage(`Start an http post server through which nodes can connect to set of nPortals stable nodes. Model description "${cpus()[0].model}", ${logicalCores} logical cores.`)
 .option('nPortals', {
 alias: 'nportals',
 alias: 'p',
 type: 'number',
-default:
+default: Math.min(logicalCores / 2, 2),
 description: "The number of steady nodes that handle initial connections."
 })
 .option('nBots', {
@@ -115,5 +118,5 @@ if (cluster.isPrimary) { // Parent process with portal webserver through which c
 } else { // A portal node through which client's can connect.
 const portalNode = await import('../portals/node.js');
 const {baseURL, externalBaseURL, fixedSpacing, variableSpacing, verbose} = argv;
-portalNode.setup({baseURL, externalBaseURL, fixedSpacing, variableSpacing, verbose});
+await portalNode.setup({baseURL, externalBaseURL, fixedSpacing, variableSpacing, verbose});
 }
package/transports/contact.js
CHANGED
@@ -1,3 +1,4 @@
+import { v4 as uuidv4 } from 'uuid';
 import { Node } from '../dht/node.js';
 
 export class Contact {
@@ -31,7 +32,8 @@ export class Contact {
 return this.fromNode(node, host || node);
 }
 clone(hostNode, searchHost = true) { // Answer a Contact that is set up for hostNode - either this instance or a new one.
-//
+// I.e., a Contact with node: this.node and host: hostNode.
+// Unless searchHost is null, a matching existing contact on hostNode will be returned.
 if (this.host === hostNode) return this; // All good.
 
 // Reuse existing contact in hostNode -- if still running.
@@ -57,41 +59,108 @@ export class Contact {
 store(key, value) {
 return this.sendRPC('store', key, value);
 }
-disconnect() { // Simulate a disconnection of node, marking as such and rejecting any RPCs in flight.
+async disconnect() { // Simulate a disconnection of node, marking as such and rejecting any RPCs in flight.
 Node.assert(this.host === this.node, "Disconnect", this.name, "not invoked on home contact", this.host.name);
-
+// Attempt to ensure that there are other copies.
+if (!this.host.isStopped()) {
+await Promise.all(this.host.storage.entries().map(([key, value]) => this.storeValue(key, value)));
+}
 this.host.stopRefresh();
-this.host.contacts
+for (const contact of this.host.contacts) {
 const far = contact.connection;
 if (!far) return;
-contact.
-
+contact.synchronousSend(['-', 'bye']); // May have already been closed by other side.
+await contact.disconnectTransport(false);
+}
+this.host.isRunning = false;
+}
+disconnectTransport(andNotify = true) { // There are asynchronous things that happen, but they each get triggered synchronously
+if (andNotify) this.synchronousSend(['-', 'close']); // May have already send "bye" and closed.
+}
+close() { // The sender is closing their connection, but not necessarilly disconnected entirely (e.g., maybe maxTransports)
+this.host.log('closing disconnected contact', this.sname, this.xxx++);
+this.disconnectTransport(false);
+this.host.removeLooseTransport(this.key); // If any.
+}
+bye() { // The sender is disconnecting from the network
+this.host.log('removing disconnected contact', this.sname);
+this.host.removeContact(this).then(bucket => bucket?.resetRefresh('now')); // Accelerate the bucket refresh
 }
 distance(key) { return this.host.constructor.distance(this.key, key); }
 
 // RPC
-
+static maxPingMs = 330; // Not including connect time. These are single-hop WebRTC data channels.
+serializeRequest(...rest) { // Return the composite datum suitable for transport over the wire.
+return rest; // Non-simulation subclases must override.
+}
+async deserializeRequest(...rest) { // Inverse of serializeRequest. Response object will be spread for Node receiveRPC.
+return rest; // Non-simulation subclases must override.
+}
+serializeResponse(response) { // Like serializeRequest, but specifically for a probe response.
+return response;
+}
+async deserializeResponse(result) { // Inverse of serializeResponse.
+return result;
+}
+rpcTimeout(method) { // Promise to resolve to null at appriate timeout for RPC method
+let hops = 15; // recursive calls
+if (method === 'signals') hops = 2;
+else if (['ping', 'findNodes', 'findValue', 'store'].includes(method)) hops = 1;
+return Node.delay(hops * this.constructor.maxPingMs, null);
+}
+async sendRPC(method, ...rest) { // Promise the result of a network call to node, or null if not possible.
 const sender = this.host.contact;
-
-if (!sender.isRunning)
-if (sender.key === this.key) {
-const result = this.receiveRPC(method, sender, ...rest);
+
+if (!sender.isRunning) return null; // sender closed before call.
+if (sender.key === this.key) { // self-send short-circuit
+const result = this.host.receiveRPC(method, sender, ...rest);
 if (!result) this.host.xlog('no local result');
 return result;
 }
+if (!await this.connect()) return null;
+// uuid so that the two sides don't send a request with the same id to each other.
+// Alternatively, we could concatenate a counter to our host.name.
+let messageTag = uuidv4();
+// if (method === 'signals') {
+// messageTag = 'X' + messageTag;
+// this.host.xlog(this.counter, 'requesting', messageTag, method, 'of', this.sname);
+// }
+const message = this.serializeRequest(messageTag, method, sender, ...rest);
 
 const start = Date.now();
-return this.transmitRPC(
+return this.transmitRPC(...message)
 .then(result => {
-if (!sender.isRunning)
+if (!sender.isRunning) return null; // Sender closed after call.
 return result;
 })
 .finally(() => Node.noteStatistic(start, 'rpc'));
 }
-
-
-
-
+getResponsePromise(messageTag) { // Get a promise that will resolve when a response comes in as messageTag.
+return new Promise(resolve => this.host.messageResolvers.set(messageTag, resolve));
+}
+async receiveRPC(messageTag, ...data) { // Call the message method to act on the 'to' node side.
+const responder = this.host.messageResolvers.get(messageTag);
+if (responder) { // A response to something we sent and are waiting for.
+let [result] = data;
+this.host.messageResolvers.delete(messageTag);
+result = await this.deserializeResponse(result);
+responder(result);
+} else if (!this.host.isRunning) {
+this.disconnectTransport();
+// Kludge: In testing, it is possible for a disconnecting node to send a request that will respond to a new session of the same id.
+} else if (typeof(data[0]) !== 'string' || data[0] === 'pong') {
+; //this.host.xlog(this.counter, 'received result without responder', messageTag, data, 'at', this.sname);
+} else if (data[0] === 'close') {
+this.close();
+} else if (data[0] === 'bye') {
+this.bye();
+} else { // An incoming request.
+const deserialized = await this.deserializeRequest(...data);
+let response = await this.host.receiveRPC(...deserialized);
+response = this.serializeResponse(response);
+//if (messageTag.startsWith('X')) this.host.xlog(this.counter, 'responding', messageTag, response, 'to', this.sname);
+await this.send([messageTag, response]);
+}
 }
 // Sponsorship
 _sponsors = new Map(); // maps key => contact
@@ -115,13 +184,12 @@ export class Contact {
 //return `${this.connection ? '_' : ''}${this.sname}v${this.counter}${this.isRunning ? '' : '*'}`;
 return `${this.connection ? '_' : ''}${this.sname}${this.isRunning ? '' : '*'}`; // simpler version
 }
-static pingTimeMS =
+static pingTimeMS = 40; // ms
 static async ensureTime(thunk, ms = this.pingTimeMS) { // Promise that thunk takes at least ms to execute.
 const start = Date.now();
 const result = await thunk();
 const elapsed = Date.now() - start;
-
-await new Promise(resolve => setTimeout(resolve, ms - elapsed));
+await Node.delay(ms - elapsed);
 return result;
 }
 }
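The reworked sendRPC/receiveRPC above correlates each response with its request through a uuid message tag held in a resolver map, and rpcTimeout races the call against a per-method deadline. A self-contained sketch of that request/response protocol follows; RpcChannel, transportSend, and handleRequest are illustrative names rather than the package's Contact API.

// Hypothetical sketch (not the package's Contact class): tag each request with a uuid,
// park a resolver in a Map, and either resolve it when the tagged response arrives or
// fall back to null when the timeout wins the race.
import { randomUUID } from 'node:crypto';

class RpcChannel {
  constructor(transportSend) {
    this.transportSend = transportSend; // (message) => void, wired to the other side.
    this.messageResolvers = new Map();  // messageTag => resolve function awaiting a response.
  }
  request(method, params, timeoutMs = 330) {
    const messageTag = randomUUID();    // Unique per request, so both sides can share one channel.
    const response = new Promise(resolve => this.messageResolvers.set(messageTag, resolve));
    const timeout = new Promise(resolve => setTimeout(() => resolve(null), timeoutMs));
    this.transportSend([messageTag, method, params]);
    return Promise.race([response, timeout]); // null means "treat the peer as unresponsive".
  }
  receive([messageTag, ...data], handleRequest) {
    const resolver = this.messageResolvers.get(messageTag);
    if (resolver) {                     // A response to something we sent and are waiting for.
      this.messageResolvers.delete(messageTag);
      resolver(data[0]);
    } else {                            // An incoming request: answer it under the same tag.
      Promise.resolve(handleRequest(...data)).then(result => this.transportSend([messageTag, result]));
    }
  }
}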
|