@effect/cluster 0.50.6 → 0.52.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/RunnerStorage/package.json +6 -0
- package/SqlRunnerStorage/package.json +6 -0
- package/dist/cjs/ClusterError.js +2 -24
- package/dist/cjs/ClusterError.js.map +1 -1
- package/dist/cjs/ClusterMetrics.js +13 -15
- package/dist/cjs/ClusterMetrics.js.map +1 -1
- package/dist/cjs/ClusterSchema.js +17 -2
- package/dist/cjs/ClusterSchema.js.map +1 -1
- package/dist/cjs/ClusterWorkflowEngine.js +50 -83
- package/dist/cjs/ClusterWorkflowEngine.js.map +1 -1
- package/dist/cjs/Entity.js +1 -13
- package/dist/cjs/Entity.js.map +1 -1
- package/dist/cjs/EntityAddress.js +9 -1
- package/dist/cjs/EntityAddress.js.map +1 -1
- package/dist/cjs/EntityId.js +7 -1
- package/dist/cjs/EntityId.js.map +1 -1
- package/dist/cjs/EntityProxy.js +1 -1
- package/dist/cjs/EntityProxy.js.map +1 -1
- package/dist/cjs/HttpRunner.js +69 -43
- package/dist/cjs/HttpRunner.js.map +1 -1
- package/dist/cjs/MessageStorage.js +64 -16
- package/dist/cjs/MessageStorage.js.map +1 -1
- package/dist/cjs/Runner.js +3 -3
- package/dist/cjs/Runner.js.map +1 -1
- package/dist/cjs/RunnerAddress.js +7 -0
- package/dist/cjs/RunnerAddress.js.map +1 -1
- package/dist/cjs/RunnerHealth.js +91 -32
- package/dist/cjs/RunnerHealth.js.map +1 -1
- package/dist/cjs/RunnerServer.js +38 -24
- package/dist/cjs/RunnerServer.js.map +1 -1
- package/dist/cjs/RunnerStorage.js +100 -0
- package/dist/cjs/RunnerStorage.js.map +1 -0
- package/dist/cjs/Runners.js +18 -22
- package/dist/cjs/Runners.js.map +1 -1
- package/dist/cjs/ShardId.js +17 -7
- package/dist/cjs/ShardId.js.map +1 -1
- package/dist/cjs/Sharding.js +444 -320
- package/dist/cjs/Sharding.js.map +1 -1
- package/dist/cjs/ShardingConfig.js +10 -14
- package/dist/cjs/ShardingConfig.js.map +1 -1
- package/dist/cjs/Snowflake.js +1 -1
- package/dist/cjs/SocketRunner.js +1 -1
- package/dist/cjs/SocketRunner.js.map +1 -1
- package/dist/cjs/SqlMessageStorage.js +22 -28
- package/dist/cjs/SqlMessageStorage.js.map +1 -1
- package/dist/cjs/SqlRunnerStorage.js +375 -0
- package/dist/cjs/SqlRunnerStorage.js.map +1 -0
- package/dist/cjs/index.js +5 -15
- package/dist/cjs/internal/entityManager.js +42 -23
- package/dist/cjs/internal/entityManager.js.map +1 -1
- package/dist/dts/ClusterError.d.ts +0 -22
- package/dist/dts/ClusterError.d.ts.map +1 -1
- package/dist/dts/ClusterMetrics.d.ts +4 -14
- package/dist/dts/ClusterMetrics.d.ts.map +1 -1
- package/dist/dts/ClusterSchema.d.ts +9 -1
- package/dist/dts/ClusterSchema.d.ts.map +1 -1
- package/dist/dts/ClusterWorkflowEngine.d.ts.map +1 -1
- package/dist/dts/Entity.d.ts +3 -14
- package/dist/dts/Entity.d.ts.map +1 -1
- package/dist/dts/EntityAddress.d.ts +11 -0
- package/dist/dts/EntityAddress.d.ts.map +1 -1
- package/dist/dts/EntityId.d.ts +5 -0
- package/dist/dts/EntityId.d.ts.map +1 -1
- package/dist/dts/EntityProxy.d.ts +5 -6
- package/dist/dts/EntityProxy.d.ts.map +1 -1
- package/dist/dts/HttpRunner.d.ts +48 -25
- package/dist/dts/HttpRunner.d.ts.map +1 -1
- package/dist/dts/MessageStorage.d.ts +13 -5
- package/dist/dts/MessageStorage.d.ts.map +1 -1
- package/dist/dts/Runner.d.ts +4 -4
- package/dist/dts/Runner.d.ts.map +1 -1
- package/dist/dts/RunnerAddress.d.ts +5 -0
- package/dist/dts/RunnerAddress.d.ts.map +1 -1
- package/dist/dts/RunnerHealth.d.ts +24 -16
- package/dist/dts/RunnerHealth.d.ts.map +1 -1
- package/dist/dts/RunnerServer.d.ts +5 -4
- package/dist/dts/RunnerServer.d.ts.map +1 -1
- package/dist/dts/{ShardStorage.d.ts → RunnerStorage.d.ts} +41 -54
- package/dist/dts/RunnerStorage.d.ts.map +1 -0
- package/dist/dts/Runners.d.ts +15 -11
- package/dist/dts/Runners.d.ts.map +1 -1
- package/dist/dts/ShardId.d.ts +1 -1
- package/dist/dts/ShardId.d.ts.map +1 -1
- package/dist/dts/Sharding.d.ts +20 -10
- package/dist/dts/Sharding.d.ts.map +1 -1
- package/dist/dts/ShardingConfig.d.ts +40 -14
- package/dist/dts/ShardingConfig.d.ts.map +1 -1
- package/dist/dts/SocketRunner.d.ts +4 -3
- package/dist/dts/SocketRunner.d.ts.map +1 -1
- package/dist/dts/SqlMessageStorage.d.ts +2 -3
- package/dist/dts/SqlMessageStorage.d.ts.map +1 -1
- package/dist/dts/SqlRunnerStorage.d.ts +40 -0
- package/dist/dts/SqlRunnerStorage.d.ts.map +1 -0
- package/dist/dts/index.d.ts +4 -24
- package/dist/dts/index.d.ts.map +1 -1
- package/dist/esm/ClusterError.js +0 -21
- package/dist/esm/ClusterError.js.map +1 -1
- package/dist/esm/ClusterMetrics.js +12 -14
- package/dist/esm/ClusterMetrics.js.map +1 -1
- package/dist/esm/ClusterSchema.js +17 -2
- package/dist/esm/ClusterSchema.js.map +1 -1
- package/dist/esm/ClusterWorkflowEngine.js +50 -83
- package/dist/esm/ClusterWorkflowEngine.js.map +1 -1
- package/dist/esm/Entity.js +0 -12
- package/dist/esm/Entity.js.map +1 -1
- package/dist/esm/EntityAddress.js +7 -0
- package/dist/esm/EntityAddress.js.map +1 -1
- package/dist/esm/EntityId.js +5 -0
- package/dist/esm/EntityId.js.map +1 -1
- package/dist/esm/EntityProxy.js +2 -2
- package/dist/esm/EntityProxy.js.map +1 -1
- package/dist/esm/HttpRunner.js +62 -39
- package/dist/esm/HttpRunner.js.map +1 -1
- package/dist/esm/MessageStorage.js +65 -17
- package/dist/esm/MessageStorage.js.map +1 -1
- package/dist/esm/Runner.js +3 -3
- package/dist/esm/Runner.js.map +1 -1
- package/dist/esm/RunnerAddress.js +7 -0
- package/dist/esm/RunnerAddress.js.map +1 -1
- package/dist/esm/RunnerHealth.js +88 -30
- package/dist/esm/RunnerHealth.js.map +1 -1
- package/dist/esm/RunnerServer.js +38 -24
- package/dist/esm/RunnerServer.js.map +1 -1
- package/dist/esm/RunnerStorage.js +90 -0
- package/dist/esm/RunnerStorage.js.map +1 -0
- package/dist/esm/Runners.js +19 -23
- package/dist/esm/Runners.js.map +1 -1
- package/dist/esm/ShardId.js +16 -6
- package/dist/esm/ShardId.js.map +1 -1
- package/dist/esm/Sharding.js +447 -323
- package/dist/esm/Sharding.js.map +1 -1
- package/dist/esm/ShardingConfig.js +10 -14
- package/dist/esm/ShardingConfig.js.map +1 -1
- package/dist/esm/Snowflake.js +1 -1
- package/dist/esm/SocketRunner.js +1 -1
- package/dist/esm/SocketRunner.js.map +1 -1
- package/dist/esm/SqlMessageStorage.js +22 -28
- package/dist/esm/SqlMessageStorage.js.map +1 -1
- package/dist/esm/SqlRunnerStorage.js +366 -0
- package/dist/esm/SqlRunnerStorage.js.map +1 -0
- package/dist/esm/index.js +4 -24
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/internal/entityManager.js +41 -22
- package/dist/esm/internal/entityManager.js.map +1 -1
- package/package.json +20 -60
- package/src/ClusterError.ts +0 -24
- package/src/ClusterMetrics.ts +12 -16
- package/src/ClusterSchema.ts +17 -2
- package/src/ClusterWorkflowEngine.ts +48 -80
- package/src/Entity.ts +3 -21
- package/src/EntityAddress.ts +10 -0
- package/src/EntityId.ts +6 -0
- package/src/EntityProxy.ts +10 -10
- package/src/HttpRunner.ts +132 -67
- package/src/MessageStorage.ts +89 -24
- package/src/Runner.ts +4 -4
- package/src/RunnerAddress.ts +8 -0
- package/src/RunnerHealth.ts +119 -56
- package/src/RunnerServer.ts +64 -47
- package/src/RunnerStorage.ts +218 -0
- package/src/Runners.ts +32 -45
- package/src/ShardId.ts +14 -3
- package/src/Sharding.ts +561 -417
- package/src/ShardingConfig.ts +39 -31
- package/src/Snowflake.ts +1 -1
- package/src/SocketRunner.ts +6 -4
- package/src/SqlMessageStorage.ts +28 -30
- package/src/SqlRunnerStorage.ts +537 -0
- package/src/index.ts +4 -29
- package/src/internal/entityManager.ts +45 -29
- package/HttpCommon/package.json +0 -6
- package/HttpShardManager/package.json +0 -6
- package/ShardManager/package.json +0 -6
- package/ShardStorage/package.json +0 -6
- package/SocketShardManager/package.json +0 -6
- package/SqlShardStorage/package.json +0 -6
- package/SynchronizedClock/package.json +0 -6
- package/dist/cjs/HttpCommon.js +0 -48
- package/dist/cjs/HttpCommon.js.map +0 -1
- package/dist/cjs/HttpShardManager.js +0 -139
- package/dist/cjs/HttpShardManager.js.map +0 -1
- package/dist/cjs/ShardManager.js +0 -549
- package/dist/cjs/ShardManager.js.map +0 -1
- package/dist/cjs/ShardStorage.js +0 -151
- package/dist/cjs/ShardStorage.js.map +0 -1
- package/dist/cjs/SocketShardManager.js +0 -32
- package/dist/cjs/SocketShardManager.js.map +0 -1
- package/dist/cjs/SqlShardStorage.js +0 -253
- package/dist/cjs/SqlShardStorage.js.map +0 -1
- package/dist/cjs/SynchronizedClock.js +0 -65
- package/dist/cjs/SynchronizedClock.js.map +0 -1
- package/dist/cjs/internal/shardManager.js +0 -353
- package/dist/cjs/internal/shardManager.js.map +0 -1
- package/dist/dts/HttpCommon.d.ts +0 -25
- package/dist/dts/HttpCommon.d.ts.map +0 -1
- package/dist/dts/HttpShardManager.d.ts +0 -119
- package/dist/dts/HttpShardManager.d.ts.map +0 -1
- package/dist/dts/ShardManager.d.ts +0 -459
- package/dist/dts/ShardManager.d.ts.map +0 -1
- package/dist/dts/ShardStorage.d.ts.map +0 -1
- package/dist/dts/SocketShardManager.d.ts +0 -17
- package/dist/dts/SocketShardManager.d.ts.map +0 -1
- package/dist/dts/SqlShardStorage.d.ts +0 -38
- package/dist/dts/SqlShardStorage.d.ts.map +0 -1
- package/dist/dts/SynchronizedClock.d.ts +0 -19
- package/dist/dts/SynchronizedClock.d.ts.map +0 -1
- package/dist/dts/internal/shardManager.d.ts +0 -2
- package/dist/dts/internal/shardManager.d.ts.map +0 -1
- package/dist/esm/HttpCommon.js +0 -38
- package/dist/esm/HttpCommon.js.map +0 -1
- package/dist/esm/HttpShardManager.js +0 -128
- package/dist/esm/HttpShardManager.js.map +0 -1
- package/dist/esm/ShardManager.js +0 -535
- package/dist/esm/ShardManager.js.map +0 -1
- package/dist/esm/ShardStorage.js +0 -141
- package/dist/esm/ShardStorage.js.map +0 -1
- package/dist/esm/SocketShardManager.js +0 -24
- package/dist/esm/SocketShardManager.js.map +0 -1
- package/dist/esm/SqlShardStorage.js +0 -244
- package/dist/esm/SqlShardStorage.js.map +0 -1
- package/dist/esm/SynchronizedClock.js +0 -57
- package/dist/esm/SynchronizedClock.js.map +0 -1
- package/dist/esm/internal/shardManager.js +0 -342
- package/dist/esm/internal/shardManager.js.map +0 -1
- package/src/HttpCommon.ts +0 -73
- package/src/HttpShardManager.ts +0 -273
- package/src/ShardManager.ts +0 -823
- package/src/ShardStorage.ts +0 -297
- package/src/SocketShardManager.ts +0 -48
- package/src/SqlShardStorage.ts +0 -329
- package/src/SynchronizedClock.ts +0 -82
- package/src/internal/shardManager.ts +0 -412
package/dist/cjs/Sharding.js
CHANGED
@@ -9,29 +9,27 @@ var _RpcMessage = require("@effect/rpc/RpcMessage");
 var Arr = _interopRequireWildcard(require("effect/Array"));
 var Cause = _interopRequireWildcard(require("effect/Cause"));
 var Context = _interopRequireWildcard(require("effect/Context"));
-var Deferred = _interopRequireWildcard(require("effect/Deferred"));
 var Effect = _interopRequireWildcard(require("effect/Effect"));
+var Either = _interopRequireWildcard(require("effect/Either"));
 var Equal = _interopRequireWildcard(require("effect/Equal"));
-var Exit = _interopRequireWildcard(require("effect/Exit"));
 var Fiber = _interopRequireWildcard(require("effect/Fiber"));
-var FiberHandle = _interopRequireWildcard(require("effect/FiberHandle"));
 var FiberMap = _interopRequireWildcard(require("effect/FiberMap"));
 var FiberRef = _interopRequireWildcard(require("effect/FiberRef"));
+var FiberSet = _interopRequireWildcard(require("effect/FiberSet"));
 var _Function = require("effect/Function");
 var HashMap = _interopRequireWildcard(require("effect/HashMap"));
-var
+var HashRing = _interopRequireWildcard(require("effect/HashRing"));
 var Layer = _interopRequireWildcard(require("effect/Layer"));
 var MutableHashMap = _interopRequireWildcard(require("effect/MutableHashMap"));
 var MutableHashSet = _interopRequireWildcard(require("effect/MutableHashSet"));
 var MutableRef = _interopRequireWildcard(require("effect/MutableRef"));
 var Option = _interopRequireWildcard(require("effect/Option"));
-var Predicate = _interopRequireWildcard(require("effect/Predicate"));
 var PubSub = _interopRequireWildcard(require("effect/PubSub"));
 var Schedule = _interopRequireWildcard(require("effect/Schedule"));
 var Scope = _interopRequireWildcard(require("effect/Scope"));
 var Stream = _interopRequireWildcard(require("effect/Stream"));
-var _ClusterError =
-var
+var _ClusterError = require("./ClusterError.js");
+var ClusterMetrics = _interopRequireWildcard(require("./ClusterMetrics.js"));
 var _ClusterSchema = _interopRequireWildcard(require("./ClusterSchema.js"));
 var ClusterSchema = _ClusterSchema;
 var _EntityAddress = require("./EntityAddress.js");
@@ -45,12 +43,13 @@ var _resourceMap = require("./internal/resourceMap.js");
 var Message = _interopRequireWildcard(require("./Message.js"));
 var MessageStorage = _interopRequireWildcard(require("./MessageStorage.js"));
 var Reply = _interopRequireWildcard(require("./Reply.js"));
+var _Runner = require("./Runner.js");
+var RunnerHealth = _interopRequireWildcard(require("./RunnerHealth.js"));
 var _Runners = require("./Runners.js");
+var _RunnerStorage = require("./RunnerStorage.js");
 var _ShardId = require("./ShardId.js");
 var _ShardingConfig = require("./ShardingConfig.js");
 var _ShardingRegistrationEvent = require("./ShardingRegistrationEvent.js");
-var _ShardManager = require("./ShardManager.js");
-var _ShardStorage = require("./ShardStorage.js");
 var _SingletonAddress = require("./SingletonAddress.js");
 var Snowflake = _interopRequireWildcard(require("./Snowflake.js"));
 function _interopRequireWildcard(e, t) { if ("function" == typeof WeakMap) var r = new WeakMap(), n = new WeakMap(); return (_interopRequireWildcard = function (e, t) { if (!t && e && e.__esModule) return e; var o, i, f = { __proto__: null, default: e }; if (null === e || "object" != typeof e && "function" != typeof e) return f; if (o = t ? n : r) { if (o.has(e)) return o.get(e); o.set(e, f); } for (const t in e) "default" !== t && {}.hasOwnProperty.call(e, t) && ((i = (o = Object.defineProperty) && Object.getOwnPropertyDescriptor(e, t)) && (i.get || i.set) ? o(f, t, i) : f[t] = e[t]); return f; })(e, t); }
@@ -62,14 +61,16 @@ class Sharding extends /*#__PURE__*/Context.Tag("@effect/cluster/Sharding")() {}
 exports.Sharding = Sharding;
 const make = /*#__PURE__*/Effect.gen(function* () {
 const config = yield* _ShardingConfig.ShardingConfig;
-const
-const
+const runnersService = yield* _Runners.Runners;
+const runnerHealth = yield* RunnerHealth.RunnerHealth;
 const snowflakeGen = yield* Snowflake.Generator;
 const shardingScope = yield* Effect.scope;
 const isShutdown = MutableRef.make(false);
+const fiberSet = yield* FiberSet.make();
+const runFork = yield* FiberSet.runtime(fiberSet)().pipe(Effect.mapInputContext(context => Context.omit(Scope.Scope)(context)));
 const storage = yield* MessageStorage.MessageStorage;
 const storageEnabled = storage !== MessageStorage.noop;
-const
+const runnerStorage = yield* _RunnerStorage.RunnerStorage;
 const entityManagers = new Map();
 const shardAssignments = MutableHashMap.empty();
 const selfShards = MutableHashSet.empty();
@@ -81,24 +82,24 @@ const make = /*#__PURE__*/Effect.gen(function* () {
 const isLocalRunner = address => Option.isSome(config.runnerAddress) && Equal.equals(address, config.runnerAddress.value);
 function getShardId(entityId, group) {
 const id = Math.abs((0, _hash.hashString)(entityId) % config.shardsPerGroup) + 1;
-return _ShardId.
-group,
-id
-}, {
-disableValidation: true
-});
+return (0, _ShardId.make)(group, id);
 }
 function isEntityOnLocalShards(address) {
 return MutableHashSet.has(acquiredShards, address.shardId);
 }
 // --- Shard acquisition ---
+//
+// Responsible for acquiring and releasing shards from RunnerStorage.
+//
+// This should be shutdown last, when all entities have been shutdown, to
+// allow them to move to another runner.
+const releasingShards = MutableHashSet.empty();
 if (Option.isSome(config.runnerAddress)) {
 const selfAddress = config.runnerAddress.value;
 yield* Scope.addFinalizerExit(shardingScope, () => {
 // the locks expire over time, so if this fails we ignore it
-return Effect.ignore(
+return Effect.ignore(runnerStorage.releaseAll(selfAddress));
 });
-const releasingShards = MutableHashSet.empty();
 yield* Effect.gen(function* () {
 activeShardsLatch.unsafeOpen();
 while (true) {
@@ -110,140 +111,174 @@ const make = /*#__PURE__*/Effect.gen(function* () {
 MutableHashSet.remove(acquiredShards, shardId);
 MutableHashSet.add(releasingShards, shardId);
 }
+if (MutableHashSet.size(releasingShards) > 0) {
+yield* Effect.forkIn(syncSingletons, shardingScope);
+yield* releaseShards;
+}
 // if a shard has been assigned to this runner, we acquire it
 const unacquiredShards = MutableHashSet.empty();
 for (const shardId of selfShards) {
 if (MutableHashSet.has(acquiredShards, shardId) || MutableHashSet.has(releasingShards, shardId)) continue;
 MutableHashSet.add(unacquiredShards, shardId);
 }
-if (MutableHashSet.size(releasingShards) > 0) {
-yield* Effect.forkIn(syncSingletons, shardingScope);
-yield* releaseShards;
-}
 if (MutableHashSet.size(unacquiredShards) === 0) {
 continue;
 }
-const acquired = yield*
+const acquired = yield* runnerStorage.acquire(selfAddress, unacquiredShards);
 yield* Effect.ignore(storage.resetShards(acquired));
 for (const shardId of acquired) {
+if (MutableHashSet.has(releasingShards, shardId) || !MutableHashSet.has(selfShards, shardId)) {
+continue;
+}
 MutableHashSet.add(acquiredShards, shardId);
 }
 if (acquired.length > 0) {
 yield* storageReadLatch.open;
 yield* Effect.forkIn(syncSingletons, shardingScope);
+// update metrics
+ClusterMetrics.shards.unsafeUpdate(BigInt(MutableHashSet.size(acquiredShards)), []);
 }
 yield* Effect.sleep(1000);
 activeShardsLatch.unsafeOpen();
 }
-}).pipe(Effect.catchAllCause(cause => Effect.logWarning("Could not acquire/release shards", cause)), Effect.
+}).pipe(Effect.catchAllCause(cause => Effect.logWarning("Could not acquire/release shards", cause)), Effect.repeat(Schedule.spaced(config.entityMessagePollInterval)), Effect.annotateLogs({
 package: "@effect/cluster",
 module: "Sharding",
 fiber: "Shard acquisition loop",
 runner: selfAddress
-}), Effect.
-// refresh the shard locks every
-yield* Effect.suspend(() =>
+}), Effect.forkIn(shardingScope));
+// refresh the shard locks every `shardLockRefreshInterval`
+yield* Effect.suspend(() => runnerStorage.refresh(selfAddress, [...acquiredShards, ...releasingShards])).pipe(Effect.flatMap(acquired => {
 for (const shardId of acquiredShards) {
-if (!acquired.
+if (!acquired.includes(shardId)) {
+MutableHashSet.remove(acquiredShards, shardId);
+MutableHashSet.add(releasingShards, shardId);
+}
+}
+for (let i = 0; i < acquired.length; i++) {
+const shardId = acquired[i];
+if (!MutableHashSet.has(selfShards, shardId)) {
 MutableHashSet.remove(acquiredShards, shardId);
 MutableHashSet.add(releasingShards, shardId);
 }
 }
-return MutableHashSet.size(releasingShards) > 0 ?
+return MutableHashSet.size(releasingShards) > 0 ? activeShardsLatch.open : Effect.void;
 }), Effect.retry({
 times: 5,
 schedule: Schedule.spaced(50)
-}), Effect.catchAllCause(cause => Effect.logError("Could not refresh shard locks", cause).pipe(Effect.andThen(clearSelfShards))), Effect.
+}), Effect.catchAllCause(cause => Effect.logError("Could not refresh shard locks", cause).pipe(Effect.andThen(clearSelfShards))), Effect.repeat(Schedule.fixed(config.shardLockRefreshInterval)), Effect.forever, Effect.forkIn(shardingScope));
 const releaseShardsLock = Effect.unsafeMakeSemaphore(1).withPermits(1);
 const releaseShards = releaseShardsLock(Effect.suspend(() => Effect.forEach(releasingShards, shardId => Effect.forEach(entityManagers.values(), state => state.manager.interruptShard(shardId), {
 concurrency: "unbounded",
 discard: true
-}).pipe(Effect.andThen(
+}).pipe(Effect.andThen(runnerStorage.release(selfAddress, shardId)), Effect.annotateLogs({
 runner: selfAddress
-}), Effect.
+}), Effect.flatMap(() => {
 MutableHashSet.remove(releasingShards, shardId);
+return storage.unregisterShardReplyHandlers(shardId);
 })), {
 concurrency: "unbounded",
 discard: true
-}))
+})));
+// open the shard latch every poll interval
+yield* activeShardsLatch.open.pipe(Effect.delay(config.entityMessagePollInterval), Effect.forever, Effect.forkIn(shardingScope));
 }
-const clearSelfShards = Effect.
+const clearSelfShards = Effect.sync(() => {
 MutableHashSet.clear(selfShards);
-
+activeShardsLatch.unsafeOpen();
 });
-// --- Singletons ---
-const singletons = new Map();
-const singletonFibers = yield* FiberMap.make();
-const withSingletonLock = Effect.unsafeMakeSemaphore(1).withPermits(1);
-const registerSingleton = Effect.fnUntraced(function* (name, run, options) {
-const shardGroup = options?.shardGroup ?? "default";
-const address = new _SingletonAddress.SingletonAddress({
-shardId: getShardId(_EntityId.EntityId.make(name), shardGroup),
-name
-});
-let map = singletons.get(address.shardId);
-if (!map) {
-map = MutableHashMap.empty();
-singletons.set(address.shardId, map);
-}
-if (MutableHashMap.has(map, address)) {
-return yield* Effect.dieMessage(`Singleton '${name}' is already registered`);
-}
-const context = yield* Effect.context();
-const wrappedRun = run.pipe(Effect.locally(FiberRef.currentLogAnnotations, HashMap.empty()), Effect.andThen(Effect.never), Effect.scoped, Effect.provide(context), Effect.orDie, Effect.interruptible);
-MutableHashMap.set(map, address, wrappedRun);
-yield* PubSub.publish(events, (0, _ShardingRegistrationEvent.SingletonRegistered)({
-address
-}));
-// start if we are on the right shard
-if (MutableHashSet.has(acquiredShards, address.shardId)) {
-yield* Effect.logDebug("Starting singleton", address);
-yield* FiberMap.run(singletonFibers, address, wrappedRun);
-}
-}, withSingletonLock);
-const syncSingletons = withSingletonLock(Effect.gen(function* () {
-for (const [shardId, map] of singletons) {
-for (const [address, run] of map) {
-const running = FiberMap.unsafeHas(singletonFibers, address);
-const shouldBeRunning = MutableHashSet.has(acquiredShards, shardId);
-if (running && !shouldBeRunning) {
-yield* Effect.logDebug("Stopping singleton", address);
-_interruptors.internalInterruptors.add(yield* Effect.fiberId);
-yield* FiberMap.remove(singletonFibers, address);
-} else if (!running && shouldBeRunning) {
-yield* Effect.logDebug("Starting singleton", address);
-yield* FiberMap.run(singletonFibers, address, run);
-}
-}
-}
-}));
 // --- Storage inbox ---
+//
+// Responsible for reading unprocessed messages from storage and sending them
+// to the appropriate entity manager.
+//
+// This should be shutdown before shard acquisition, to ensure no messages are
+// being processed before the shards are released.
+//
+// It should also be shutdown after the entity managers, to ensure interrupt
+// & ack envelopes can still be processed.
 const storageReadLatch = yield* Effect.makeLatch(true);
 const openStorageReadLatch = (0, _Function.constant)(storageReadLatch.open);
 const storageReadLock = Effect.unsafeMakeSemaphore(1);
 const withStorageReadLock = storageReadLock.withPermits(1);
-let storageAlreadyProcessed = _message => true;
-// keep track of the last sent request ids to avoid duplicates
-// we only keep the last 30 sets to avoid memory leaks
-const sentRequestIds = new Set();
-const sentRequestIdSets = new Set();
 if (storageEnabled && Option.isSome(config.runnerAddress)) {
 const selfAddress = config.runnerAddress.value;
 yield* Effect.gen(function* () {
 yield* Effect.logDebug("Starting");
 yield* Effect.addFinalizer(() => Effect.logDebug("Shutting down"));
-
-
-
-
-
-
-
-
-
-
-
-
+let index = 0;
+let messages = [];
+const removableNotifications = new Set();
+const resetAddresses = MutableHashSet.empty();
+const processMessages = Effect.whileLoop({
+while: () => index < messages.length,
+step: () => index++,
+body: () => send
+});
+const send = Effect.catchAllCause(Effect.suspend(() => {
+const message = messages[index];
+// if we are shutting down, we don't accept new requests
+if (message._tag === "IncomingRequest" && isShutdown.current) {
+if (isShutdown.current) {
+return Effect.void;
+}
+}
+const address = message.envelope.address;
+if (!MutableHashSet.has(acquiredShards, address.shardId)) {
+return Effect.void;
+}
+const state = entityManagers.get(address.entityType);
+if (!state) {
+// reset address in the case that the entity is slow to register
+MutableHashSet.add(resetAddresses, address);
+return Effect.void;
+} else if (state.closed) {
+return Effect.void;
+}
+const isProcessing = state.manager.isProcessingFor(message);
+// If the message might affect a currently processing request, we
+// send it to the entity manager to be processed.
+if (message._tag === "IncomingEnvelope" && isProcessing) {
+return state.manager.send(message);
+} else if (isProcessing) {
+return Effect.void;
+} else if (message._tag === "IncomingRequest" && pendingNotifications.has(message.envelope.requestId)) {
+const entry = pendingNotifications.get(message.envelope.requestId);
+pendingNotifications.delete(message.envelope.requestId);
+removableNotifications.delete(entry);
+entry.resume(Effect.void);
+}
+// If the entity was resuming in another fiber, we add the message
+// id to the unprocessed set.
+const resumptionState = MutableHashMap.get(entityResumptionState, address);
+if (Option.isSome(resumptionState)) {
+resumptionState.value.unprocessed.add(message.envelope.requestId);
+if (message.envelope._tag === "Interrupt") {
+resumptionState.value.interrupts.set(message.envelope.requestId, message);
+}
+return Effect.void;
+}
+return state.manager.send(message);
+}), cause => {
+const message = messages[index];
+const error = Cause.failureOrCause(cause);
+// if we get a defect, then update storage
+if (Either.isRight(error)) {
+if (Cause.isInterrupted(cause)) {
+return Effect.void;
+}
+return Effect.ignore(storage.saveReply(Reply.ReplyWithContext.fromDefect({
+id: snowflakeGen.unsafeNext(),
+requestId: message.envelope.requestId,
+defect: Cause.squash(cause)
+})));
+}
+if (error.left._tag === "MailboxFull") {
+// MailboxFull can only happen for requests, so this cast is safe
+return resumeEntityFromStorage(message);
+}
+return Effect.void;
+});
 while (true) {
 // wait for the next poll interval, or if we get notified of a change
 yield* storageReadLatch.await;
@@ -254,98 +289,45 @@ const make = /*#__PURE__*/Effect.gen(function* () {
 // more items are added to the unprocessed set while the semaphore is
 // acquired.
 yield* storageReadLock.take(1);
-
-
-
-
-
-
-
-
-
-
-
-}
-
-
-
-
-
-
-
-
-
-
-
-address
-})
-})));
-}
-return Effect.void;
-}
-const isProcessing = state.manager.isProcessingFor(message);
-// If the message might affect a currently processing request, we
-// send it to the entity manager to be processed.
-if (message._tag === "IncomingEnvelope" && isProcessing) {
-return state.manager.send(message);
-} else if (isProcessing) {
-return Effect.void;
-}
-// If the entity was resuming in another fiber, we add the message
-// id to the unprocessed set.
-const resumptionState = MutableHashMap.get(entityResumptionState, address);
-if (Option.isSome(resumptionState)) {
-resumptionState.value.unprocessed.add(message.envelope.requestId);
-if (message.envelope._tag === "Interrupt") {
-resumptionState.value.interrupts.set(message.envelope.requestId, message);
-}
-return Effect.void;
-}
-return state.manager.send(message);
-}), cause => {
-const message = messages[index];
-const error = Cause.failureOption(cause);
-// if we get a defect, then update storage
-if (Option.isNone(error)) {
-if (Cause.isInterrupted(cause)) {
-return Effect.void;
-}
-return storage.saveReply(Reply.ReplyWithContext.fromDefect({
-id: snowflakeGen.unsafeNext(),
-requestId: message.envelope.requestId,
-defect: Cause.squash(cause)
+entityManagers.forEach(state => state.manager.clearProcessed());
+if (pendingNotifications.size > 0) {
+pendingNotifications.forEach(entry => removableNotifications.add(entry));
+}
+messages = yield* storage.unprocessedMessages(acquiredShards);
+index = 0;
+yield* processMessages;
+if (removableNotifications.size > 0) {
+removableNotifications.forEach(({
+message,
+resume
+}) => {
+pendingNotifications.delete(message.envelope.requestId);
+resume(Effect.fail(new _ClusterError.EntityNotAssignedToRunner({
+address: message.envelope.address
+})));
+});
+removableNotifications.clear();
+}
+if (MutableHashSet.size(resetAddresses) > 0) {
+for (const address of resetAddresses) {
+yield* Effect.logWarning("Could not find entity manager for address, retrying").pipe(Effect.annotateLogs({
+address
 }));
+yield* Effect.forkIn(storage.resetAddress(address), shardingScope);
 }
-
-
-return resumeEntityFromStorage(message);
-}
-return Effect.void;
-});
-let index = 0;
-yield* Effect.whileLoop({
-while: () => index < messages.length,
-step: () => index++,
-body: (0, _Function.constant)(send)
-});
+MutableHashSet.clear(resetAddresses);
+}
 // let the resuming entities check if they are done
 yield* storageReadLock.release(1);
-while (sentRequestIdSets.size > 30) {
-const oldest = Iterable.unsafeHead(sentRequestIdSets);
-sentRequestIdSets.delete(oldest);
-for (const id of oldest) {
-sentRequestIds.delete(id);
-}
-}
 }
-}).pipe(Effect.scoped, Effect.ensuring(storageReadLock.releaseAll), Effect.catchAllCause(cause => Effect.logWarning("Could not read messages from storage", cause)), Effect.
+}).pipe(Effect.scoped, Effect.ensuring(storageReadLock.releaseAll), Effect.catchAllCause(cause => Effect.logWarning("Could not read messages from storage", cause)), Effect.forever, Effect.annotateLogs({
 package: "@effect/cluster",
 module: "Sharding",
 fiber: "Storage read loop",
 runner: selfAddress
-}), Effect.
+}), Effect.withUnhandledErrorLogLevel(Option.none()), Effect.forkIn(shardingScope));
 // open the storage latch every poll interval
-yield* storageReadLatch.open.pipe(Effect.delay(config.entityMessagePollInterval), Effect.forever, Effect.
+yield* storageReadLatch.open.pipe(Effect.delay(config.entityMessagePollInterval), Effect.forever, Effect.forkIn(shardingScope));
 // Resume unprocessed messages for entities that reached a full mailbox.
 const entityResumptionState = MutableHashMap.empty();
 const resumeEntityFromStorage = lastReceivedMessage => {
@@ -424,16 +406,16 @@ const make = /*#__PURE__*/Effect.gen(function* () {
 }, Effect.retry({
 while: e => e._tag === "PersistenceError",
 schedule: Schedule.spaced(config.entityMessagePollInterval)
-}), Effect.catchAllCause(cause => Effect.
+}), Effect.catchAllCause(cause => Effect.logDebug("Could not resume unprocessed messages", cause)), (effect, address) => Effect.annotateLogs(effect, {
 package: "@effect/cluster",
 module: "Sharding",
 fiber: "Resuming unprocessed messages",
 runner: selfAddress,
 entity: address
-}), (effect, address) => Effect.ensuring(effect, Effect.sync(() => MutableHashMap.remove(entityResumptionState, address))), Effect.
+}), (effect, address) => Effect.ensuring(effect, Effect.sync(() => MutableHashMap.remove(entityResumptionState, address))), Effect.withUnhandledErrorLogLevel(Option.none()), Effect.forkIn(shardingScope));
 }
 // --- Sending messages ---
-const sendLocal = message => Effect.suspend(()
+const sendLocal = message => Effect.suspend(function loop() {
 const address = message.envelope.address;
 if (!isEntityOnLocalShards(address)) {
 return Effect.fail(new _ClusterError.EntityNotAssignedToRunner({
@@ -442,57 +424,84 @@ const make = /*#__PURE__*/Effect.gen(function* () {
 }
 const state = entityManagers.get(address.entityType);
 if (!state) {
-return Effect.
+return Effect.flatMap(waitForEntityManager(address.entityType), loop);
+} else if (state.closed || isShutdown.current && message._tag === "IncomingRequest") {
+// if we are shutting down, we don't accept new requests
+return Effect.fail(new _ClusterError.EntityNotAssignedToRunner({
 address
 }));
 }
-return message._tag === "IncomingRequest" || message._tag === "IncomingEnvelope" ? state.manager.send(message) :
+return message._tag === "IncomingRequest" || message._tag === "IncomingEnvelope" ? state.manager.send(message) : runnersService.sendLocal({
 message,
 send: state.manager.sendLocal,
 simulateRemoteSerialization: config.simulateRemoteSerialization
 });
 });
-const
+const pendingNotifications = new Map();
+const notifyLocal = (message, discard, options) => Effect.suspend(function loop() {
 const address = message.envelope.address;
-
-
+const state = entityManagers.get(address.entityType);
+if (!state) {
+return Effect.flatMap(waitForEntityManager(address.entityType), loop);
+} else if (state.closed || !isEntityOnLocalShards(address)) {
+return Effect.fail(new _ClusterError.EntityNotAssignedToRunner({
 address
 }));
 }
 const isLocal = isEntityOnLocalShards(address);
-const notify = storageEnabled ? openStorageReadLatch : () => Effect.
+const notify = storageEnabled ? openStorageReadLatch : () => Effect.die("Sharding.notifyLocal: storage is disabled");
 if (message._tag === "IncomingRequest" || message._tag === "IncomingEnvelope") {
-if (
+if (!isLocal) {
+return Effect.fail(new _ClusterError.EntityNotAssignedToRunner({
+address
+}));
+} else if (message._tag === "IncomingRequest" && state.manager.isProcessingFor(message, {
+excludeReplies: true
+})) {
 return Effect.fail(new _ClusterError.AlreadyProcessingMessage({
 address,
 envelopeId: message.envelope.requestId
 }));
-} else if (
-
-
-
+} else if (message._tag === "IncomingRequest" && options?.waitUntilRead) {
+if (!storageEnabled) return notify();
+return Effect.async(resume => {
+let entry = pendingNotifications.get(message.envelope.requestId);
+if (entry) {
+const prevResume = entry.resume;
+entry.resume = effect => {
+prevResume(effect);
+resume(effect);
+};
+return;
+}
+entry = {
+resume,
+message
+};
+pendingNotifications.set(message.envelope.requestId, entry);
+storageReadLatch.unsafeOpen();
+});
 }
 return notify();
 }
-return
+return runnersService.notifyLocal({
 message,
 notify,
 discard,
 storageOnly: !isLocal
 });
 });
-const isTransientError = Predicate.or(_ClusterError.RunnerUnavailable.is, _ClusterError.EntityNotAssignedToRunner.is);
 function sendOutgoing(message, discard, retries) {
 return Effect.catchIf(Effect.suspend(() => {
 const address = message.envelope.address;
-const maybeRunner = MutableHashMap.get(shardAssignments, address.shardId);
 const isPersisted = Context.get(message.rpc.annotations, _ClusterSchema.Persisted);
 if (isPersisted && !storageEnabled) {
-return Effect.
+return Effect.die("Sharding.sendOutgoing: Persisted messages require MessageStorage");
 }
+const maybeRunner = MutableHashMap.get(shardAssignments, address.shardId);
 const runnerIsLocal = Option.isSome(maybeRunner) && isLocalRunner(maybeRunner.value);
 if (isPersisted) {
-return runnerIsLocal ? notifyLocal(message, discard) :
+return runnerIsLocal ? notifyLocal(message, discard) : runnersService.notify({
 address: maybeRunner,
 message,
 discard
@@ -502,127 +511,138 @@ const make = /*#__PURE__*/Effect.gen(function* () {
 address
 }));
 }
-return runnerIsLocal ? sendLocal(message) :
+return runnerIsLocal ? sendLocal(message) : runnersService.send({
 address: maybeRunner.value,
 message
 });
-}),
+}), error => error._tag === "EntityNotAssignedToRunner" || error._tag === "RunnerUnavailable", error => {
 if (retries === 0) {
 return Effect.die(error);
 }
 return Effect.delay(sendOutgoing(message, discard, retries && retries - 1), config.sendRetryInterval);
 });
 }
-const reset = Effect.
-yield* storage.clearReplies(requestId);
-sentRequestIds.delete(requestId);
-}, Effect.matchCause({
+const reset = requestId => Effect.matchCause(storage.clearReplies(requestId), {
 onSuccess: () => true,
 onFailure: () => false
-}));
-// --- Shard Manager sync ---
-const shardManagerTimeoutFiber = yield* FiberHandle.make().pipe(Scope.extend(shardingScope));
-const startShardManagerTimeout = FiberHandle.run(shardManagerTimeoutFiber, Effect.flatMap(Effect.sleep(config.shardManagerUnavailableTimeout), () => {
-MutableHashMap.clear(shardAssignments);
-return clearSelfShards;
-}), {
-onlyIfMissing: true
 });
-
-//
-//
+// --- RunnerStorage sync ---
+//
+// This is responsible for syncing the local view of runners and shard
+// assignments with RunnerStorage.
+//
+// It should be shutdown after the clients, so that they can still get correct
+// shard assignments for outgoing messages (they could still be in use by
+// entities that are shutting down).
+const selfRunner = Option.isSome(config.runnerAddress) ? new _Runner.Runner({
+address: config.runnerAddress.value,
+groups: config.shardGroups,
+weight: config.runnerShardWeight
+}) : undefined;
+let allRunners = MutableHashMap.empty();
+let healthyRunnerCount = 0;
+// update metrics
+if (selfRunner) {
+ClusterMetrics.runners.unsafeUpdate(BigInt(1), []);
+ClusterMetrics.runnersHealthy.unsafeUpdate(BigInt(1), []);
+}
 yield* Effect.gen(function* () {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-}
-case "ShardsAssigned":
-{
-for (const shard of event.shards) {
-MutableHashMap.set(shardAssignments, shard, event.address);
-}
-if (!MutableRef.get(isShutdown) && isLocalRunner(event.address)) {
-for (const shardId of event.shards) {
-if (MutableHashSet.has(selfShards, shardId)) continue;
-MutableHashSet.add(selfShards, shardId);
-}
-yield* activeShardsLatch.open;
-}
-break;
-}
-case "ShardsUnassigned":
-{
-for (const shard of event.shards) {
-MutableHashMap.remove(shardAssignments, shard);
-}
-if (isLocalRunner(event.address)) {
-for (const shard of event.shards) {
-MutableHashSet.remove(selfShards, shard);
-}
-yield* activeShardsLatch.open;
-}
-break;
-}
-case "RunnerUnregistered":
-{
-if (!isLocalRunner(event.address)) break;
-return yield* Effect.fail(new ClusterError.RunnerNotRegistered({
-address: event.address
-}));
-}
+const hashRings = new Map();
+let nextRunners = MutableHashMap.empty();
+const healthyRunners = MutableHashSet.empty();
+while (true) {
+// Ensure the current runner is registered
+if (selfRunner && !isShutdown.current && !MutableHashMap.has(allRunners, selfRunner)) {
+yield* Effect.logDebug("Registering runner", selfRunner);
+const machineId = yield* runnerStorage.register(selfRunner, true);
+yield* snowflakeGen.setMachineId(machineId);
+}
+const runners = yield* runnerStorage.getRunners;
+let changed = false;
+for (let i = 0; i < runners.length; i++) {
+const [runner, healthy] = runners[i];
+MutableHashMap.set(nextRunners, runner, healthy);
+const wasHealthy = MutableHashSet.has(healthyRunners, runner);
+if (!healthy || wasHealthy) {
+if (healthy === wasHealthy || !wasHealthy) {
+// no change
+MutableHashMap.remove(allRunners, runner);
 }
+continue;
+}
+changed = true;
+MutableHashSet.add(healthyRunners, runner);
+MutableHashMap.remove(allRunners, runner);
+for (let j = 0; j < runner.groups.length; j++) {
+const group = runner.groups[j];
+let ring = hashRings.get(group);
+if (!ring) {
+ring = HashRing.make();
+hashRings.set(group, ring);
+}
+HashRing.add(ring, runner.address, {
+weight: runner.weight
+});
 }
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-if (
-MutableHashMap.remove(shardAssignments, shardId);
-MutableHashSet.remove(selfShards, shardId);
+// Remove runners that are no longer present or healthy
+MutableHashMap.forEach(allRunners, (_, runner) => {
+changed = true;
+MutableHashMap.remove(allRunners, runner);
+MutableHashSet.remove(healthyRunners, runner);
+runFork(runnersService.onRunnerUnavailable(runner.address));
+for (let i = 0; i < runner.groups.length; i++) {
+HashRing.remove(hashRings.get(runner.groups[i]), runner.address);
+}
+});
+// swap allRunners and nextRunners
+const prevRunners = allRunners;
+allRunners = nextRunners;
+nextRunners = prevRunners;
+healthyRunnerCount = MutableHashSet.size(healthyRunners);
+// Ensure the current runner is registered
+if (selfRunner && !isShutdown.current && !MutableHashMap.has(allRunners, selfRunner)) {
 continue;
 }
-
-if (
-MutableHashSet.
-
+// Recompute shard assignments if the set of healthy runners has changed.
+if (changed) {
+MutableHashSet.clear(selfShards);
+hashRings.forEach((ring, group) => {
+const newAssignments = HashRing.getShards(ring, config.shardsPerGroup);
+for (let i = 0; i < config.shardsPerGroup; i++) {
+const shard = (0, _ShardId.make)(group, i + 1);
+if (newAssignments) {
+const runner = newAssignments[i];
+MutableHashMap.set(shardAssignments, shard, runner);
+if (isLocalRunner(runner)) {
+MutableHashSet.add(selfShards, shard);
+}
+} else {
+MutableHashMap.remove(shardAssignments, shard);
+}
+}
+});
+yield* Effect.logDebug("New shard assignments", selfShards);
+activeShardsLatch.unsafeOpen();
+// update metrics
+if (selfRunner) {
+ClusterMetrics.runnersHealthy.unsafeUpdate(BigInt(MutableHashSet.has(healthyRunners, selfRunner) ? 1 : 0), []);
+}
 }
-if (
-
+if (selfRunner && MutableHashSet.size(healthyRunners) === 0) {
+yield* Effect.logWarning("No healthy runners available");
+// to prevent a deadlock, we will mark the current node as healthy to
+// start the health check singleton again
+yield* runnerStorage.setRunnerHealth(selfRunner.address, true);
 }
-
+yield* Effect.sleep(config.refreshAssignmentsInterval);
 }
-
-
+}).pipe(Effect.catchAllCause(cause => Effect.logDebug(cause)), Effect.repeat(Schedule.spaced(1000)), Effect.annotateLogs({
+package: "@effect/cluster",
+module: "Sharding",
+fiber: "RunnerStorage sync",
+runner: config.runnerAddress
+}), Effect.forkIn(shardingScope));
 const clientRequests = new Map();
 const clients = yield* _resourceMap.ResourceMap.make(Effect.fnUntraced(function* (entity) {
 const client = yield* RpcClient.makeNoSerialization(entity.protocol, {
@@ -643,7 +663,7 @@ const make = /*#__PURE__*/Effect.gen(function* () {
 if (!options.discard) {
 const entry = {
 rpc: rpc,
-
+services: fiber.currentContext
 };
 clientRequests.set(id, entry);
 respond = makeClientRespond(entry, client.write);
@@ -688,7 +708,7 @@ const make = /*#__PURE__*/Effect.gen(function* () {
 const entry = clientRequests.get(requestId);
 if (!entry) return Effect.void;
 clientRequests.delete(requestId);
-if (
+if (_ClusterSchema.Uninterruptible.forClient(entry.rpc.annotations)) {
 return Effect.void;
 }
 // for durable messages, we ignore interrupts on shutdown or as a
@@ -715,8 +735,8 @@ const make = /*#__PURE__*/Effect.gen(function* () {
 return Effect.void;
 }));
 return entityId => {
-const id = _EntityId.
-const address = ClientAddressTag.context(_EntityAddress.
+const id = (0, _EntityId.make)(entityId);
+const address = ClientAddressTag.context((0, _EntityAddress.make)({
 shardId: getShardId(id, entity.getShardGroup(entityId)),
 entityId: id,
 entityType: entity.type
@@ -770,23 +790,84 @@ const make = /*#__PURE__*/Effect.gen(function* () {
 }
 }
 };
+// --- Singletons ---
+const singletons = new Map();
+const singletonFibers = yield* FiberMap.make();
+const withSingletonLock = Effect.unsafeMakeSemaphore(1).withPermits(1);
+const registerSingleton = Effect.fnUntraced(function* (name, run, options) {
+const shardGroup = options?.shardGroup ?? "default";
+const address = new _SingletonAddress.SingletonAddress({
+shardId: getShardId((0, _EntityId.make)(name), shardGroup),
+name
+});
+let map = singletons.get(address.shardId);
+if (!map) {
+map = MutableHashMap.empty();
+singletons.set(address.shardId, map);
+}
+if (MutableHashMap.has(map, address)) {
+return yield* Effect.die(`Singleton '${name}' is already registered`);
+}
+const context = yield* Effect.context();
+const wrappedRun = run.pipe(Effect.locally(FiberRef.currentLogAnnotations, HashMap.empty()), Effect.andThen(Effect.never), Effect.scoped, Effect.provide(context), Effect.orDie, Effect.interruptible);
+MutableHashMap.set(map, address, wrappedRun);
+yield* PubSub.publish(events, (0, _ShardingRegistrationEvent.SingletonRegistered)({
+address
+}));
+// start if we are on the right shard
+if (MutableHashSet.has(acquiredShards, address.shardId)) {
+yield* Effect.logDebug("Starting singleton", address);
+yield* FiberMap.run(singletonFibers, address, wrappedRun);
+}
+}, withSingletonLock);
+const syncSingletons = withSingletonLock(Effect.gen(function* () {
+for (const [shardId, map] of singletons) {
+for (const [address, run] of map) {
+const running = FiberMap.unsafeHas(singletonFibers, address);
+const shouldBeRunning = MutableHashSet.has(acquiredShards, shardId);
+if (running && !shouldBeRunning) {
+yield* Effect.logDebug("Stopping singleton", address);
+_interruptors.internalInterruptors.add(Option.getOrThrow(Fiber.getCurrentFiber()).id());
+yield* FiberMap.remove(singletonFibers, address);
+} else if (!running && shouldBeRunning) {
+yield* Effect.logDebug("Starting singleton", address);
+yield* FiberMap.run(singletonFibers, address, run);
+}
+}
+}
+ClusterMetrics.singletons.unsafeUpdate(BigInt(yield* FiberMap.size(singletonFibers)), []);
+}));
 // --- Entities ---
 const context = yield* Effect.context();
 const reaper = yield* _entityReaper.EntityReaper;
+const entityManagerLatches = new Map();
 const registerEntity = Effect.fnUntraced(function* (entity, build, options) {
 if (Option.isNone(config.runnerAddress) || entityManagers.has(entity.type)) return;
 const scope = yield* Scope.make();
+yield* Scope.addFinalizer(scope, Effect.sync(() => {
+state.closed = true;
+}));
 const manager = yield* EntityManager.make(entity, build, {
 ...options,
 storage,
 runnerAddress: config.runnerAddress.value,
 sharding
 }).pipe(Effect.provide(context.pipe(Context.add(_entityReaper.EntityReaper, reaper), Context.add(Scope.Scope, scope), Context.add(Snowflake.Generator, snowflakeGen))));
-
+const state = {
 entity,
 scope,
+closed: false,
 manager
-}
+};
+// register entities while storage is idle
+// this ensures message order is preserved
+yield* withStorageReadLock(Effect.sync(() => {
+entityManagers.set(entity.type, state);
+if (entityManagerLatches.has(entity.type)) {
+entityManagerLatches.get(entity.type).unsafeOpen();
+entityManagerLatches.delete(entity.type);
+}
+}));
 yield* PubSub.publish(events, (0, _ShardingRegistrationEvent.EntityRegistered)({
 entity
 }));
@@ -797,20 +878,58 @@ const make = /*#__PURE__*/Effect.gen(function* () {
 concurrency: "unbounded",
 discard: true
 }));
+const waitForEntityManager = entityType => {
+let latch = entityManagerLatches.get(entityType);
+if (!latch) {
+latch = Effect.unsafeMakeLatch();
+entityManagerLatches.set(entityType, latch);
+}
+return latch.await;
+};
+// --- Runner health checks ---
+if (selfRunner) {
+const checkRunner = ([runner, healthy]) => Effect.flatMap(runnerHealth.isAlive(runner.address), isAlive => {
+if (healthy === isAlive) return Effect.void;
+if (isAlive) {
+healthyRunnerCount++;
+return Effect.logDebug(`Runner is healthy`, runner).pipe(Effect.andThen(runnerStorage.setRunnerHealth(runner.address, isAlive)));
+}
+if (healthyRunnerCount <= 1) {
+// never mark the last runner as unhealthy, to prevent a deadlock
+return Effect.void;
+}
+healthyRunnerCount--;
+return Effect.logDebug(`Runner is unhealthy`, runner).pipe(Effect.andThen(runnerStorage.setRunnerHealth(runner.address, isAlive)));
+});
+yield* registerSingleton("effect/cluster/Sharding/RunnerHealth", Effect.gen(function* () {
+while (true) {
+// Skip health checks if we are the only runner
+if (MutableHashMap.size(allRunners) > 1) {
+yield* Effect.forEach(allRunners, checkRunner, {
+discard: true,
+concurrency: 10
+});
+}
+yield* Effect.sleep(config.runnerHealthCheckInterval);
+}
+}).pipe(Effect.catchAllCause(cause => Effect.logDebug("Runner health check failed", cause)), Effect.forever, Effect.annotateLogs({
+package: "@effect/cluster",
+module: "Sharding",
+fiber: "Runner health check"
+})));
+}
 // --- Finalization ---
-
-
-
-
-yield* Effect.logDebug("Unregistering runner from shard manager", selfAddress);
-yield* shardManager.unregister(selfAddress).pipe(Effect.catchAllCause(cause => Effect.logError("Error calling unregister with shard manager", cause)));
-yield* clearSelfShards;
+yield* Scope.addFinalizerExit(shardingScope, Effect.fnUntraced(function* (exit) {
+yield* Effect.logDebug("Shutting down", exit._tag === "Success" ? {} : exit.cause).pipe(Effect.annotateLogs({
+package: "@effect/cluster",
+module: "Sharding"
 }));
-
-yield* Scope.addFinalizer(shardingScope, Effect.withFiberRuntime(fiber => {
+const fiberId = yield* Effect.fiberId;
 MutableRef.set(isShutdown, true);
-_interruptors.internalInterruptors.add(
-
+_interruptors.internalInterruptors.add(fiberId);
+if (selfRunner) {
+yield* Effect.ignore(runnerStorage.unregister(selfRunner.address));
+}
 }));
 const activeEntityCount = Effect.gen(function* () {
 let count = 0;
@@ -822,13 +941,18 @@ const make = /*#__PURE__*/Effect.gen(function* () {
 const sharding = Sharding.of({
 getRegistrationEvents,
 getShardId,
+hasShardId(shardId) {
+if (isShutdown.current) return false;
+return MutableHashSet.has(acquiredShards, shardId);
+},
+getSnowflake: Effect.sync(() => snowflakeGen.unsafeNext()),
 isShutdown: Effect.sync(() => MutableRef.get(isShutdown)),
 registerEntity,
 registerSingleton,
 makeClient,
 send: sendLocal,
 sendOutgoing: (message, discard) => sendOutgoing(message, discard),
-notify: message => notifyLocal(message, false),
+notify: (message, options) => notifyLocal(message, false, options),
 activeEntityCount,
 pollStorage: storageReadLatch.open,
 reset
@@ -839,7 +963,7 @@ const make = /*#__PURE__*/Effect.gen(function* () {
 * @since 1.0.0
 * @category layers
 */
-const layer = exports.layer = /*#__PURE__*/Layer.scoped(Sharding
+const layer = exports.layer = /*#__PURE__*/Layer.scoped(Sharding)(make).pipe(/*#__PURE__*/Layer.provide([Snowflake.layerGenerator, _entityReaper.EntityReaper.Default]));
 // Utilities
 const ClientAddressTag = /*#__PURE__*/Context.GenericTag("@effect/cluster/Sharding/ClientAddress");
 //# sourceMappingURL=Sharding.js.map