@effect/cluster 0.50.6 → 0.52.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/RunnerStorage/package.json +6 -0
- package/SqlRunnerStorage/package.json +6 -0
- package/dist/cjs/ClusterError.js +2 -24
- package/dist/cjs/ClusterError.js.map +1 -1
- package/dist/cjs/ClusterMetrics.js +13 -15
- package/dist/cjs/ClusterMetrics.js.map +1 -1
- package/dist/cjs/ClusterSchema.js +17 -2
- package/dist/cjs/ClusterSchema.js.map +1 -1
- package/dist/cjs/ClusterWorkflowEngine.js +50 -83
- package/dist/cjs/ClusterWorkflowEngine.js.map +1 -1
- package/dist/cjs/Entity.js +1 -13
- package/dist/cjs/Entity.js.map +1 -1
- package/dist/cjs/EntityAddress.js +9 -1
- package/dist/cjs/EntityAddress.js.map +1 -1
- package/dist/cjs/EntityId.js +7 -1
- package/dist/cjs/EntityId.js.map +1 -1
- package/dist/cjs/EntityProxy.js +1 -1
- package/dist/cjs/EntityProxy.js.map +1 -1
- package/dist/cjs/HttpRunner.js +69 -43
- package/dist/cjs/HttpRunner.js.map +1 -1
- package/dist/cjs/MessageStorage.js +64 -16
- package/dist/cjs/MessageStorage.js.map +1 -1
- package/dist/cjs/Runner.js +3 -3
- package/dist/cjs/Runner.js.map +1 -1
- package/dist/cjs/RunnerAddress.js +7 -0
- package/dist/cjs/RunnerAddress.js.map +1 -1
- package/dist/cjs/RunnerHealth.js +91 -32
- package/dist/cjs/RunnerHealth.js.map +1 -1
- package/dist/cjs/RunnerServer.js +38 -24
- package/dist/cjs/RunnerServer.js.map +1 -1
- package/dist/cjs/RunnerStorage.js +100 -0
- package/dist/cjs/RunnerStorage.js.map +1 -0
- package/dist/cjs/Runners.js +18 -22
- package/dist/cjs/Runners.js.map +1 -1
- package/dist/cjs/ShardId.js +17 -7
- package/dist/cjs/ShardId.js.map +1 -1
- package/dist/cjs/Sharding.js +444 -320
- package/dist/cjs/Sharding.js.map +1 -1
- package/dist/cjs/ShardingConfig.js +10 -14
- package/dist/cjs/ShardingConfig.js.map +1 -1
- package/dist/cjs/Snowflake.js +1 -1
- package/dist/cjs/SocketRunner.js +1 -1
- package/dist/cjs/SocketRunner.js.map +1 -1
- package/dist/cjs/SqlMessageStorage.js +22 -28
- package/dist/cjs/SqlMessageStorage.js.map +1 -1
- package/dist/cjs/SqlRunnerStorage.js +375 -0
- package/dist/cjs/SqlRunnerStorage.js.map +1 -0
- package/dist/cjs/index.js +5 -15
- package/dist/cjs/internal/entityManager.js +42 -23
- package/dist/cjs/internal/entityManager.js.map +1 -1
- package/dist/dts/ClusterError.d.ts +0 -22
- package/dist/dts/ClusterError.d.ts.map +1 -1
- package/dist/dts/ClusterMetrics.d.ts +4 -14
- package/dist/dts/ClusterMetrics.d.ts.map +1 -1
- package/dist/dts/ClusterSchema.d.ts +9 -1
- package/dist/dts/ClusterSchema.d.ts.map +1 -1
- package/dist/dts/ClusterWorkflowEngine.d.ts.map +1 -1
- package/dist/dts/Entity.d.ts +3 -14
- package/dist/dts/Entity.d.ts.map +1 -1
- package/dist/dts/EntityAddress.d.ts +11 -0
- package/dist/dts/EntityAddress.d.ts.map +1 -1
- package/dist/dts/EntityId.d.ts +5 -0
- package/dist/dts/EntityId.d.ts.map +1 -1
- package/dist/dts/EntityProxy.d.ts +5 -6
- package/dist/dts/EntityProxy.d.ts.map +1 -1
- package/dist/dts/HttpRunner.d.ts +48 -25
- package/dist/dts/HttpRunner.d.ts.map +1 -1
- package/dist/dts/MessageStorage.d.ts +13 -5
- package/dist/dts/MessageStorage.d.ts.map +1 -1
- package/dist/dts/Runner.d.ts +4 -4
- package/dist/dts/Runner.d.ts.map +1 -1
- package/dist/dts/RunnerAddress.d.ts +5 -0
- package/dist/dts/RunnerAddress.d.ts.map +1 -1
- package/dist/dts/RunnerHealth.d.ts +24 -16
- package/dist/dts/RunnerHealth.d.ts.map +1 -1
- package/dist/dts/RunnerServer.d.ts +5 -4
- package/dist/dts/RunnerServer.d.ts.map +1 -1
- package/dist/dts/{ShardStorage.d.ts → RunnerStorage.d.ts} +41 -54
- package/dist/dts/RunnerStorage.d.ts.map +1 -0
- package/dist/dts/Runners.d.ts +15 -11
- package/dist/dts/Runners.d.ts.map +1 -1
- package/dist/dts/ShardId.d.ts +1 -1
- package/dist/dts/ShardId.d.ts.map +1 -1
- package/dist/dts/Sharding.d.ts +20 -10
- package/dist/dts/Sharding.d.ts.map +1 -1
- package/dist/dts/ShardingConfig.d.ts +40 -14
- package/dist/dts/ShardingConfig.d.ts.map +1 -1
- package/dist/dts/SocketRunner.d.ts +4 -3
- package/dist/dts/SocketRunner.d.ts.map +1 -1
- package/dist/dts/SqlMessageStorage.d.ts +2 -3
- package/dist/dts/SqlMessageStorage.d.ts.map +1 -1
- package/dist/dts/SqlRunnerStorage.d.ts +40 -0
- package/dist/dts/SqlRunnerStorage.d.ts.map +1 -0
- package/dist/dts/index.d.ts +4 -24
- package/dist/dts/index.d.ts.map +1 -1
- package/dist/esm/ClusterError.js +0 -21
- package/dist/esm/ClusterError.js.map +1 -1
- package/dist/esm/ClusterMetrics.js +12 -14
- package/dist/esm/ClusterMetrics.js.map +1 -1
- package/dist/esm/ClusterSchema.js +17 -2
- package/dist/esm/ClusterSchema.js.map +1 -1
- package/dist/esm/ClusterWorkflowEngine.js +50 -83
- package/dist/esm/ClusterWorkflowEngine.js.map +1 -1
- package/dist/esm/Entity.js +0 -12
- package/dist/esm/Entity.js.map +1 -1
- package/dist/esm/EntityAddress.js +7 -0
- package/dist/esm/EntityAddress.js.map +1 -1
- package/dist/esm/EntityId.js +5 -0
- package/dist/esm/EntityId.js.map +1 -1
- package/dist/esm/EntityProxy.js +2 -2
- package/dist/esm/EntityProxy.js.map +1 -1
- package/dist/esm/HttpRunner.js +62 -39
- package/dist/esm/HttpRunner.js.map +1 -1
- package/dist/esm/MessageStorage.js +65 -17
- package/dist/esm/MessageStorage.js.map +1 -1
- package/dist/esm/Runner.js +3 -3
- package/dist/esm/Runner.js.map +1 -1
- package/dist/esm/RunnerAddress.js +7 -0
- package/dist/esm/RunnerAddress.js.map +1 -1
- package/dist/esm/RunnerHealth.js +88 -30
- package/dist/esm/RunnerHealth.js.map +1 -1
- package/dist/esm/RunnerServer.js +38 -24
- package/dist/esm/RunnerServer.js.map +1 -1
- package/dist/esm/RunnerStorage.js +90 -0
- package/dist/esm/RunnerStorage.js.map +1 -0
- package/dist/esm/Runners.js +19 -23
- package/dist/esm/Runners.js.map +1 -1
- package/dist/esm/ShardId.js +16 -6
- package/dist/esm/ShardId.js.map +1 -1
- package/dist/esm/Sharding.js +447 -323
- package/dist/esm/Sharding.js.map +1 -1
- package/dist/esm/ShardingConfig.js +10 -14
- package/dist/esm/ShardingConfig.js.map +1 -1
- package/dist/esm/Snowflake.js +1 -1
- package/dist/esm/SocketRunner.js +1 -1
- package/dist/esm/SocketRunner.js.map +1 -1
- package/dist/esm/SqlMessageStorage.js +22 -28
- package/dist/esm/SqlMessageStorage.js.map +1 -1
- package/dist/esm/SqlRunnerStorage.js +366 -0
- package/dist/esm/SqlRunnerStorage.js.map +1 -0
- package/dist/esm/index.js +4 -24
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/internal/entityManager.js +41 -22
- package/dist/esm/internal/entityManager.js.map +1 -1
- package/package.json +20 -60
- package/src/ClusterError.ts +0 -24
- package/src/ClusterMetrics.ts +12 -16
- package/src/ClusterSchema.ts +17 -2
- package/src/ClusterWorkflowEngine.ts +48 -80
- package/src/Entity.ts +3 -21
- package/src/EntityAddress.ts +10 -0
- package/src/EntityId.ts +6 -0
- package/src/EntityProxy.ts +10 -10
- package/src/HttpRunner.ts +132 -67
- package/src/MessageStorage.ts +89 -24
- package/src/Runner.ts +4 -4
- package/src/RunnerAddress.ts +8 -0
- package/src/RunnerHealth.ts +119 -56
- package/src/RunnerServer.ts +64 -47
- package/src/RunnerStorage.ts +218 -0
- package/src/Runners.ts +32 -45
- package/src/ShardId.ts +14 -3
- package/src/Sharding.ts +561 -417
- package/src/ShardingConfig.ts +39 -31
- package/src/Snowflake.ts +1 -1
- package/src/SocketRunner.ts +6 -4
- package/src/SqlMessageStorage.ts +28 -30
- package/src/SqlRunnerStorage.ts +537 -0
- package/src/index.ts +4 -29
- package/src/internal/entityManager.ts +45 -29
- package/HttpCommon/package.json +0 -6
- package/HttpShardManager/package.json +0 -6
- package/ShardManager/package.json +0 -6
- package/ShardStorage/package.json +0 -6
- package/SocketShardManager/package.json +0 -6
- package/SqlShardStorage/package.json +0 -6
- package/SynchronizedClock/package.json +0 -6
- package/dist/cjs/HttpCommon.js +0 -48
- package/dist/cjs/HttpCommon.js.map +0 -1
- package/dist/cjs/HttpShardManager.js +0 -139
- package/dist/cjs/HttpShardManager.js.map +0 -1
- package/dist/cjs/ShardManager.js +0 -549
- package/dist/cjs/ShardManager.js.map +0 -1
- package/dist/cjs/ShardStorage.js +0 -151
- package/dist/cjs/ShardStorage.js.map +0 -1
- package/dist/cjs/SocketShardManager.js +0 -32
- package/dist/cjs/SocketShardManager.js.map +0 -1
- package/dist/cjs/SqlShardStorage.js +0 -253
- package/dist/cjs/SqlShardStorage.js.map +0 -1
- package/dist/cjs/SynchronizedClock.js +0 -65
- package/dist/cjs/SynchronizedClock.js.map +0 -1
- package/dist/cjs/internal/shardManager.js +0 -353
- package/dist/cjs/internal/shardManager.js.map +0 -1
- package/dist/dts/HttpCommon.d.ts +0 -25
- package/dist/dts/HttpCommon.d.ts.map +0 -1
- package/dist/dts/HttpShardManager.d.ts +0 -119
- package/dist/dts/HttpShardManager.d.ts.map +0 -1
- package/dist/dts/ShardManager.d.ts +0 -459
- package/dist/dts/ShardManager.d.ts.map +0 -1
- package/dist/dts/ShardStorage.d.ts.map +0 -1
- package/dist/dts/SocketShardManager.d.ts +0 -17
- package/dist/dts/SocketShardManager.d.ts.map +0 -1
- package/dist/dts/SqlShardStorage.d.ts +0 -38
- package/dist/dts/SqlShardStorage.d.ts.map +0 -1
- package/dist/dts/SynchronizedClock.d.ts +0 -19
- package/dist/dts/SynchronizedClock.d.ts.map +0 -1
- package/dist/dts/internal/shardManager.d.ts +0 -2
- package/dist/dts/internal/shardManager.d.ts.map +0 -1
- package/dist/esm/HttpCommon.js +0 -38
- package/dist/esm/HttpCommon.js.map +0 -1
- package/dist/esm/HttpShardManager.js +0 -128
- package/dist/esm/HttpShardManager.js.map +0 -1
- package/dist/esm/ShardManager.js +0 -535
- package/dist/esm/ShardManager.js.map +0 -1
- package/dist/esm/ShardStorage.js +0 -141
- package/dist/esm/ShardStorage.js.map +0 -1
- package/dist/esm/SocketShardManager.js +0 -24
- package/dist/esm/SocketShardManager.js.map +0 -1
- package/dist/esm/SqlShardStorage.js +0 -244
- package/dist/esm/SqlShardStorage.js.map +0 -1
- package/dist/esm/SynchronizedClock.js +0 -57
- package/dist/esm/SynchronizedClock.js.map +0 -1
- package/dist/esm/internal/shardManager.js +0 -342
- package/dist/esm/internal/shardManager.js.map +0 -1
- package/src/HttpCommon.ts +0 -73
- package/src/HttpShardManager.ts +0 -273
- package/src/ShardManager.ts +0 -823
- package/src/ShardStorage.ts +0 -297
- package/src/SocketShardManager.ts +0 -48
- package/src/SqlShardStorage.ts +0 -329
- package/src/SynchronizedClock.ts +0 -82
- package/src/internal/shardManager.ts +0 -412
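In summary, this release removes the ShardManager, HttpShardManager, SocketShardManager, ShardStorage, SqlShardStorage, SynchronizedClock, and HttpCommon entry points and adds RunnerStorage and SqlRunnerStorage (note the ShardStorage.d.ts → RunnerStorage.d.ts rename above). A minimal, hypothetical sketch of what that rename would mean for downstream imports, assuming the subpath exports mirror the per-entry-point package.json files added and removed in the list above:

// Before (0.50.6) — assumed subpath export, matching the removed ShardStorage/package.json
import * as ShardStorage from "@effect/cluster/ShardStorage"

// After (0.52.0) — assumed subpath export, matching the added RunnerStorage/package.json
import * as RunnerStorage from "@effect/cluster/RunnerStorage"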

package/dist/esm/HttpShardManager.js
DELETED

@@ -1,128 +0,0 @@
-import * as HttpRouter from "@effect/platform/HttpRouter";
-import * as HttpServer from "@effect/platform/HttpServer";
-import * as RpcServer from "@effect/rpc/RpcServer";
-import * as Effect from "effect/Effect";
-import { identity } from "effect/Function";
-import * as Layer from "effect/Layer";
-import { layerClientProtocolHttp, layerClientProtocolWebsocket } from "./HttpCommon.js";
-import * as MessageStorage from "./MessageStorage.js";
-import * as RunnerHealth from "./RunnerHealth.js";
-import * as Runners from "./Runners.js";
-import * as ShardManager from "./ShardManager.js";
-/**
- * @since 1.0.0
- * @category Http App
- */
-export const toHttpApp = /*#__PURE__*/Effect.gen(function* () {
-  const handlers = yield* Layer.build(ShardManager.layerServerHandlers);
-  return yield* RpcServer.toHttpApp(ShardManager.Rpcs).pipe(Effect.provide(handlers));
-});
-/**
- * @since 1.0.0
- * @category Http App
- */
-export const toHttpAppWebsocket = /*#__PURE__*/Effect.gen(function* () {
-  const handlers = yield* Layer.build(ShardManager.layerServerHandlers);
-  return yield* RpcServer.toHttpAppWebsocket(ShardManager.Rpcs).pipe(Effect.provide(handlers));
-});
-/**
- * A layer for the `ShardManager` service, that does not run a server.
- *
- * It only provides the `Runners` rpc client.
- *
- * You can use this with the `toHttpApp` and `toHttpAppWebsocket` apis
- * to run a complete `ShardManager` server.
- *
- * @since 1.0.0
- * @category Layers
- */
-export const layerNoServerHttp = options => ShardManager.layer.pipe(Layer.provide(Runners.layerRpc.pipe(Layer.provide([layerClientProtocolHttp({
-  path: options.runnerPath,
-  https: options.runnerHttps
-}), MessageStorage.layerNoop]))));
-/**
- * A layer for the `ShardManager` service, that does not run a server.
- *
- * It only provides the `Runners` rpc client.
- *
- * You can use this with the `toHttpApp` and `toHttpAppWebsocket` apis
- * to run a complete `ShardManager` server.
- *
- * @since 1.0.0
- * @category Layers
- */
-export const layerNoServerWebsocket = options => ShardManager.layer.pipe(Layer.provide(Runners.layerRpc.pipe(Layer.provide([layerClientProtocolWebsocket({
-  path: options.runnerPath,
-  https: options.runnerHttps
-}), MessageStorage.layerNoop]))));
-/**
- * A HTTP layer for the `ShardManager` server, that adds a route to the provided
- * `HttpRouter.Tag`.
- *
- * By default, it uses the `HttpRouter.Default` tag.
- *
- * @since 1.0.0
- * @category Layers
- */
-export const layerHttpOptions = options => {
-  const routerTag = options.routerTag ?? HttpRouter.Default;
-  return routerTag.serve().pipe(options.logAddress ? withLogAddress : identity, Layer.merge(ShardManager.layerServer), Layer.provide(RpcServer.layerProtocolHttp(options)), Layer.provideMerge(layerNoServerHttp(options)));
-};
-/**
- * A WebSocket layer for the `ShardManager` server, that adds a route to the provided
- * `HttpRouter.Tag`.
- *
- * By default, it uses the `HttpRouter.Default` tag.
- *
- * @since 1.0.0
- * @category Layers
- */
-export const layerWebsocketOptions = options => {
-  const routerTag = options.routerTag ?? HttpRouter.Default;
-  return routerTag.serve().pipe(options.logAddress ? withLogAddress : identity, Layer.merge(ShardManager.layerServer), Layer.provide(RpcServer.layerProtocolWebsocket(options)), Layer.provideMerge(layerNoServerWebsocket(options)));
-};
-const withLogAddress = layer => Layer.effectDiscard(HttpServer.addressFormattedWith(address => Effect.annotateLogs(Effect.logInfo(`Listening on: ${address}`), {
-  package: "@effect/cluster",
-  service: "ShardManager"
-}))).pipe(Layer.provideMerge(layer));
-/**
- * A HTTP layer for the `ShardManager` server, that adds a route to the provided
- * `HttpRouter.Tag`.
- *
- * By default, it uses the `HttpRouter.Default` tag.
- *
- * @since 1.0.0
- * @category Layers
- */
-export const layerHttp = /*#__PURE__*/layerHttpOptions({
-  path: "/",
-  runnerPath: "/"
-});
-/**
- * A Websocket layer for the `ShardManager` server, that adds a route to the provided
- * `HttpRouter.Tag`.
- *
- * By default, it uses the `HttpRouter.Default` tag.
- *
- * @since 1.0.0
- * @category Layers
- */
-export const layerWebsocket = /*#__PURE__*/layerWebsocketOptions({
-  path: "/",
-  runnerPath: "/"
-});
-/**
- * @since 1.0.0
- * @category Layers
- */
-export const layerRunnerHealthHttp = /*#__PURE__*/Layer.provide(RunnerHealth.layerRpc, /*#__PURE__*/layerClientProtocolHttp({
-  path: "/"
-}));
-/**
- * @since 1.0.0
- * @category Layers
- */
-export const layerRunnerHealthWebsocket = /*#__PURE__*/Layer.provide(RunnerHealth.layerRpc, /*#__PURE__*/layerClientProtocolWebsocket({
-  path: "/"
-}));
-//# sourceMappingURL=HttpShardManager.js.map

package/dist/esm/HttpShardManager.js.map
DELETED

@@ -1 +0,0 @@
{"version":3,"file":"HttpShardManager.js","names":["HttpRouter","HttpServer","RpcServer","Effect","identity","Layer","layerClientProtocolHttp","layerClientProtocolWebsocket","MessageStorage","RunnerHealth","Runners","ShardManager","toHttpApp","gen","handlers","build","layerServerHandlers","Rpcs","pipe","provide","toHttpAppWebsocket","layerNoServerHttp","options","layer","layerRpc","path","runnerPath","https","runnerHttps","layerNoop","layerNoServerWebsocket","layerHttpOptions","routerTag","Default","serve","logAddress","withLogAddress","merge","layerServer","layerProtocolHttp","provideMerge","layerWebsocketOptions","layerProtocolWebsocket","effectDiscard","addressFormattedWith","address","annotateLogs","logInfo","package","service","layerHttp","layerWebsocket","layerRunnerHealthHttp","layerRunnerHealthWebsocket"],"sources":["../../src/HttpShardManager.ts"],"sourcesContent":[null],"mappings":"AAKA,OAAO,KAAKA,UAAU,MAAM,6BAA6B;AACzD,OAAO,KAAKC,UAAU,MAAM,6BAA6B;AAGzD,OAAO,KAAKC,SAAS,MAAM,uBAAuB;AAClD,OAAO,KAAKC,MAAM,MAAM,eAAe;AACvC,SAASC,QAAQ,QAAQ,iBAAiB;AAC1C,OAAO,KAAKC,KAAK,MAAM,cAAc;AAErC,SAASC,uBAAuB,EAAEC,4BAA4B,QAAQ,iBAAiB;AACvF,OAAO,KAAKC,cAAc,MAAM,qBAAqB;AACrD,OAAO,KAAKC,YAAY,MAAM,mBAAmB;AACjD,OAAO,KAAKC,OAAO,MAAM,cAAc;AAEvC,OAAO,KAAKC,YAAY,MAAM,mBAAmB;AAGjD;;;;AAIA,OAAO,MAAMC,SAAS,gBAIlBT,MAAM,CAACU,GAAG,CAAC,aAAS;EACtB,MAAMC,QAAQ,GAAG,OAAOT,KAAK,CAACU,KAAK,CAACJ,YAAY,CAACK,mBAAmB,CAAC;EACrE,OAAO,OAAOd,SAAS,CAACU,SAAS,CAACD,YAAY,CAACM,IAAI,CAAC,CAACC,IAAI,CACvDf,MAAM,CAACgB,OAAO,CAACL,QAAQ,CAAC,CACzB;AACH,CAAC,CAAC;AAEF;;;;AAIA,OAAO,MAAMM,kBAAkB,gBAI3BjB,MAAM,CAACU,GAAG,CAAC,aAAS;EACtB,MAAMC,QAAQ,GAAG,OAAOT,KAAK,CAACU,KAAK,CAACJ,YAAY,CAACK,mBAAmB,CAAC;EACrE,OAAO,OAAOd,SAAS,CAACkB,kBAAkB,CAACT,YAAY,CAACM,IAAI,CAAC,CAACC,IAAI,CAChEf,MAAM,CAACgB,OAAO,CAACL,QAAQ,CAAC,CACzB;AACH,CAAC,CAAC;AAEF;;;;;;;;;;;AAWA,OAAO,MAAMO,iBAAiB,GAC5BC,OAGC,IAWDX,YAAY,CAACY,KAAK,CAACL,IAAI,CACrBb,KAAK,CAACc,OAAO,CAACT,OAAO,CAACc,QAAQ,CAACN,IAAI,CACjCb,KAAK,CAACc,OAAO,CAAC,CACZb,uBAAuB,CAAC;EACtBmB,IAAI,EAAEH,OAAO,CAACI,UAAU;EACxBC,KAAK,EAAEL,OAAO,CAACM;CAChB,CAAC,EACFpB,cAAc,CAACqB,SAAS,CACzB,CAAC,CACH,CAAC,CACH;AAEH;;;;;;;;;;;AAWA,OAAO,MAAMC,sBAAsB,GACjCR,OAGC,IAWDX,YAAY,CAACY,KAAK,CAACL,IAAI,CACrBb,KAAK,CAACc,OAAO,CAACT,OAAO,CAACc,QAAQ,CAACN,IAAI,CACjCb,KAAK,CAACc,OAAO,CAAC,CACZZ,4BAA4B,CAAC;EAC3BkB,IAAI,EAAEH,OAAO,CAACI,UAAU;EACxBC,KAAK,EAAEL,OAAO,CAACM;CAChB,CAAC,EACFpB,cAAc,CAACqB,SAAS,CACzB,CAAC,CACH,CAAC,CACH;AAEH;;;;;;;;;AASA,OAAO,MAAME,gBAAgB,GAC3BT,OAMC,IAWC;EACF,MAAMU,SAAS,GAAGV,OAAO,CAACU,SAAS,IAAIhC,UAAU,CAACiC,OAAO;EACzD,OAAOD,SAAS,CAACE,KAAK,EAAE,CAAChB,IAAI,CAC3BI,OAAO,CAACa,UAAU,GAAGC,cAAc,GAAGhC,QAAQ,EAC9CC,KAAK,CAACgC,KAAK,CAAC1B,YAAY,CAAC2B,WAAW,CAAC,EACrCjC,KAAK,CAACc,OAAO,CAACjB,SAAS,CAACqC,iBAAiB,CAACjB,OAAO,CAAC,CAAC,EACnDjB,KAAK,CAACmC,YAAY,CAACnB,iBAAiB,CAACC,OAAO,CAAC,CAAC,CAC/C;AACH,CAAC;AAED;;;;;;;;;AASA,OAAO,MAAMmB,qBAAqB,GAChCnB,OAMC,IAWC;EACF,MAAMU,SAAS,GAAGV,OAAO,CAACU,SAAS,IAAIhC,UAAU,CAACiC,OAAO;EACzD,OAAOD,SAAS,CAACE,KAAK,EAAE,CAAChB,IAAI,CAC3BI,OAAO,CAACa,UAAU,GAAGC,cAAc,GAAGhC,QAAQ,EAC9CC,KAAK,CAACgC,KAAK,CAAC1B,YAAY,CAAC2B,WAAW,CAAC,EACrCjC,KAAK,CAACc,OAAO,CAACjB,SAAS,CAACwC,sBAAsB,CAACpB,OAAO,CAAC,CAAC,EACxDjB,KAAK,CAACmC,YAAY,CAACV,sBAAsB,CAACR,OAAO,CAAC,CAAC,CACpD;AACH,CAAC;AAED,MAAMc,cAAc,GAAab,KAA2B,IAC1DlB,KAAK,CAACsC,aAAa,CACjB1C,UAAU,CAAC2C,oBAAoB,CAAEC,OAAO,IACtC1C,MAAM,CAAC2C,YAAY,CAAC3C,MAAM,CAAC4C,OAAO,CAAC,iBAAiBF,OAAO,EAAE,CAAC,EAAE;EAC9DG,OAAO,EAAE,iBAAiB;EAC1BC,OAAO,EAAE;CACV,CAAC,CACH,CACF,CAAC/B,IAAI,CAACb,KAAK,CAACmC,YAAY,CAACjB,KAAK,CAAC,CAAC;AAEnC;;;;
;;;;;AASA,OAAO,MAAM2B,SAAS,gBAUlBnB,gBAAgB,CAAC;EAAEN,IAAI,EAAE,GAAG;EAAEC,UAAU,EAAE;AAAG,CAAE,CAAC;AAEpD;;;;;;;;;AASA,OAAO,MAAMyB,cAAc,gBAUvBV,qBAAqB,CAAC;EAAEhB,IAAI,EAAE,GAAG;EAAEC,UAAU,EAAE;AAAG,CAAE,CAAC;AAEzD;;;;AAIA,OAAO,MAAM0B,qBAAqB,gBAI9B/C,KAAK,CAACc,OAAO,CAACV,YAAY,CAACe,QAAQ,eAAElB,uBAAuB,CAAC;EAAEmB,IAAI,EAAE;AAAG,CAAE,CAAC,CAAC;AAEhF;;;;AAIA,OAAO,MAAM4B,0BAA0B,gBAInChD,KAAK,CAACc,OAAO,CAACV,YAAY,CAACe,QAAQ,eAAEjB,4BAA4B,CAAC;EAAEkB,IAAI,EAAE;AAAG,CAAE,CAAC,CAAC","ignoreList":[]}

package/dist/esm/ShardManager.js
DELETED

@@ -1,535 +0,0 @@
-/**
- * @since 1.0.0
- */
-import * as Rpc from "@effect/rpc/Rpc";
-import * as RpcClient from "@effect/rpc/RpcClient";
-import * as RpcGroup from "@effect/rpc/RpcGroup";
-import * as RpcServer from "@effect/rpc/RpcServer";
-import * as Arr from "effect/Array";
-import * as Clock from "effect/Clock";
-import * as Config_ from "effect/Config";
-import * as ConfigProvider from "effect/ConfigProvider";
-import * as Context from "effect/Context";
-import * as Data from "effect/Data";
-import * as Deferred from "effect/Deferred";
-import * as Duration from "effect/Duration";
-import * as Effect from "effect/Effect";
-import * as Equal from "effect/Equal";
-import * as FiberSet from "effect/FiberSet";
-import { identity } from "effect/Function";
-import * as Iterable from "effect/Iterable";
-import * as Layer from "effect/Layer";
-import * as Mailbox from "effect/Mailbox";
-import * as Metric from "effect/Metric";
-import * as MetricLabel from "effect/MetricLabel";
-import * as MutableHashMap from "effect/MutableHashMap";
-import * as MutableHashSet from "effect/MutableHashSet";
-import * as Option from "effect/Option";
-import * as PubSub from "effect/PubSub";
-import * as Queue from "effect/Queue";
-import * as Schedule from "effect/Schedule";
-import * as Schema from "effect/Schema";
-import { RunnerNotRegistered } from "./ClusterError.js";
-import * as ClusterMetrics from "./ClusterMetrics.js";
-import { addAllNested, decideAssignmentsForShards, State } from "./internal/shardManager.js";
-import * as MachineId from "./MachineId.js";
-import { Runner } from "./Runner.js";
-import { RunnerAddress } from "./RunnerAddress.js";
-import { RunnerHealth } from "./RunnerHealth.js";
-import { RpcClientProtocol, Runners } from "./Runners.js";
-import { make as makeShardId, ShardId } from "./ShardId.js";
-import { ShardingConfig } from "./ShardingConfig.js";
-import { ShardStorage } from "./ShardStorage.js";
-/**
- * @since 1.0.0
- * @category models
- */
-export class ShardManager extends /*#__PURE__*/Context.Tag("@effect/cluster/ShardManager")() {}
-/**
- * @since 1.0.0
- * @category Config
- */
-export class Config extends /*#__PURE__*/Context.Tag("@effect/cluster/ShardManager/Config")() {
-  /**
-   * @since 1.0.0
-   */
-  static defaults = {
-    rebalanceDebounce: /*#__PURE__*/Duration.seconds(3),
-    rebalanceInterval: /*#__PURE__*/Duration.seconds(20),
-    rebalanceRetryInterval: /*#__PURE__*/Duration.seconds(10),
-    rebalanceRate: 2 / 100,
-    persistRetryCount: 100,
-    persistRetryInterval: /*#__PURE__*/Duration.seconds(3),
-    runnerHealthCheckInterval: /*#__PURE__*/Duration.minutes(1),
-    runnerPingTimeout: /*#__PURE__*/Duration.seconds(3)
-  };
-}
-/**
- * @since 1.0.0
- * @category Config
- */
-export const configConfig = /*#__PURE__*/Config_.all({
-  rebalanceDebounce: /*#__PURE__*/Config_.duration("rebalanceDebounce").pipe(/*#__PURE__*/Config_.withDefault(Config.defaults.rebalanceDebounce), /*#__PURE__*/Config_.withDescription("The duration to wait before rebalancing shards after a change.")),
-  rebalanceInterval: /*#__PURE__*/Config_.duration("rebalanceInterval").pipe(/*#__PURE__*/Config_.withDefault(Config.defaults.rebalanceInterval), /*#__PURE__*/Config_.withDescription("The interval on which regular rebalancing of shards will occur.")),
-  rebalanceRetryInterval: /*#__PURE__*/Config_.duration("rebalanceRetryInterval").pipe(/*#__PURE__*/Config_.withDefault(Config.defaults.rebalanceRetryInterval), /*#__PURE__*/Config_.withDescription("The interval on which rebalancing of shards which failed to be rebalanced will be retried.")),
-  rebalanceRate: /*#__PURE__*/Config_.number("rebalanceRate").pipe(/*#__PURE__*/Config_.withDefault(Config.defaults.rebalanceRate), /*#__PURE__*/Config_.withDescription("The maximum ratio of shards to rebalance at once.")),
-  persistRetryCount: /*#__PURE__*/Config_.integer("persistRetryCount").pipe(/*#__PURE__*/Config_.withDefault(Config.defaults.persistRetryCount), /*#__PURE__*/Config_.withDescription("The number of times persistence of runners will be retried if it fails.")),
-  persistRetryInterval: /*#__PURE__*/Config_.duration("persistRetryInterval").pipe(/*#__PURE__*/Config_.withDefault(Config.defaults.persistRetryInterval), /*#__PURE__*/Config_.withDescription("The interval on which persistence of runners will be retried if it fails.")),
-  runnerHealthCheckInterval: /*#__PURE__*/Config_.duration("runnerHealthCheckInterval").pipe(/*#__PURE__*/Config_.withDefault(Config.defaults.runnerHealthCheckInterval), /*#__PURE__*/Config_.withDescription("The interval on which runner health will be checked.")),
-  runnerPingTimeout: /*#__PURE__*/Config_.duration("runnerPingTimeout").pipe(/*#__PURE__*/Config_.withDefault(Config.defaults.runnerPingTimeout), /*#__PURE__*/Config_.withDescription("The length of time to wait for a runner to respond to a ping."))
-});
-/**
- * @since 1.0.0
- * @category Config
- */
-export const configFromEnv = /*#__PURE__*/configConfig.pipe(/*#__PURE__*/Effect.withConfigProvider(/*#__PURE__*/ConfigProvider.fromEnv().pipe(ConfigProvider.constantCase)));
-/**
- * @since 1.0.0
- * @category Config
- */
-export const layerConfig = config => Layer.succeed(Config, {
-  ...Config.defaults,
-  ...config
-});
-/**
- * @since 1.0.0
- * @category Config
- */
-export const layerConfigFromEnv = config => Layer.effect(Config, config ? Effect.map(configFromEnv, env => ({
-  ...env,
-  ...config
-})) : configFromEnv);
-/**
- * Represents a client which can be used to communicate with the
- * `ShardManager`.
- *
- * @since 1.0.0
- * @category Client
- */
-export class ShardManagerClient extends /*#__PURE__*/Context.Tag("@effect/cluster/ShardManager/ShardManagerClient")() {}
-/**
- * @since 1.0.0
- * @category models
- */
-export const ShardingEventSchema = /*#__PURE__*/Schema.Union(/*#__PURE__*/Schema.TaggedStruct("StreamStarted", {}), /*#__PURE__*/Schema.TaggedStruct("ShardsAssigned", {
-  address: RunnerAddress,
-  shards: /*#__PURE__*/Schema.Array(ShardId)
-}), /*#__PURE__*/Schema.TaggedStruct("ShardsUnassigned", {
-  address: RunnerAddress,
-  shards: /*#__PURE__*/Schema.Array(ShardId)
-}), /*#__PURE__*/Schema.TaggedStruct("RunnerRegistered", {
-  address: RunnerAddress
-}), /*#__PURE__*/Schema.TaggedStruct("RunnerUnregistered", {
-  address: RunnerAddress
-}));
-/**
- * The messaging protocol for the `ShardManager`.
- *
- * @since 1.0.0
- * @category Rpcs
- */
-export class Rpcs extends /*#__PURE__*/RpcGroup.make(/*#__PURE__*/Rpc.make("Register", {
-  payload: {
-    runner: Runner
-  },
-  success: MachineId.MachineId
-}), /*#__PURE__*/Rpc.make("Unregister", {
-  payload: {
-    address: RunnerAddress
-  }
-}), /*#__PURE__*/Rpc.make("NotifyUnhealthyRunner", {
-  payload: {
-    address: RunnerAddress
-  }
-}), /*#__PURE__*/Rpc.make("GetAssignments", {
-  success: /*#__PURE__*/Schema.Array(/*#__PURE__*/Schema.Tuple(ShardId, /*#__PURE__*/Schema.Option(RunnerAddress)))
-}), /*#__PURE__*/Rpc.make("ShardingEvents", {
-  payload: {
-    address: /*#__PURE__*/Schema.Option(RunnerAddress)
-  },
-  success: ShardingEventSchema,
-  error: RunnerNotRegistered,
-  stream: true
-}), /*#__PURE__*/Rpc.make("GetTime", {
-  success: Schema.Number
-})) {}
-/**
- * @since 1.0.0
- * @category models
- */
-export const ShardingEvent = /*#__PURE__*/Data.taggedEnum();
-/**
- * @since 1.0.0
- * @category Client
- */
-export const makeClientLocal = /*#__PURE__*/Effect.gen(function* () {
-  const config = yield* ShardingConfig;
-  const clock = yield* Effect.clock;
-  const groups = new Set();
-  const shards = MutableHashMap.empty();
-  let machineId = 0;
-  return ShardManagerClient.of({
-    register: (_, groupsToAdd) => Effect.sync(() => {
-      for (const group of groupsToAdd) {
-        if (groups.has(group)) continue;
-        groups.add(group);
-        for (let n = 1; n <= config.shardsPerGroup; n++) {
-          MutableHashMap.set(shards, makeShardId(group, n), config.runnerAddress);
-        }
-      }
-      return MachineId.make(++machineId);
-    }),
-    unregister: () => Effect.void,
-    notifyUnhealthyRunner: () => Effect.void,
-    getAssignments: Effect.succeed(shards),
-    shardingEvents: Effect.fnUntraced(function* (_address) {
-      const mailbox = yield* Mailbox.make();
-      yield* mailbox.offer(ShardingEvent.StreamStarted());
-      return mailbox;
-    }),
-    getTime: clock.currentTimeMillis
-  });
-});
-/**
- * @since 1.0.0
- * @category Client
- */
-export const makeClientRpc = /*#__PURE__*/Effect.gen(function* () {
-  const config = yield* ShardingConfig;
-  const client = yield* RpcClient.make(Rpcs, {
-    spanPrefix: "ShardManagerClient",
-    disableTracing: true
-  });
-  return ShardManagerClient.of({
-    register: (address, groups) => client.Register({
-      runner: Runner.make({
-        address,
-        version: config.serverVersion,
-        groups
-      })
-    }).pipe(Effect.orDie),
-    unregister: address => Effect.orDie(client.Unregister({
-      address
-    })),
-    notifyUnhealthyRunner: address => Effect.orDie(client.NotifyUnhealthyRunner({
-      address
-    })),
-    getAssignments: Effect.orDie(client.GetAssignments()),
-    shardingEvents: address => Mailbox.make().pipe(Effect.tap(Effect.fnUntraced(function* (mailbox) {
-      const events = yield* client.ShardingEvents({
-        address
-      }, {
-        asMailbox: true
-      });
-      const take = Effect.orDie(events.takeAll);
-      while (true) {
-        mailbox.unsafeOfferAll((yield* take)[0]);
-      }
-    }, (effect, mb) => Mailbox.into(effect, mb), Effect.forkScoped))),
-    getTime: Effect.orDie(client.GetTime())
-  });
-});
-/**
- * @since 1.0.0
- * @category Client
- */
-export const layerClientLocal = /*#__PURE__*/Layer.effect(ShardManagerClient, makeClientLocal);
-/**
- * @since 1.0.0
- * @category Client
- */
-export const layerClientRpc = /*#__PURE__*/Layer.scoped(ShardManagerClient, makeClientRpc).pipe(/*#__PURE__*/Layer.provide(/*#__PURE__*/Layer.scoped(RpcClient.Protocol, /*#__PURE__*/Effect.gen(function* () {
-  const config = yield* ShardingConfig;
-  const clientProtocol = yield* RpcClientProtocol;
-  return yield* clientProtocol(config.shardManagerAddress);
-}))));
-/**
- * @since 1.0.0
- * @category Constructors
- */
-export const make = /*#__PURE__*/Effect.gen(function* () {
-  const storage = yield* ShardStorage;
-  const runnersApi = yield* Runners;
-  const runnerHealthApi = yield* RunnerHealth;
-  const clock = yield* Effect.clock;
-  const config = yield* Config;
-  const shardingConfig = yield* ShardingConfig;
-  const state = yield* Effect.orDie(State.fromStorage(shardingConfig.shardsPerGroup));
-  const scope = yield* Effect.scope;
-  const events = yield* PubSub.unbounded();
-  function updateRunnerMetrics() {
-    ClusterMetrics.runners.unsafeUpdate(MutableHashMap.size(state.allRunners), []);
-  }
-  function updateShardMetrics() {
-    const stats = state.shardStats;
-    for (const [address, shardCount] of stats.perRunner) {
-      ClusterMetrics.assignedShards.unsafeUpdate(shardCount, [MetricLabel.make("address", address)]);
-    }
-    ClusterMetrics.unassignedShards.unsafeUpdate(stats.unassigned, []);
-  }
-  updateShardMetrics();
-  function withRetry(effect) {
-    return effect.pipe(Effect.retry({
-      schedule: Schedule.spaced(config.persistRetryCount),
-      times: config.persistRetryCount
-    }), Effect.ignore);
-  }
-  const persistRunners = Effect.unsafeMakeSemaphore(1).withPermits(1)(withRetry(Effect.suspend(() => storage.saveRunners(Iterable.map(state.allRunners, ([address, runner]) => [address, runner.runner])))));
-  const persistAssignments = Effect.unsafeMakeSemaphore(1).withPermits(1)(withRetry(Effect.suspend(() => storage.saveAssignments(state.assignments))));
-  const notifyUnhealthyRunner = Effect.fnUntraced(function* (address) {
-    if (!MutableHashMap.has(state.allRunners, address)) return;
-    if (!(yield* runnerHealthApi.isAlive(address))) {
-      yield* Effect.logWarning(`Runner at address '${address.toString()}' is not alive`);
-      yield* unregister(address);
-    }
-  });
-  function updateShardsState(shards, address) {
-    return Effect.suspend(() => {
-      if (Option.isSome(address) && !MutableHashMap.has(state.allRunners, address.value)) {
-        return Effect.fail(new RunnerNotRegistered({
-          address: address.value
-        }));
-      }
-      state.addAssignments(shards, address);
-      return Effect.void;
-    });
-  }
-  const getAssignments = Effect.sync(() => state.assignments);
-  let machineId = 0;
-  const register = Effect.fnUntraced(function* (runner) {
-    yield* Effect.logInfo(`Registering runner ${Runner.pretty(runner)}`);
-    const current = MutableHashMap.get(state.allRunners, runner.address).pipe(Option.filter(r => r.runner.version === runner.version));
-    if (Option.isSome(current)) {
-      return MachineId.make(++machineId);
-    }
-    state.addRunner(runner, clock.unsafeCurrentTimeMillis());
-    updateRunnerMetrics();
-    yield* PubSub.publish(events, ShardingEvent.RunnerRegistered({
-      address: runner.address
-    }));
-    yield* Effect.forkIn(persistRunners, scope);
-    yield* Effect.forkIn(rebalance, scope);
-    return MachineId.make(++machineId);
-  });
-  const unregister = Effect.fnUntraced(function* (address) {
-    if (!MutableHashMap.has(state.allRunners, address)) return;
-    yield* Effect.logInfo("Unregistering runner at address:", address);
-    const unassignments = Arr.empty();
-    for (const [shard, runner] of state.assignments) {
-      if (Option.isSome(runner) && Equal.equals(runner.value, address)) {
-        unassignments.push(shard);
-      }
-    }
-    state.addAssignments(unassignments, Option.none());
-    state.removeRunner(address);
-    updateRunnerMetrics();
-    if (unassignments.length > 0) {
-      yield* PubSub.publish(events, ShardingEvent.RunnerUnregistered({
-        address
-      }));
-    }
-    yield* Effect.forkIn(persistRunners, scope);
-    yield* Effect.forkIn(rebalance, scope);
-  });
-  let rebalancing = false;
-  let rebalanceDeferred;
-  const rebalanceFibers = yield* FiberSet.make();
-  const rebalance = Effect.withFiberRuntime(fiber => {
-    if (!rebalancing) {
-      rebalancing = true;
-      return rebalanceLoop;
-    }
-    if (!rebalanceDeferred) {
-      rebalanceDeferred = Deferred.unsafeMake(fiber.id());
-    }
-    return Deferred.await(rebalanceDeferred);
-  });
-  const rebalanceLoop = Effect.suspend(() => {
-    const deferred = rebalanceDeferred;
-    rebalanceDeferred = undefined;
-    return runRebalance.pipe(deferred ? Effect.intoDeferred(deferred) : identity, Effect.onExit(() => {
-      if (!rebalanceDeferred) {
-        rebalancing = false;
-        return Effect.void;
-      }
-      return Effect.forkIn(rebalanceLoop, scope);
-    }));
-  });
-  const runRebalance = Effect.gen(function* () {
-    yield* Effect.sleep(config.rebalanceDebounce);
-    if (state.shards.size === 0) {
-      yield* Effect.logDebug("No shards to rebalance");
-      return;
-    }
-    // Determine which shards to assign and unassign
-    const assignments = MutableHashMap.empty();
-    const unassignments = MutableHashMap.empty();
-    const changes = MutableHashSet.empty();
-    for (const group of state.shards.keys()) {
-      const [groupAssignments, groupUnassignments, groupChanges] = decideAssignmentsForShards(state, group);
-      for (const [address, shards] of groupAssignments) {
-        addAllNested(assignments, address, Array.from(shards, id => makeShardId(group, id)));
-      }
-      for (const [address, shards] of groupUnassignments) {
-        addAllNested(unassignments, address, Array.from(shards, id => makeShardId(group, id)));
-      }
-      for (const address of groupChanges) {
-        MutableHashSet.add(changes, address);
-      }
-    }
-    yield* Effect.logDebug(`Rebalancing shards`);
-    if (MutableHashSet.size(changes) === 0) return;
-    yield* Metric.increment(ClusterMetrics.rebalances);
-    // Ping runners first and remove unhealthy ones
-    const failedRunners = MutableHashSet.empty();
-    for (const address of changes) {
-      yield* FiberSet.run(rebalanceFibers, runnersApi.ping(address).pipe(Effect.timeout(config.runnerPingTimeout), Effect.catchAll(() => {
-        MutableHashSet.add(failedRunners, address);
-        MutableHashMap.remove(assignments, address);
-        MutableHashMap.remove(unassignments, address);
-        return Effect.void;
-      })));
-    }
-    yield* FiberSet.awaitEmpty(rebalanceFibers);
-    const failedUnassignments = new Set();
-    for (const [address, shards] of unassignments) {
-      yield* FiberSet.run(rebalanceFibers, updateShardsState(shards, Option.none()).pipe(Effect.matchEffect({
-        onFailure: () => {
-          MutableHashSet.add(failedRunners, address);
-          for (const shard of shards) {
-            failedUnassignments.add(shard);
-          }
-          // Remove failed runners from the assignments
-          MutableHashMap.remove(assignments, address);
-          return Effect.void;
-        },
-        onSuccess: () => PubSub.publish(events, ShardingEvent.ShardsUnassigned({
-          address,
-          shards: Array.from(shards)
-        }))
-      })));
-    }
-    yield* FiberSet.awaitEmpty(rebalanceFibers);
-    // Remove failed shard unassignments from the assignments
-    MutableHashMap.forEach(assignments, (shards, address) => {
-      for (const shard of failedUnassignments) {
-        MutableHashSet.remove(shards, shard);
-      }
-      if (MutableHashSet.size(shards) === 0) {
-        MutableHashMap.remove(assignments, address);
-      }
-    });
-    // Perform the assignments
-    for (const [address, shards] of assignments) {
-      yield* FiberSet.run(rebalanceFibers, updateShardsState(shards, Option.some(address)).pipe(Effect.matchEffect({
-        onFailure: () => {
-          MutableHashSet.add(failedRunners, address);
-          return Effect.void;
-        },
-        onSuccess: () => PubSub.publish(events, ShardingEvent.ShardsAssigned({
-          address,
-          shards: Array.from(shards)
-        }))
-      })));
-    }
-    yield* FiberSet.awaitEmpty(rebalanceFibers);
-    updateShardMetrics();
-    const wereFailures = MutableHashSet.size(failedRunners) > 0;
-    if (wereFailures) {
-      // Check if the failing runners are still reachable
-      yield* Effect.forEach(failedRunners, notifyUnhealthyRunner, {
-        discard: true
-      }).pipe(Effect.forkIn(scope));
-      yield* Effect.logWarning("Failed to rebalance runners: ", failedRunners);
-    }
-    if (wereFailures) {
-      // Try rebalancing again later if there were any failures
-      yield* Clock.sleep(config.rebalanceRetryInterval).pipe(Effect.zipRight(rebalance), Effect.forkIn(scope));
-    }
-    yield* persistAssignments;
-  }).pipe(Effect.withSpan("ShardManager.rebalance", {
-    captureStackTrace: false
-  }));
-  const checkRunnerHealth = Effect.suspend(() => Effect.forEach(MutableHashMap.keys(state.allRunners), notifyUnhealthyRunner, {
-    concurrency: 10,
-    discard: true
-  }));
-  yield* Effect.addFinalizer(() => persistAssignments.pipe(Effect.catchAllCause(cause => Effect.logWarning("Failed to persist assignments on shutdown", cause)), Effect.zipRight(persistRunners.pipe(Effect.catchAllCause(cause => Effect.logWarning("Failed to persist runners on shutdown", cause))))));
-  yield* Effect.forkIn(persistRunners, scope);
-  // Start a regular cluster rebalance at the configured interval
-  yield* rebalance.pipe(Effect.andThen(Effect.sleep(config.rebalanceInterval)), Effect.forever, Effect.forkIn(scope));
-  yield* checkRunnerHealth.pipe(Effect.andThen(Effect.sleep(config.runnerHealthCheckInterval)), Effect.forever, Effect.forkIn(scope));
-  yield* Effect.gen(function* () {
-    const queue = yield* PubSub.subscribe(events);
-    while (true) {
-      yield* Effect.logInfo("Shard manager event:", yield* Queue.take(queue));
-    }
-  }).pipe(Effect.forkIn(scope));
-  yield* Effect.logInfo("Shard manager initialized");
-  return ShardManager.of({
-    getAssignments,
-    shardingEvents: address => {
-      if (Option.isNone(address)) {
-        return PubSub.subscribe(events);
-      }
-      return Effect.tap(PubSub.subscribe(events), () => {
-        const isRegistered = MutableHashMap.has(state.allRunners, address.value);
-        if (isRegistered) {
-          return runnerHealthApi.onConnection(address.value);
-        }
-        return Effect.fail(new RunnerNotRegistered({
-          address: address.value
-        }));
-      });
-    },
-    register,
-    unregister,
-    rebalance,
-    notifyUnhealthyRunner,
-    checkRunnerHealth
-  });
-});
-/**
- * @since 1.0.0
- * @category layer
- */
-export const layer = /*#__PURE__*/Layer.scoped(ShardManager, make);
-/**
- * @since 1.0.0
- * @category Server
- */
-export const layerServerHandlers = /*#__PURE__*/Rpcs.toLayer(/*#__PURE__*/Effect.gen(function* () {
-  const shardManager = yield* ShardManager;
-  const clock = yield* Effect.clock;
-  return {
-    Register: ({
-      runner
-    }) => shardManager.register(runner),
-    Unregister: ({
-      address
-    }) => shardManager.unregister(address),
-    NotifyUnhealthyRunner: ({
-      address
-    }) => shardManager.notifyUnhealthyRunner(address),
-    GetAssignments: () => Effect.map(shardManager.getAssignments, assignments => Array.from(assignments)),
-    ShardingEvents: Effect.fnUntraced(function* ({
-      address
-    }) {
-      const queue = yield* shardManager.shardingEvents(address);
-      const mailbox = yield* Mailbox.make();
-      yield* mailbox.offer(ShardingEvent.StreamStarted());
-      yield* Queue.takeBetween(queue, 1, Number.MAX_SAFE_INTEGER).pipe(Effect.flatMap(events => mailbox.offerAll(events)), Effect.forever, Effect.forkScoped);
-      return mailbox;
-    }),
-    GetTime: () => clock.currentTimeMillis
-  };
-}));
-/**
- * @since 1.0.0
- * @category Server
- */
-export const layerServer = /*#__PURE__*/RpcServer.layer(Rpcs, {
-  spanPrefix: "ShardManager",
-  disableTracing: true
-}).pipe(/*#__PURE__*/Layer.provide(layerServerHandlers));
-//# sourceMappingURL=ShardManager.js.map