@effect/cluster 0.50.6 → 0.52.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (232)
  1. package/RunnerStorage/package.json +6 -0
  2. package/SqlRunnerStorage/package.json +6 -0
  3. package/dist/cjs/ClusterError.js +2 -24
  4. package/dist/cjs/ClusterError.js.map +1 -1
  5. package/dist/cjs/ClusterMetrics.js +13 -15
  6. package/dist/cjs/ClusterMetrics.js.map +1 -1
  7. package/dist/cjs/ClusterSchema.js +17 -2
  8. package/dist/cjs/ClusterSchema.js.map +1 -1
  9. package/dist/cjs/ClusterWorkflowEngine.js +50 -83
  10. package/dist/cjs/ClusterWorkflowEngine.js.map +1 -1
  11. package/dist/cjs/Entity.js +1 -13
  12. package/dist/cjs/Entity.js.map +1 -1
  13. package/dist/cjs/EntityAddress.js +9 -1
  14. package/dist/cjs/EntityAddress.js.map +1 -1
  15. package/dist/cjs/EntityId.js +7 -1
  16. package/dist/cjs/EntityId.js.map +1 -1
  17. package/dist/cjs/EntityProxy.js +1 -1
  18. package/dist/cjs/EntityProxy.js.map +1 -1
  19. package/dist/cjs/HttpRunner.js +69 -43
  20. package/dist/cjs/HttpRunner.js.map +1 -1
  21. package/dist/cjs/MessageStorage.js +64 -16
  22. package/dist/cjs/MessageStorage.js.map +1 -1
  23. package/dist/cjs/Runner.js +3 -3
  24. package/dist/cjs/Runner.js.map +1 -1
  25. package/dist/cjs/RunnerAddress.js +7 -0
  26. package/dist/cjs/RunnerAddress.js.map +1 -1
  27. package/dist/cjs/RunnerHealth.js +91 -32
  28. package/dist/cjs/RunnerHealth.js.map +1 -1
  29. package/dist/cjs/RunnerServer.js +38 -24
  30. package/dist/cjs/RunnerServer.js.map +1 -1
  31. package/dist/cjs/RunnerStorage.js +100 -0
  32. package/dist/cjs/RunnerStorage.js.map +1 -0
  33. package/dist/cjs/Runners.js +18 -22
  34. package/dist/cjs/Runners.js.map +1 -1
  35. package/dist/cjs/ShardId.js +17 -7
  36. package/dist/cjs/ShardId.js.map +1 -1
  37. package/dist/cjs/Sharding.js +444 -320
  38. package/dist/cjs/Sharding.js.map +1 -1
  39. package/dist/cjs/ShardingConfig.js +10 -14
  40. package/dist/cjs/ShardingConfig.js.map +1 -1
  41. package/dist/cjs/Snowflake.js +1 -1
  42. package/dist/cjs/SocketRunner.js +1 -1
  43. package/dist/cjs/SocketRunner.js.map +1 -1
  44. package/dist/cjs/SqlMessageStorage.js +22 -28
  45. package/dist/cjs/SqlMessageStorage.js.map +1 -1
  46. package/dist/cjs/SqlRunnerStorage.js +375 -0
  47. package/dist/cjs/SqlRunnerStorage.js.map +1 -0
  48. package/dist/cjs/index.js +5 -15
  49. package/dist/cjs/internal/entityManager.js +42 -23
  50. package/dist/cjs/internal/entityManager.js.map +1 -1
  51. package/dist/dts/ClusterError.d.ts +0 -22
  52. package/dist/dts/ClusterError.d.ts.map +1 -1
  53. package/dist/dts/ClusterMetrics.d.ts +4 -14
  54. package/dist/dts/ClusterMetrics.d.ts.map +1 -1
  55. package/dist/dts/ClusterSchema.d.ts +9 -1
  56. package/dist/dts/ClusterSchema.d.ts.map +1 -1
  57. package/dist/dts/ClusterWorkflowEngine.d.ts.map +1 -1
  58. package/dist/dts/Entity.d.ts +3 -14
  59. package/dist/dts/Entity.d.ts.map +1 -1
  60. package/dist/dts/EntityAddress.d.ts +11 -0
  61. package/dist/dts/EntityAddress.d.ts.map +1 -1
  62. package/dist/dts/EntityId.d.ts +5 -0
  63. package/dist/dts/EntityId.d.ts.map +1 -1
  64. package/dist/dts/EntityProxy.d.ts +5 -6
  65. package/dist/dts/EntityProxy.d.ts.map +1 -1
  66. package/dist/dts/HttpRunner.d.ts +48 -25
  67. package/dist/dts/HttpRunner.d.ts.map +1 -1
  68. package/dist/dts/MessageStorage.d.ts +13 -5
  69. package/dist/dts/MessageStorage.d.ts.map +1 -1
  70. package/dist/dts/Runner.d.ts +4 -4
  71. package/dist/dts/Runner.d.ts.map +1 -1
  72. package/dist/dts/RunnerAddress.d.ts +5 -0
  73. package/dist/dts/RunnerAddress.d.ts.map +1 -1
  74. package/dist/dts/RunnerHealth.d.ts +24 -16
  75. package/dist/dts/RunnerHealth.d.ts.map +1 -1
  76. package/dist/dts/RunnerServer.d.ts +5 -4
  77. package/dist/dts/RunnerServer.d.ts.map +1 -1
  78. package/dist/dts/{ShardStorage.d.ts → RunnerStorage.d.ts} +41 -54
  79. package/dist/dts/RunnerStorage.d.ts.map +1 -0
  80. package/dist/dts/Runners.d.ts +15 -11
  81. package/dist/dts/Runners.d.ts.map +1 -1
  82. package/dist/dts/ShardId.d.ts +1 -1
  83. package/dist/dts/ShardId.d.ts.map +1 -1
  84. package/dist/dts/Sharding.d.ts +20 -10
  85. package/dist/dts/Sharding.d.ts.map +1 -1
  86. package/dist/dts/ShardingConfig.d.ts +40 -14
  87. package/dist/dts/ShardingConfig.d.ts.map +1 -1
  88. package/dist/dts/SocketRunner.d.ts +4 -3
  89. package/dist/dts/SocketRunner.d.ts.map +1 -1
  90. package/dist/dts/SqlMessageStorage.d.ts +2 -3
  91. package/dist/dts/SqlMessageStorage.d.ts.map +1 -1
  92. package/dist/dts/SqlRunnerStorage.d.ts +40 -0
  93. package/dist/dts/SqlRunnerStorage.d.ts.map +1 -0
  94. package/dist/dts/index.d.ts +4 -24
  95. package/dist/dts/index.d.ts.map +1 -1
  96. package/dist/esm/ClusterError.js +0 -21
  97. package/dist/esm/ClusterError.js.map +1 -1
  98. package/dist/esm/ClusterMetrics.js +12 -14
  99. package/dist/esm/ClusterMetrics.js.map +1 -1
  100. package/dist/esm/ClusterSchema.js +17 -2
  101. package/dist/esm/ClusterSchema.js.map +1 -1
  102. package/dist/esm/ClusterWorkflowEngine.js +50 -83
  103. package/dist/esm/ClusterWorkflowEngine.js.map +1 -1
  104. package/dist/esm/Entity.js +0 -12
  105. package/dist/esm/Entity.js.map +1 -1
  106. package/dist/esm/EntityAddress.js +7 -0
  107. package/dist/esm/EntityAddress.js.map +1 -1
  108. package/dist/esm/EntityId.js +5 -0
  109. package/dist/esm/EntityId.js.map +1 -1
  110. package/dist/esm/EntityProxy.js +2 -2
  111. package/dist/esm/EntityProxy.js.map +1 -1
  112. package/dist/esm/HttpRunner.js +62 -39
  113. package/dist/esm/HttpRunner.js.map +1 -1
  114. package/dist/esm/MessageStorage.js +65 -17
  115. package/dist/esm/MessageStorage.js.map +1 -1
  116. package/dist/esm/Runner.js +3 -3
  117. package/dist/esm/Runner.js.map +1 -1
  118. package/dist/esm/RunnerAddress.js +7 -0
  119. package/dist/esm/RunnerAddress.js.map +1 -1
  120. package/dist/esm/RunnerHealth.js +88 -30
  121. package/dist/esm/RunnerHealth.js.map +1 -1
  122. package/dist/esm/RunnerServer.js +38 -24
  123. package/dist/esm/RunnerServer.js.map +1 -1
  124. package/dist/esm/RunnerStorage.js +90 -0
  125. package/dist/esm/RunnerStorage.js.map +1 -0
  126. package/dist/esm/Runners.js +19 -23
  127. package/dist/esm/Runners.js.map +1 -1
  128. package/dist/esm/ShardId.js +16 -6
  129. package/dist/esm/ShardId.js.map +1 -1
  130. package/dist/esm/Sharding.js +447 -323
  131. package/dist/esm/Sharding.js.map +1 -1
  132. package/dist/esm/ShardingConfig.js +10 -14
  133. package/dist/esm/ShardingConfig.js.map +1 -1
  134. package/dist/esm/Snowflake.js +1 -1
  135. package/dist/esm/SocketRunner.js +1 -1
  136. package/dist/esm/SocketRunner.js.map +1 -1
  137. package/dist/esm/SqlMessageStorage.js +22 -28
  138. package/dist/esm/SqlMessageStorage.js.map +1 -1
  139. package/dist/esm/SqlRunnerStorage.js +366 -0
  140. package/dist/esm/SqlRunnerStorage.js.map +1 -0
  141. package/dist/esm/index.js +4 -24
  142. package/dist/esm/index.js.map +1 -1
  143. package/dist/esm/internal/entityManager.js +41 -22
  144. package/dist/esm/internal/entityManager.js.map +1 -1
  145. package/package.json +20 -60
  146. package/src/ClusterError.ts +0 -24
  147. package/src/ClusterMetrics.ts +12 -16
  148. package/src/ClusterSchema.ts +17 -2
  149. package/src/ClusterWorkflowEngine.ts +48 -80
  150. package/src/Entity.ts +3 -21
  151. package/src/EntityAddress.ts +10 -0
  152. package/src/EntityId.ts +6 -0
  153. package/src/EntityProxy.ts +10 -10
  154. package/src/HttpRunner.ts +132 -67
  155. package/src/MessageStorage.ts +89 -24
  156. package/src/Runner.ts +4 -4
  157. package/src/RunnerAddress.ts +8 -0
  158. package/src/RunnerHealth.ts +119 -56
  159. package/src/RunnerServer.ts +64 -47
  160. package/src/RunnerStorage.ts +218 -0
  161. package/src/Runners.ts +32 -45
  162. package/src/ShardId.ts +14 -3
  163. package/src/Sharding.ts +561 -417
  164. package/src/ShardingConfig.ts +39 -31
  165. package/src/Snowflake.ts +1 -1
  166. package/src/SocketRunner.ts +6 -4
  167. package/src/SqlMessageStorage.ts +28 -30
  168. package/src/SqlRunnerStorage.ts +537 -0
  169. package/src/index.ts +4 -29
  170. package/src/internal/entityManager.ts +45 -29
  171. package/HttpCommon/package.json +0 -6
  172. package/HttpShardManager/package.json +0 -6
  173. package/ShardManager/package.json +0 -6
  174. package/ShardStorage/package.json +0 -6
  175. package/SocketShardManager/package.json +0 -6
  176. package/SqlShardStorage/package.json +0 -6
  177. package/SynchronizedClock/package.json +0 -6
  178. package/dist/cjs/HttpCommon.js +0 -48
  179. package/dist/cjs/HttpCommon.js.map +0 -1
  180. package/dist/cjs/HttpShardManager.js +0 -139
  181. package/dist/cjs/HttpShardManager.js.map +0 -1
  182. package/dist/cjs/ShardManager.js +0 -549
  183. package/dist/cjs/ShardManager.js.map +0 -1
  184. package/dist/cjs/ShardStorage.js +0 -151
  185. package/dist/cjs/ShardStorage.js.map +0 -1
  186. package/dist/cjs/SocketShardManager.js +0 -32
  187. package/dist/cjs/SocketShardManager.js.map +0 -1
  188. package/dist/cjs/SqlShardStorage.js +0 -253
  189. package/dist/cjs/SqlShardStorage.js.map +0 -1
  190. package/dist/cjs/SynchronizedClock.js +0 -65
  191. package/dist/cjs/SynchronizedClock.js.map +0 -1
  192. package/dist/cjs/internal/shardManager.js +0 -353
  193. package/dist/cjs/internal/shardManager.js.map +0 -1
  194. package/dist/dts/HttpCommon.d.ts +0 -25
  195. package/dist/dts/HttpCommon.d.ts.map +0 -1
  196. package/dist/dts/HttpShardManager.d.ts +0 -119
  197. package/dist/dts/HttpShardManager.d.ts.map +0 -1
  198. package/dist/dts/ShardManager.d.ts +0 -459
  199. package/dist/dts/ShardManager.d.ts.map +0 -1
  200. package/dist/dts/ShardStorage.d.ts.map +0 -1
  201. package/dist/dts/SocketShardManager.d.ts +0 -17
  202. package/dist/dts/SocketShardManager.d.ts.map +0 -1
  203. package/dist/dts/SqlShardStorage.d.ts +0 -38
  204. package/dist/dts/SqlShardStorage.d.ts.map +0 -1
  205. package/dist/dts/SynchronizedClock.d.ts +0 -19
  206. package/dist/dts/SynchronizedClock.d.ts.map +0 -1
  207. package/dist/dts/internal/shardManager.d.ts +0 -2
  208. package/dist/dts/internal/shardManager.d.ts.map +0 -1
  209. package/dist/esm/HttpCommon.js +0 -38
  210. package/dist/esm/HttpCommon.js.map +0 -1
  211. package/dist/esm/HttpShardManager.js +0 -128
  212. package/dist/esm/HttpShardManager.js.map +0 -1
  213. package/dist/esm/ShardManager.js +0 -535
  214. package/dist/esm/ShardManager.js.map +0 -1
  215. package/dist/esm/ShardStorage.js +0 -141
  216. package/dist/esm/ShardStorage.js.map +0 -1
  217. package/dist/esm/SocketShardManager.js +0 -24
  218. package/dist/esm/SocketShardManager.js.map +0 -1
  219. package/dist/esm/SqlShardStorage.js +0 -244
  220. package/dist/esm/SqlShardStorage.js.map +0 -1
  221. package/dist/esm/SynchronizedClock.js +0 -57
  222. package/dist/esm/SynchronizedClock.js.map +0 -1
  223. package/dist/esm/internal/shardManager.js +0 -342
  224. package/dist/esm/internal/shardManager.js.map +0 -1
  225. package/src/HttpCommon.ts +0 -73
  226. package/src/HttpShardManager.ts +0 -273
  227. package/src/ShardManager.ts +0 -823
  228. package/src/ShardStorage.ts +0 -297
  229. package/src/SocketShardManager.ts +0 -48
  230. package/src/SqlShardStorage.ts +0 -329
  231. package/src/SynchronizedClock.ts +0 -82
  232. package/src/internal/shardManager.ts +0 -412
@@ -1,823 +0,0 @@
1
- /**
2
- * @since 1.0.0
3
- */
4
- import * as Rpc from "@effect/rpc/Rpc"
5
- import * as RpcClient from "@effect/rpc/RpcClient"
6
- import * as RpcGroup from "@effect/rpc/RpcGroup"
7
- import * as RpcServer from "@effect/rpc/RpcServer"
8
- import * as Arr from "effect/Array"
9
- import * as Clock from "effect/Clock"
10
- import * as Config_ from "effect/Config"
11
- import type { ConfigError } from "effect/ConfigError"
12
- import * as ConfigProvider from "effect/ConfigProvider"
13
- import * as Context from "effect/Context"
14
- import * as Data from "effect/Data"
15
- import * as Deferred from "effect/Deferred"
16
- import * as Duration from "effect/Duration"
17
- import * as Effect from "effect/Effect"
18
- import * as Equal from "effect/Equal"
19
- import * as FiberSet from "effect/FiberSet"
20
- import { identity } from "effect/Function"
21
- import * as Iterable from "effect/Iterable"
22
- import * as Layer from "effect/Layer"
23
- import * as Mailbox from "effect/Mailbox"
24
- import * as Metric from "effect/Metric"
25
- import * as MetricLabel from "effect/MetricLabel"
26
- import * as MutableHashMap from "effect/MutableHashMap"
27
- import * as MutableHashSet from "effect/MutableHashSet"
28
- import * as Option from "effect/Option"
29
- import * as PubSub from "effect/PubSub"
30
- import * as Queue from "effect/Queue"
31
- import * as Schedule from "effect/Schedule"
32
- import * as Schema from "effect/Schema"
33
- import type { Scope } from "effect/Scope"
34
- import { RunnerNotRegistered } from "./ClusterError.js"
35
- import * as ClusterMetrics from "./ClusterMetrics.js"
36
- import { addAllNested, decideAssignmentsForShards, State } from "./internal/shardManager.js"
37
- import * as MachineId from "./MachineId.js"
38
- import { Runner } from "./Runner.js"
39
- import { RunnerAddress } from "./RunnerAddress.js"
40
- import { RunnerHealth } from "./RunnerHealth.js"
41
- import { RpcClientProtocol, Runners } from "./Runners.js"
42
- import { make as makeShardId, ShardId } from "./ShardId.js"
43
- import { ShardingConfig } from "./ShardingConfig.js"
44
- import { ShardStorage } from "./ShardStorage.js"
45
-
46
- /**
47
- * @since 1.0.0
48
- * @category models
49
- */
50
- export class ShardManager extends Context.Tag("@effect/cluster/ShardManager")<ShardManager, {
51
- /**
52
- * Get all shard assignments.
53
- */
54
- readonly getAssignments: Effect.Effect<
55
- Iterable<readonly [ShardId, Option.Option<RunnerAddress>]>
56
- >
57
- /**
58
- * Get a stream of sharding events emitted by the shard manager.
59
- */
60
- readonly shardingEvents: (
61
- address: Option.Option<RunnerAddress>
62
- ) => Effect.Effect<Queue.Dequeue<ShardingEvent>, RunnerNotRegistered, Scope>
63
- /**
64
- * Register a new runner with the cluster.
65
- */
66
- readonly register: (runner: Runner) => Effect.Effect<MachineId.MachineId>
67
- /**
68
- * Unregister a runner from the cluster.
69
- */
70
- readonly unregister: (address: RunnerAddress) => Effect.Effect<void>
71
- /**
72
- * Rebalance shards assigned to runners within the cluster.
73
- */
74
- readonly rebalance: Effect.Effect<void>
75
- /**
76
- * Notify the cluster of an unhealthy runner.
77
- */
78
- readonly notifyUnhealthyRunner: (address: RunnerAddress) => Effect.Effect<void>
79
- /**
80
- * Check and report on the health of all runners in the cluster.
81
- */
82
- readonly checkRunnerHealth: Effect.Effect<void>
83
- }>() {}
84
-
85
- /**
86
- * @since 1.0.0
87
- * @category Config
88
- */
89
- export class Config extends Context.Tag("@effect/cluster/ShardManager/Config")<Config, {
90
- /**
91
- * The duration to wait before rebalancing shards after a change.
92
- */
93
- readonly rebalanceDebounce: Duration.DurationInput
94
- /**
95
- * The interval on which regular rebalancing of shards will occur.
96
- */
97
- readonly rebalanceInterval: Duration.DurationInput
98
- /**
99
- * The interval on which rebalancing of shards which failed to be
100
- * rebalanced will be retried.
101
- */
102
- readonly rebalanceRetryInterval: Duration.DurationInput
103
- /**
104
- * The maximum ratio of shards to rebalance at once.
105
- *
106
- * **Note**: this value should be a number between `0` and `1`.
107
- */
108
- readonly rebalanceRate: number
109
- /**
110
- * The interval on which persistence of Runners will be retried if it fails.
111
- */
112
- readonly persistRetryInterval: Duration.DurationInput
113
- /**
114
- * The number of times persistence of Runners will be retried if it fails.
115
- */
116
- readonly persistRetryCount: number
117
- /**
118
- * The interval on which Runner health will be checked.
119
- */
120
- readonly runnerHealthCheckInterval: Duration.DurationInput
121
- /**
122
- * The length of time to wait for a Runner to respond to a ping.
123
- */
124
- readonly runnerPingTimeout: Duration.DurationInput
125
- }>() {
126
- /**
127
- * @since 1.0.0
128
- */
129
- static readonly defaults: Config["Type"] = {
130
- rebalanceDebounce: Duration.seconds(3),
131
- rebalanceInterval: Duration.seconds(20),
132
- rebalanceRetryInterval: Duration.seconds(10),
133
- rebalanceRate: 2 / 100,
134
- persistRetryCount: 100,
135
- persistRetryInterval: Duration.seconds(3),
136
- runnerHealthCheckInterval: Duration.minutes(1),
137
- runnerPingTimeout: Duration.seconds(3)
138
- }
139
- }
140
-
141
- /**
142
- * @since 1.0.0
143
- * @category Config
144
- */
145
- export const configConfig: Config_.Config<Config["Type"]> = Config_.all({
146
- rebalanceDebounce: Config_.duration("rebalanceDebounce").pipe(
147
- Config_.withDefault(Config.defaults.rebalanceDebounce),
148
- Config_.withDescription("The duration to wait before rebalancing shards after a change.")
149
- ),
150
- rebalanceInterval: Config_.duration("rebalanceInterval").pipe(
151
- Config_.withDefault(Config.defaults.rebalanceInterval),
152
- Config_.withDescription("The interval on which regular rebalancing of shards will occur.")
153
- ),
154
- rebalanceRetryInterval: Config_.duration("rebalanceRetryInterval").pipe(
155
- Config_.withDefault(Config.defaults.rebalanceRetryInterval),
156
- Config_.withDescription(
157
- "The interval on which rebalancing of shards which failed to be rebalanced will be retried."
158
- )
159
- ),
160
- rebalanceRate: Config_.number("rebalanceRate").pipe(
161
- Config_.withDefault(Config.defaults.rebalanceRate),
162
- Config_.withDescription("The maximum ratio of shards to rebalance at once.")
163
- ),
164
- persistRetryCount: Config_.integer("persistRetryCount").pipe(
165
- Config_.withDefault(Config.defaults.persistRetryCount),
166
- Config_.withDescription("The number of times persistence of runners will be retried if it fails.")
167
- ),
168
- persistRetryInterval: Config_.duration("persistRetryInterval").pipe(
169
- Config_.withDefault(Config.defaults.persistRetryInterval),
170
- Config_.withDescription("The interval on which persistence of runners will be retried if it fails.")
171
- ),
172
- runnerHealthCheckInterval: Config_.duration("runnerHealthCheckInterval").pipe(
173
- Config_.withDefault(Config.defaults.runnerHealthCheckInterval),
174
- Config_.withDescription("The interval on which runner health will be checked.")
175
- ),
176
- runnerPingTimeout: Config_.duration("runnerPingTimeout").pipe(
177
- Config_.withDefault(Config.defaults.runnerPingTimeout),
178
- Config_.withDescription("The length of time to wait for a runner to respond to a ping.")
179
- )
180
- })
181
-
182
- /**
183
- * @since 1.0.0
184
- * @category Config
185
- */
186
- export const configFromEnv: Effect.Effect<Config["Type"], ConfigError> = configConfig.pipe(
187
- Effect.withConfigProvider(
188
- ConfigProvider.fromEnv().pipe(
189
- ConfigProvider.constantCase
190
- )
191
- )
192
- )
193
-
194
- /**
195
- * @since 1.0.0
196
- * @category Config
197
- */
198
- export const layerConfig = (config?: Partial<Config["Type"]> | undefined): Layer.Layer<Config> =>
199
- Layer.succeed(Config, {
200
- ...Config.defaults,
201
- ...config
202
- })
203
-
204
- /**
205
- * @since 1.0.0
206
- * @category Config
207
- */
208
- export const layerConfigFromEnv = (config?: Partial<Config["Type"]> | undefined): Layer.Layer<Config, ConfigError> =>
209
- Layer.effect(Config, config ? Effect.map(configFromEnv, (env) => ({ ...env, ...config })) : configFromEnv)
210
-
211
- /**
212
- * Represents a client which can be used to communicate with the
213
- * `ShardManager`.
214
- *
215
- * @since 1.0.0
216
- * @category Client
217
- */
218
- export class ShardManagerClient
219
- extends Context.Tag("@effect/cluster/ShardManager/ShardManagerClient")<ShardManagerClient, {
220
- /**
221
- * Register a new runner with the cluster.
222
- */
223
- readonly register: (address: RunnerAddress, groups: ReadonlyArray<string>) => Effect.Effect<MachineId.MachineId>
224
- /**
225
- * Unregister a runner from the cluster.
226
- */
227
- readonly unregister: (address: RunnerAddress) => Effect.Effect<void>
228
- /**
229
- * Notify the cluster of an unhealthy runner.
230
- */
231
- readonly notifyUnhealthyRunner: (address: RunnerAddress) => Effect.Effect<void>
232
- /**
233
- * Get all shard assignments.
234
- */
235
- readonly getAssignments: Effect.Effect<
236
- Iterable<readonly [ShardId, Option.Option<RunnerAddress>]>
237
- >
238
- /**
239
- * Get a stream of sharding events emitted by the shard manager.
240
- */
241
- readonly shardingEvents: (
242
- address: Option.Option<RunnerAddress>
243
- ) => Effect.Effect<Mailbox.ReadonlyMailbox<ShardingEvent>, never, Scope>
244
- /**
245
- * Get the current time on the shard manager.
246
- */
247
- readonly getTime: Effect.Effect<number>
248
- }>()
249
- {}
250
-
251
- /**
252
- * @since 1.0.0
253
- * @category models
254
- */
255
- export const ShardingEventSchema = Schema.Union(
256
- Schema.TaggedStruct("StreamStarted", {}),
257
- Schema.TaggedStruct("ShardsAssigned", {
258
- address: RunnerAddress,
259
- shards: Schema.Array(ShardId)
260
- }),
261
- Schema.TaggedStruct("ShardsUnassigned", {
262
- address: RunnerAddress,
263
- shards: Schema.Array(ShardId)
264
- }),
265
- Schema.TaggedStruct("RunnerRegistered", {
266
- address: RunnerAddress
267
- }),
268
- Schema.TaggedStruct("RunnerUnregistered", {
269
- address: RunnerAddress
270
- })
271
- ) satisfies Schema.Schema<ShardingEvent, any>
272
-
273
- /**
274
- * The messaging protocol for the `ShardManager`.
275
- *
276
- * @since 1.0.0
277
- * @category Rpcs
278
- */
279
- export class Rpcs extends RpcGroup.make(
280
- Rpc.make("Register", {
281
- payload: { runner: Runner },
282
- success: MachineId.MachineId
283
- }),
284
- Rpc.make("Unregister", {
285
- payload: { address: RunnerAddress }
286
- }),
287
- Rpc.make("NotifyUnhealthyRunner", {
288
- payload: { address: RunnerAddress }
289
- }),
290
- Rpc.make("GetAssignments", {
291
- success: Schema.Array(Schema.Tuple(ShardId, Schema.Option(RunnerAddress)))
292
- }),
293
- Rpc.make("ShardingEvents", {
294
- payload: { address: Schema.Option(RunnerAddress) },
295
- success: ShardingEventSchema,
296
- error: RunnerNotRegistered,
297
- stream: true
298
- }),
299
- Rpc.make("GetTime", {
300
- success: Schema.Number
301
- })
302
- ) {}
303
-
304
- /**
305
- * @since 1.0.0
306
- * @category models
307
- */
308
- export type ShardingEvent = Data.TaggedEnum<{
309
- StreamStarted: {}
310
- ShardsAssigned: {
311
- address: RunnerAddress
312
- shards: ReadonlyArray<ShardId>
313
- }
314
- ShardsUnassigned: {
315
- address: RunnerAddress
316
- shards: ReadonlyArray<ShardId>
317
- }
318
- RunnerRegistered: { address: RunnerAddress }
319
- RunnerUnregistered: { address: RunnerAddress }
320
- }>
321
-
322
- /**
323
- * @since 1.0.0
324
- * @category models
325
- */
326
- export const ShardingEvent = Data.taggedEnum<ShardingEvent>()
327
-
328
- /**
329
- * @since 1.0.0
330
- * @category Client
331
- */
332
- export const makeClientLocal = Effect.gen(function*() {
333
- const config = yield* ShardingConfig
334
- const clock = yield* Effect.clock
335
-
336
- const groups = new Set<string>()
337
- const shards = MutableHashMap.empty<ShardId, Option.Option<RunnerAddress>>()
338
-
339
- let machineId = 0
340
-
341
- return ShardManagerClient.of({
342
- register: (_, groupsToAdd) =>
343
- Effect.sync(() => {
344
- for (const group of groupsToAdd) {
345
- if (groups.has(group)) continue
346
- groups.add(group)
347
- for (let n = 1; n <= config.shardsPerGroup; n++) {
348
- MutableHashMap.set(shards, makeShardId(group, n), config.runnerAddress)
349
- }
350
- }
351
- return MachineId.make(++machineId)
352
- }),
353
- unregister: () => Effect.void,
354
- notifyUnhealthyRunner: () => Effect.void,
355
- getAssignments: Effect.succeed(shards),
356
- shardingEvents: Effect.fnUntraced(function*(_address) {
357
- const mailbox = yield* Mailbox.make<ShardingEvent>()
358
- yield* mailbox.offer(ShardingEvent.StreamStarted())
359
- return mailbox
360
- }),
361
- getTime: clock.currentTimeMillis
362
- })
363
- })
364
-
365
- /**
366
- * @since 1.0.0
367
- * @category Client
368
- */
369
- export const makeClientRpc: Effect.Effect<
370
- ShardManagerClient["Type"],
371
- never,
372
- ShardingConfig | RpcClient.Protocol | Scope
373
- > = Effect.gen(function*() {
374
- const config = yield* ShardingConfig
375
- const client = yield* RpcClient.make(Rpcs, {
376
- spanPrefix: "ShardManagerClient",
377
- disableTracing: true
378
- })
379
-
380
- return ShardManagerClient.of({
381
- register: (address, groups) =>
382
- client.Register({ runner: Runner.make({ address, version: config.serverVersion, groups }) }).pipe(
383
- Effect.orDie
384
- ),
385
- unregister: (address) => Effect.orDie(client.Unregister({ address })),
386
- notifyUnhealthyRunner: (address) => Effect.orDie(client.NotifyUnhealthyRunner({ address })),
387
- getAssignments: Effect.orDie(client.GetAssignments()),
388
- shardingEvents: (address) =>
389
- Mailbox.make<ShardingEvent>().pipe(
390
- Effect.tap(Effect.fnUntraced(
391
- function*(mailbox) {
392
- const events = yield* client.ShardingEvents({ address }, { asMailbox: true })
393
- const take = Effect.orDie(events.takeAll)
394
- while (true) {
395
- mailbox.unsafeOfferAll((yield* take)[0])
396
- }
397
- },
398
- (effect, mb) => Mailbox.into(effect, mb),
399
- Effect.forkScoped
400
- ))
401
- ),
402
- getTime: Effect.orDie(client.GetTime())
403
- })
404
- })
405
-
406
- /**
407
- * @since 1.0.0
408
- * @category Client
409
- */
410
- export const layerClientLocal: Layer.Layer<
411
- ShardManagerClient,
412
- never,
413
- ShardingConfig
414
- > = Layer.effect(ShardManagerClient, makeClientLocal)
415
-
416
- /**
417
- * @since 1.0.0
418
- * @category Client
419
- */
420
- export const layerClientRpc: Layer.Layer<
421
- ShardManagerClient,
422
- never,
423
- ShardingConfig | RpcClientProtocol
424
- > = Layer.scoped(ShardManagerClient, makeClientRpc).pipe(
425
- Layer.provide(Layer.scoped(
426
- RpcClient.Protocol,
427
- Effect.gen(function*() {
428
- const config = yield* ShardingConfig
429
- const clientProtocol = yield* RpcClientProtocol
430
- return yield* clientProtocol(config.shardManagerAddress)
431
- })
432
- ))
433
- )
434
-
435
- /**
436
- * @since 1.0.0
437
- * @category Constructors
438
- */
439
- export const make = Effect.gen(function*() {
440
- const storage = yield* ShardStorage
441
- const runnersApi = yield* Runners
442
- const runnerHealthApi = yield* RunnerHealth
443
- const clock = yield* Effect.clock
444
- const config = yield* Config
445
- const shardingConfig = yield* ShardingConfig
446
-
447
- const state = yield* Effect.orDie(State.fromStorage(shardingConfig.shardsPerGroup))
448
- const scope = yield* Effect.scope
449
- const events = yield* PubSub.unbounded<ShardingEvent>()
450
-
451
- function updateRunnerMetrics() {
452
- ClusterMetrics.runners.unsafeUpdate(MutableHashMap.size(state.allRunners), [])
453
- }
454
-
455
- function updateShardMetrics() {
456
- const stats = state.shardStats
457
- for (const [address, shardCount] of stats.perRunner) {
458
- ClusterMetrics.assignedShards.unsafeUpdate(
459
- shardCount,
460
- [MetricLabel.make("address", address)]
461
- )
462
- }
463
- ClusterMetrics.unassignedShards.unsafeUpdate(stats.unassigned, [])
464
- }
465
- updateShardMetrics()
466
-
467
- function withRetry<A, E, R>(effect: Effect.Effect<A, E, R>): Effect.Effect<void, never, R> {
468
- return effect.pipe(
469
- Effect.retry({
470
- schedule: Schedule.spaced(config.persistRetryCount),
471
- times: config.persistRetryCount
472
- }),
473
- Effect.ignore
474
- )
475
- }
476
-
477
- const persistRunners = Effect.unsafeMakeSemaphore(1).withPermits(1)(withRetry(
478
- Effect.suspend(() =>
479
- storage.saveRunners(
480
- Iterable.map(state.allRunners, ([address, runner]) => [address, runner.runner])
481
- )
482
- )
483
- ))
484
-
485
- const persistAssignments = Effect.unsafeMakeSemaphore(1).withPermits(1)(withRetry(
486
- Effect.suspend(() => storage.saveAssignments(state.assignments))
487
- ))
488
-
489
- const notifyUnhealthyRunner = Effect.fnUntraced(function*(address: RunnerAddress) {
490
- if (!MutableHashMap.has(state.allRunners, address)) return
491
-
492
- if (!(yield* runnerHealthApi.isAlive(address))) {
493
- yield* Effect.logWarning(`Runner at address '${address.toString()}' is not alive`)
494
- yield* unregister(address)
495
- }
496
- })
497
-
498
- function updateShardsState(
499
- shards: Iterable<ShardId>,
500
- address: Option.Option<RunnerAddress>
501
- ): Effect.Effect<void, RunnerNotRegistered> {
502
- return Effect.suspend(() => {
503
- if (Option.isSome(address) && !MutableHashMap.has(state.allRunners, address.value)) {
504
- return Effect.fail(new RunnerNotRegistered({ address: address.value }))
505
- }
506
- state.addAssignments(shards, address)
507
- return Effect.void
508
- })
509
- }
510
-
511
- const getAssignments = Effect.sync(() => state.assignments)
512
-
513
- let machineId = 0
514
- const register = Effect.fnUntraced(function*(runner: Runner) {
515
- yield* Effect.logInfo(`Registering runner ${Runner.pretty(runner)}`)
516
-
517
- const current = MutableHashMap.get(state.allRunners, runner.address).pipe(
518
- Option.filter((r) => r.runner.version === runner.version)
519
- )
520
- if (Option.isSome(current)) {
521
- return MachineId.make(++machineId)
522
- }
523
-
524
- state.addRunner(runner, clock.unsafeCurrentTimeMillis())
525
- updateRunnerMetrics()
526
- yield* PubSub.publish(events, ShardingEvent.RunnerRegistered({ address: runner.address }))
527
- yield* Effect.forkIn(persistRunners, scope)
528
- yield* Effect.forkIn(rebalance, scope)
529
- return MachineId.make(++machineId)
530
- })
531
-
532
- const unregister = Effect.fnUntraced(function*(address: RunnerAddress) {
533
- if (!MutableHashMap.has(state.allRunners, address)) return
534
-
535
- yield* Effect.logInfo("Unregistering runner at address:", address)
536
- const unassignments = Arr.empty<ShardId>()
537
- for (const [shard, runner] of state.assignments) {
538
- if (Option.isSome(runner) && Equal.equals(runner.value, address)) {
539
- unassignments.push(shard)
540
- }
541
- }
542
- state.addAssignments(unassignments, Option.none())
543
- state.removeRunner(address)
544
- updateRunnerMetrics()
545
-
546
- if (unassignments.length > 0) {
547
- yield* PubSub.publish(events, ShardingEvent.RunnerUnregistered({ address }))
548
- }
549
-
550
- yield* Effect.forkIn(persistRunners, scope)
551
- yield* Effect.forkIn(rebalance, scope)
552
- })
553
-
554
- let rebalancing = false
555
- let rebalanceDeferred: Deferred.Deferred<void> | undefined
556
- const rebalanceFibers = yield* FiberSet.make()
557
-
558
- const rebalance = Effect.withFiberRuntime<void>((fiber) => {
559
- if (!rebalancing) {
560
- rebalancing = true
561
- return rebalanceLoop
562
- }
563
- if (!rebalanceDeferred) {
564
- rebalanceDeferred = Deferred.unsafeMake(fiber.id())
565
- }
566
- return Deferred.await(rebalanceDeferred)
567
- })
568
-
569
- const rebalanceLoop: Effect.Effect<void> = Effect.suspend(() => {
570
- const deferred = rebalanceDeferred
571
- rebalanceDeferred = undefined
572
- return runRebalance.pipe(
573
- deferred ? Effect.intoDeferred(deferred) : identity,
574
- Effect.onExit(() => {
575
- if (!rebalanceDeferred) {
576
- rebalancing = false
577
- return Effect.void
578
- }
579
- return Effect.forkIn(rebalanceLoop, scope)
580
- })
581
- )
582
- })
583
-
584
- const runRebalance = Effect.gen(function*() {
585
- yield* Effect.sleep(config.rebalanceDebounce)
586
-
587
- if (state.shards.size === 0) {
588
- yield* Effect.logDebug("No shards to rebalance")
589
- return
590
- }
591
-
592
- // Determine which shards to assign and unassign
593
- const assignments = MutableHashMap.empty<RunnerAddress, MutableHashSet.MutableHashSet<ShardId>>()
594
- const unassignments = MutableHashMap.empty<RunnerAddress, MutableHashSet.MutableHashSet<ShardId>>()
595
- const changes = MutableHashSet.empty<RunnerAddress>()
596
- for (const group of state.shards.keys()) {
597
- const [groupAssignments, groupUnassignments, groupChanges] = decideAssignmentsForShards(state, group)
598
- for (const [address, shards] of groupAssignments) {
599
- addAllNested(assignments, address, Array.from(shards, (id) => makeShardId(group, id)))
600
- }
601
- for (const [address, shards] of groupUnassignments) {
602
- addAllNested(unassignments, address, Array.from(shards, (id) => makeShardId(group, id)))
603
- }
604
- for (const address of groupChanges) {
605
- MutableHashSet.add(changes, address)
606
- }
607
- }
608
-
609
- yield* Effect.logDebug(`Rebalancing shards`)
610
-
611
- if (MutableHashSet.size(changes) === 0) return
612
-
613
- yield* Metric.increment(ClusterMetrics.rebalances)
614
-
615
- // Ping runners first and remove unhealthy ones
616
- const failedRunners = MutableHashSet.empty<RunnerAddress>()
617
- for (const address of changes) {
618
- yield* FiberSet.run(
619
- rebalanceFibers,
620
- runnersApi.ping(address).pipe(
621
- Effect.timeout(config.runnerPingTimeout),
622
- Effect.catchAll(() => {
623
- MutableHashSet.add(failedRunners, address)
624
- MutableHashMap.remove(assignments, address)
625
- MutableHashMap.remove(unassignments, address)
626
- return Effect.void
627
- })
628
- )
629
- )
630
- }
631
- yield* FiberSet.awaitEmpty(rebalanceFibers)
632
-
633
- const failedUnassignments = new Set<ShardId>()
634
- for (const [address, shards] of unassignments) {
635
- yield* FiberSet.run(
636
- rebalanceFibers,
637
- updateShardsState(shards, Option.none()).pipe(
638
- Effect.matchEffect({
639
- onFailure: () => {
640
- MutableHashSet.add(failedRunners, address)
641
- for (const shard of shards) {
642
- failedUnassignments.add(shard)
643
- }
644
- // Remove failed runners from the assignments
645
- MutableHashMap.remove(assignments, address)
646
- return Effect.void
647
- },
648
- onSuccess: () =>
649
- PubSub.publish(events, ShardingEvent.ShardsUnassigned({ address, shards: Array.from(shards) }))
650
- })
651
- )
652
- )
653
- }
654
- yield* FiberSet.awaitEmpty(rebalanceFibers)
655
-
656
- // Remove failed shard unassignments from the assignments
657
- MutableHashMap.forEach(assignments, (shards, address) => {
658
- for (const shard of failedUnassignments) {
659
- MutableHashSet.remove(shards, shard)
660
- }
661
- if (MutableHashSet.size(shards) === 0) {
662
- MutableHashMap.remove(assignments, address)
663
- }
664
- })
665
-
666
- // Perform the assignments
667
- for (const [address, shards] of assignments) {
668
- yield* FiberSet.run(
669
- rebalanceFibers,
670
- updateShardsState(shards, Option.some(address)).pipe(
671
- Effect.matchEffect({
672
- onFailure: () => {
673
- MutableHashSet.add(failedRunners, address)
674
- return Effect.void
675
- },
676
- onSuccess: () =>
677
- PubSub.publish(events, ShardingEvent.ShardsAssigned({ address, shards: Array.from(shards) }))
678
- })
679
- )
680
- )
681
- }
682
- yield* FiberSet.awaitEmpty(rebalanceFibers)
683
-
684
- updateShardMetrics()
685
-
686
- const wereFailures = MutableHashSet.size(failedRunners) > 0
687
- if (wereFailures) {
688
- // Check if the failing runners are still reachable
689
- yield* Effect.forEach(failedRunners, notifyUnhealthyRunner, { discard: true }).pipe(
690
- Effect.forkIn(scope)
691
- )
692
- yield* Effect.logWarning("Failed to rebalance runners: ", failedRunners)
693
- }
694
-
695
- if (wereFailures) {
696
- // Try rebalancing again later if there were any failures
697
- yield* Clock.sleep(config.rebalanceRetryInterval).pipe(
698
- Effect.zipRight(rebalance),
699
- Effect.forkIn(scope)
700
- )
701
- }
702
-
703
- yield* persistAssignments
704
- }).pipe(Effect.withSpan("ShardManager.rebalance", { captureStackTrace: false }))
705
-
706
- const checkRunnerHealth: Effect.Effect<void> = Effect.suspend(() =>
707
- Effect.forEach(MutableHashMap.keys(state.allRunners), notifyUnhealthyRunner, {
708
- concurrency: 10,
709
- discard: true
710
- })
711
- )
712
-
713
- yield* Effect.addFinalizer(() =>
714
- persistAssignments.pipe(
715
- Effect.catchAllCause((cause) => Effect.logWarning("Failed to persist assignments on shutdown", cause)),
716
- Effect.zipRight(persistRunners.pipe(
717
- Effect.catchAllCause((cause) => Effect.logWarning("Failed to persist runners on shutdown", cause))
718
- ))
719
- )
720
- )
721
-
722
- yield* Effect.forkIn(persistRunners, scope)
723
-
724
- // Start a regular cluster rebalance at the configured interval
725
- yield* rebalance.pipe(
726
- Effect.andThen(Effect.sleep(config.rebalanceInterval)),
727
- Effect.forever,
728
- Effect.forkIn(scope)
729
- )
730
-
731
- yield* checkRunnerHealth.pipe(
732
- Effect.andThen(Effect.sleep(config.runnerHealthCheckInterval)),
733
- Effect.forever,
734
- Effect.forkIn(scope)
735
- )
736
-
737
- yield* Effect.gen(function*() {
738
- const queue = yield* PubSub.subscribe(events)
739
- while (true) {
740
- yield* Effect.logInfo("Shard manager event:", yield* Queue.take(queue))
741
- }
742
- }).pipe(Effect.forkIn(scope))
743
-
744
- yield* Effect.logInfo("Shard manager initialized")
745
-
746
- return ShardManager.of({
747
- getAssignments,
748
- shardingEvents: (address) => {
749
- if (Option.isNone(address)) {
750
- return PubSub.subscribe(events)
751
- }
752
- return Effect.tap(PubSub.subscribe(events), () => {
753
- const isRegistered = MutableHashMap.has(state.allRunners, address.value)
754
- if (isRegistered) {
755
- return runnerHealthApi.onConnection(address.value)
756
- }
757
- return Effect.fail(new RunnerNotRegistered({ address: address.value }))
758
- })
759
- },
760
- register,
761
- unregister,
762
- rebalance,
763
- notifyUnhealthyRunner,
764
- checkRunnerHealth
765
- })
766
- })
767
-
768
- /**
769
- * @since 1.0.0
770
- * @category layer
771
- */
772
- export const layer: Layer.Layer<
773
- ShardManager,
774
- never,
775
- ShardStorage | RunnerHealth | Runners | Config | ShardingConfig
776
- > = Layer.scoped(ShardManager, make)
777
-
778
- /**
779
- * @since 1.0.0
780
- * @category Server
781
- */
782
- export const layerServerHandlers = Rpcs.toLayer(Effect.gen(function*() {
783
- const shardManager = yield* ShardManager
784
- const clock = yield* Effect.clock
785
- return {
786
- Register: ({ runner }) => shardManager.register(runner),
787
- Unregister: ({ address }) => shardManager.unregister(address),
788
- NotifyUnhealthyRunner: ({ address }) => shardManager.notifyUnhealthyRunner(address),
789
- GetAssignments: () =>
790
- Effect.map(
791
- shardManager.getAssignments,
792
- (assignments) => Array.from(assignments)
793
- ),
794
- ShardingEvents: Effect.fnUntraced(function*({ address }) {
795
- const queue = yield* shardManager.shardingEvents(address)
796
- const mailbox = yield* Mailbox.make<ShardingEvent>()
797
-
798
- yield* mailbox.offer(ShardingEvent.StreamStarted())
799
-
800
- yield* Queue.takeBetween(queue, 1, Number.MAX_SAFE_INTEGER).pipe(
801
- Effect.flatMap((events) => mailbox.offerAll(events)),
802
- Effect.forever,
803
- Effect.forkScoped
804
- )
805
-
806
- return mailbox
807
- }),
808
- GetTime: () => clock.currentTimeMillis
809
- }
810
- }))
811
-
812
- /**
813
- * @since 1.0.0
814
- * @category Server
815
- */
816
- export const layerServer: Layer.Layer<
817
- never,
818
- never,
819
- ShardManager | RpcServer.Protocol
820
- > = RpcServer.layer(Rpcs, {
821
- spanPrefix: "ShardManager",
822
- disableTracing: true
823
- }).pipe(Layer.provide(layerServerHandlers))