alepha 0.12.1 → 0.13.1

This diff shows the content changes between publicly released package versions, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (198)
  1. package/dist/api-notifications/index.d.ts +111 -111
  2. package/dist/api-users/index.d.ts +1240 -1240
  3. package/dist/api-verifications/index.d.ts +94 -94
  4. package/dist/cli/{dist-Sz2EXvQX.cjs → dist-Dl9Vl7Ur.js} +17 -13
  5. package/dist/cli/{dist-BBPjuQ56.js.map → dist-Dl9Vl7Ur.js.map} +1 -1
  6. package/dist/cli/index.d.ts +3 -11
  7. package/dist/cli/index.js +106 -74
  8. package/dist/cli/index.js.map +1 -1
  9. package/dist/email/index.js +71 -73
  10. package/dist/email/index.js.map +1 -1
  11. package/dist/orm/index.d.ts +1 -1
  12. package/dist/orm/index.js.map +1 -1
  13. package/dist/queue/index.d.ts +4 -4
  14. package/dist/redis/index.d.ts +10 -10
  15. package/dist/retry/index.d.ts +1 -1
  16. package/dist/retry/index.js +2 -2
  17. package/dist/retry/index.js.map +1 -1
  18. package/dist/scheduler/index.d.ts +6 -6
  19. package/dist/server/index.js +1 -1
  20. package/dist/server/index.js.map +1 -1
  21. package/dist/server-auth/index.d.ts +193 -193
  22. package/dist/server-health/index.d.ts +17 -17
  23. package/dist/server-links/index.d.ts +34 -34
  24. package/dist/server-metrics/index.js +170 -174
  25. package/dist/server-metrics/index.js.map +1 -1
  26. package/dist/server-security/index.d.ts +9 -9
  27. package/dist/vite/index.js +4 -5
  28. package/dist/vite/index.js.map +1 -1
  29. package/dist/websocket/index.d.ts +7 -7
  30. package/package.json +52 -103
  31. package/src/cli/apps/AlephaPackageBuilderCli.ts +7 -2
  32. package/src/cli/assets/appRouterTs.ts +9 -0
  33. package/src/cli/assets/indexHtml.ts +2 -1
  34. package/src/cli/assets/mainBrowserTs.ts +10 -0
  35. package/src/cli/commands/CoreCommands.ts +6 -5
  36. package/src/cli/commands/DrizzleCommands.ts +65 -57
  37. package/src/cli/commands/VerifyCommands.ts +1 -1
  38. package/src/cli/services/ProjectUtils.ts +44 -38
  39. package/src/orm/providers/DrizzleKitProvider.ts +1 -1
  40. package/src/retry/descriptors/$retry.ts +5 -3
  41. package/src/server/providers/NodeHttpServerProvider.ts +1 -1
  42. package/src/vite/helpers/boot.ts +3 -3
  43. package/dist/api-files/index.cjs +0 -1293
  44. package/dist/api-files/index.cjs.map +0 -1
  45. package/dist/api-files/index.d.cts +0 -829
  46. package/dist/api-jobs/index.cjs +0 -274
  47. package/dist/api-jobs/index.cjs.map +0 -1
  48. package/dist/api-jobs/index.d.cts +0 -654
  49. package/dist/api-notifications/index.cjs +0 -380
  50. package/dist/api-notifications/index.cjs.map +0 -1
  51. package/dist/api-notifications/index.d.cts +0 -289
  52. package/dist/api-parameters/index.cjs +0 -66
  53. package/dist/api-parameters/index.cjs.map +0 -1
  54. package/dist/api-parameters/index.d.cts +0 -84
  55. package/dist/api-users/index.cjs +0 -6009
  56. package/dist/api-users/index.cjs.map +0 -1
  57. package/dist/api-users/index.d.cts +0 -4740
  58. package/dist/api-verifications/index.cjs +0 -407
  59. package/dist/api-verifications/index.cjs.map +0 -1
  60. package/dist/api-verifications/index.d.cts +0 -207
  61. package/dist/batch/index.cjs +0 -408
  62. package/dist/batch/index.cjs.map +0 -1
  63. package/dist/batch/index.d.cts +0 -330
  64. package/dist/bin/index.cjs +0 -17
  65. package/dist/bin/index.cjs.map +0 -1
  66. package/dist/bin/index.d.cts +0 -1
  67. package/dist/bucket/index.cjs +0 -303
  68. package/dist/bucket/index.cjs.map +0 -1
  69. package/dist/bucket/index.d.cts +0 -355
  70. package/dist/cache/index.cjs +0 -241
  71. package/dist/cache/index.cjs.map +0 -1
  72. package/dist/cache/index.d.cts +0 -202
  73. package/dist/cache-redis/index.cjs +0 -84
  74. package/dist/cache-redis/index.cjs.map +0 -1
  75. package/dist/cache-redis/index.d.cts +0 -40
  76. package/dist/cli/chunk-DSlc6foC.cjs +0 -43
  77. package/dist/cli/dist-BBPjuQ56.js +0 -2778
  78. package/dist/cli/dist-Sz2EXvQX.cjs.map +0 -1
  79. package/dist/cli/index.cjs +0 -1241
  80. package/dist/cli/index.cjs.map +0 -1
  81. package/dist/cli/index.d.cts +0 -422
  82. package/dist/command/index.cjs +0 -693
  83. package/dist/command/index.cjs.map +0 -1
  84. package/dist/command/index.d.cts +0 -340
  85. package/dist/core/index.cjs +0 -2264
  86. package/dist/core/index.cjs.map +0 -1
  87. package/dist/core/index.d.cts +0 -1927
  88. package/dist/datetime/index.cjs +0 -318
  89. package/dist/datetime/index.cjs.map +0 -1
  90. package/dist/datetime/index.d.cts +0 -145
  91. package/dist/email/index.cjs +0 -10874
  92. package/dist/email/index.cjs.map +0 -1
  93. package/dist/email/index.d.cts +0 -186
  94. package/dist/fake/index.cjs +0 -34641
  95. package/dist/fake/index.cjs.map +0 -1
  96. package/dist/fake/index.d.cts +0 -74
  97. package/dist/file/index.cjs +0 -1212
  98. package/dist/file/index.cjs.map +0 -1
  99. package/dist/file/index.d.cts +0 -698
  100. package/dist/lock/index.cjs +0 -226
  101. package/dist/lock/index.cjs.map +0 -1
  102. package/dist/lock/index.d.cts +0 -361
  103. package/dist/lock-redis/index.cjs +0 -113
  104. package/dist/lock-redis/index.cjs.map +0 -1
  105. package/dist/lock-redis/index.d.cts +0 -24
  106. package/dist/logger/index.cjs +0 -521
  107. package/dist/logger/index.cjs.map +0 -1
  108. package/dist/logger/index.d.cts +0 -281
  109. package/dist/orm/index.cjs +0 -2986
  110. package/dist/orm/index.cjs.map +0 -1
  111. package/dist/orm/index.d.cts +0 -2213
  112. package/dist/queue/index.cjs +0 -1044
  113. package/dist/queue/index.cjs.map +0 -1
  114. package/dist/queue/index.d.cts +0 -1265
  115. package/dist/queue-redis/index.cjs +0 -873
  116. package/dist/queue-redis/index.cjs.map +0 -1
  117. package/dist/queue-redis/index.d.cts +0 -82
  118. package/dist/redis/index.cjs +0 -153
  119. package/dist/redis/index.cjs.map +0 -1
  120. package/dist/redis/index.d.cts +0 -82
  121. package/dist/retry/index.cjs +0 -146
  122. package/dist/retry/index.cjs.map +0 -1
  123. package/dist/retry/index.d.cts +0 -172
  124. package/dist/router/index.cjs +0 -111
  125. package/dist/router/index.cjs.map +0 -1
  126. package/dist/router/index.d.cts +0 -46
  127. package/dist/scheduler/index.cjs +0 -576
  128. package/dist/scheduler/index.cjs.map +0 -1
  129. package/dist/scheduler/index.d.cts +0 -145
  130. package/dist/security/index.cjs +0 -2402
  131. package/dist/security/index.cjs.map +0 -1
  132. package/dist/security/index.d.cts +0 -598
  133. package/dist/server/index.cjs +0 -1680
  134. package/dist/server/index.cjs.map +0 -1
  135. package/dist/server/index.d.cts +0 -810
  136. package/dist/server-auth/index.cjs +0 -3146
  137. package/dist/server-auth/index.cjs.map +0 -1
  138. package/dist/server-auth/index.d.cts +0 -1164
  139. package/dist/server-cache/index.cjs +0 -252
  140. package/dist/server-cache/index.cjs.map +0 -1
  141. package/dist/server-cache/index.d.cts +0 -164
  142. package/dist/server-compress/index.cjs +0 -141
  143. package/dist/server-compress/index.cjs.map +0 -1
  144. package/dist/server-compress/index.d.cts +0 -38
  145. package/dist/server-cookies/index.cjs +0 -234
  146. package/dist/server-cookies/index.cjs.map +0 -1
  147. package/dist/server-cookies/index.d.cts +0 -144
  148. package/dist/server-cors/index.cjs +0 -201
  149. package/dist/server-cors/index.cjs.map +0 -1
  150. package/dist/server-cors/index.d.cts +0 -140
  151. package/dist/server-health/index.cjs +0 -62
  152. package/dist/server-health/index.cjs.map +0 -1
  153. package/dist/server-health/index.d.cts +0 -58
  154. package/dist/server-helmet/index.cjs +0 -131
  155. package/dist/server-helmet/index.cjs.map +0 -1
  156. package/dist/server-helmet/index.d.cts +0 -97
  157. package/dist/server-links/index.cjs +0 -992
  158. package/dist/server-links/index.cjs.map +0 -1
  159. package/dist/server-links/index.d.cts +0 -513
  160. package/dist/server-metrics/index.cjs +0 -4535
  161. package/dist/server-metrics/index.cjs.map +0 -1
  162. package/dist/server-metrics/index.d.cts +0 -35
  163. package/dist/server-multipart/index.cjs +0 -237
  164. package/dist/server-multipart/index.cjs.map +0 -1
  165. package/dist/server-multipart/index.d.cts +0 -50
  166. package/dist/server-proxy/index.cjs +0 -186
  167. package/dist/server-proxy/index.cjs.map +0 -1
  168. package/dist/server-proxy/index.d.cts +0 -234
  169. package/dist/server-rate-limit/index.cjs +0 -241
  170. package/dist/server-rate-limit/index.cjs.map +0 -1
  171. package/dist/server-rate-limit/index.d.cts +0 -183
  172. package/dist/server-security/index.cjs +0 -316
  173. package/dist/server-security/index.cjs.map +0 -1
  174. package/dist/server-security/index.d.cts +0 -173
  175. package/dist/server-static/index.cjs +0 -170
  176. package/dist/server-static/index.cjs.map +0 -1
  177. package/dist/server-static/index.d.cts +0 -121
  178. package/dist/server-swagger/index.cjs +0 -1021
  179. package/dist/server-swagger/index.cjs.map +0 -1
  180. package/dist/server-swagger/index.d.cts +0 -382
  181. package/dist/sms/index.cjs +0 -221
  182. package/dist/sms/index.cjs.map +0 -1
  183. package/dist/sms/index.d.cts +0 -130
  184. package/dist/thread/index.cjs +0 -350
  185. package/dist/thread/index.cjs.map +0 -1
  186. package/dist/thread/index.d.cts +0 -260
  187. package/dist/topic/index.cjs +0 -282
  188. package/dist/topic/index.cjs.map +0 -1
  189. package/dist/topic/index.d.cts +0 -523
  190. package/dist/topic-redis/index.cjs +0 -71
  191. package/dist/topic-redis/index.cjs.map +0 -1
  192. package/dist/topic-redis/index.d.cts +0 -42
  193. package/dist/vite/index.cjs +0 -1077
  194. package/dist/vite/index.cjs.map +0 -1
  195. package/dist/vite/index.d.cts +0 -542
  196. package/dist/websocket/index.cjs +0 -1117
  197. package/dist/websocket/index.cjs.map +0 -1
  198. package/dist/websocket/index.d.cts +0 -861
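Note that every CommonJS artifact under dist/ (the index.cjs, *.cjs.map, and *.d.cts files) is removed in 0.13.x while the .js and .d.ts builds remain, and the CLI chunk is renamed from .cjs to .js, which suggests the package now ships ESM-only output. A minimal editorial sketch of what that would mean for consumers, assuming the require conditions were dropped from the package.json exports map (the "alepha/batch" specifier is taken from the package's own docs further down; the rest is illustrative):

```ts
// 0.12.x also shipped a CommonJS build, so this worked:
//   const { $batch } = require("alepha/batch"); // resolved to dist/batch/index.cjs, removed in 0.13.x
// With the .cjs and .d.cts artifacts gone, use the ESM entry point:
import { $batch } from "alepha/batch";

// A CommonJS project that cannot migrate yet could fall back to a dynamic import:
// const { $batch } = await import("alepha/batch");
```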
package/dist/batch/index.cjs (deleted)
@@ -1,408 +0,0 @@
- let alepha = require("alepha");
- let node_crypto = require("node:crypto");
- let alepha_datetime = require("alepha/datetime");
- let alepha_logger = require("alepha/logger");
- let alepha_retry = require("alepha/retry");
-
- //#region src/batch/providers/BatchProvider.ts
- /**
- * Service for batch processing operations.
- * Provides methods to manage batches of items with automatic flushing based on size or time.
- */
- var BatchProvider = class {
- log = (0, alepha_logger.$logger)();
- dateTime = (0, alepha.$inject)(alepha_datetime.DateTimeProvider);
- retryProvider = (0, alepha.$inject)(alepha_retry.RetryProvider);
- /**
- * Creates a new batch context with the given options.
- */
- createContext(alepha$1, options) {
- return {
- options,
- itemStates: /* @__PURE__ */ new Map(),
- partitions: /* @__PURE__ */ new Map(),
- activeHandlers: [],
- isShuttingDown: false,
- isReady: false,
- alepha: alepha$1
- };
- }
- /**
- * Get the effective maxSize for a context.
- */
- getMaxSize(context) {
- return context.options.maxSize ?? 10;
- }
- /**
- * Get the effective concurrency for a context.
- */
- getConcurrency(context) {
- return context.options.concurrency ?? 1;
- }
- /**
- * Get the effective maxDuration for a context.
- */
- getMaxDuration(context) {
- return context.options.maxDuration ?? [1, "second"];
- }
- /**
- * Pushes an item into the batch and returns immediately with a unique ID.
- * The item will be processed asynchronously with other items when the batch is flushed.
- * Use wait(id) to get the processing result.
- *
- * @throws Error if maxQueueSize is exceeded
- */
- push(context, item) {
- const id = (0, node_crypto.randomUUID)();
- let partitionKey;
- try {
- partitionKey = context.options.partitionBy ? context.options.partitionBy(item) : "default";
- } catch (error) {
- this.log.warn("partitionBy function threw an error, using 'default' partition", { error });
- partitionKey = "default";
- }
- const itemState = {
- id,
- item,
- partitionKey,
- status: "pending"
- };
- this.log.trace("Pushing item to batch", {
- id,
- partitionKey,
- item
- });
- context.itemStates.set(id, itemState);
- if (!context.partitions.has(partitionKey)) context.partitions.set(partitionKey, {
- itemIds: [],
- flushing: false
- });
- const partition = context.partitions.get(partitionKey);
- if (context.options.maxQueueSize !== void 0 && partition.itemIds.length >= context.options.maxQueueSize) throw new Error(`Batch queue size exceeded for partition '${partitionKey}' (max: ${context.options.maxQueueSize})`);
- partition.itemIds.push(id);
- const maxSize = this.getMaxSize(context);
- const maxDuration = this.getMaxDuration(context);
- if (context.isReady) {
- if (partition.itemIds.length >= maxSize) {
- this.log.trace(`Batch partition '${partitionKey}' is full, flushing...`);
- this.flushPartition(context, partitionKey).catch((error) => this.log.error(`Failed to flush batch partition '${partitionKey}' on max size`, error));
- } else if (!partition.timeout && !partition.flushing) partition.timeout = this.dateTime.createTimeout(() => {
- this.log.trace(`Batch partition '${partitionKey}' timed out, flushing...`);
- this.flushPartition(context, partitionKey).catch((error) => this.log.error(`Failed to flush batch partition '${partitionKey}' on timeout`, error));
- }, maxDuration);
- } else this.log.trace(`Buffering item in partition '${partitionKey}' (app not ready yet, ${partition.itemIds.length} items buffered)`);
- return id;
- }
- /**
- * Wait for a specific item to be processed and get its result.
- * @param id The item ID returned from push()
- * @returns The processing result
- * @throws If the item doesn't exist or processing failed
- */
- async wait(context, id) {
- const itemState = context.itemStates.get(id);
- if (!itemState) throw new Error(`Item with id '${id}' not found`);
- if (itemState.status === "completed") return itemState.result;
- if (itemState.status === "failed") throw itemState.error;
- if (!itemState.promise) itemState.promise = new Promise((resolve, reject) => {
- itemState.resolve = resolve;
- itemState.reject = reject;
- });
- return itemState.promise;
- }
- /**
- * Get the current status of an item.
- * @param id The item ID returned from push()
- * @returns Status information or undefined if item doesn't exist
- */
- status(context, id) {
- const itemState = context.itemStates.get(id);
- if (!itemState) return;
- if (itemState.status === "completed") return {
- status: "completed",
- result: itemState.result
- };
- if (itemState.status === "failed") return {
- status: "failed",
- error: itemState.error
- };
- return { status: itemState.status };
- }
- /**
- * Clears completed and failed items from the context to free memory.
- * Returns the number of items cleared.
- *
- * @param context The batch context
- * @param status Optional: only clear items with this specific status ('completed' or 'failed')
- * @returns The number of items cleared
- */
- clearCompleted(context, status) {
- let count = 0;
- for (const [id, state] of context.itemStates) if (status) {
- if (state.status === status) {
- context.itemStates.delete(id);
- count++;
- }
- } else if (state.status === "completed" || state.status === "failed") {
- context.itemStates.delete(id);
- count++;
- }
- return count;
- }
- /**
- * Flush all partitions or a specific partition.
- */
- async flush(context, partitionKey) {
- const promises = [];
- if (partitionKey) {
- if (context.partitions.has(partitionKey)) promises.push(this.flushPartition(context, partitionKey));
- } else for (const key of context.partitions.keys()) promises.push(this.flushPartition(context, key));
- await Promise.all(promises);
- }
- /**
- * Flush a specific partition.
- */
- async flushPartition(context, partitionKey, limit) {
- const partition = context.partitions.get(partitionKey);
- if (!partition || partition.itemIds.length === 0) {
- context.partitions.delete(partitionKey);
- return;
- }
- partition.timeout?.clear();
- partition.timeout = void 0;
- const itemsToTake = limit !== void 0 ? Math.min(limit, partition.itemIds.length) : partition.itemIds.length;
- const itemIdsToProcess = partition.itemIds.splice(0, itemsToTake);
- partition.flushing = true;
- const itemsToProcess = [];
- for (const id of itemIdsToProcess) {
- const itemState = context.itemStates.get(id);
- if (itemState) {
- itemState.status = "processing";
- itemsToProcess.push(itemState.item);
- }
- }
- const concurrency = this.getConcurrency(context);
- const maxDuration = this.getMaxDuration(context);
- while (context.activeHandlers.length >= concurrency) {
- this.log.trace(`Batch handler is at concurrency limit, waiting for a slot...`);
- await Promise.race(context.activeHandlers.map((it) => it.promise));
- }
- const promise = Promise.withResolvers();
- context.activeHandlers.push(promise);
- let result;
- try {
- result = await context.alepha.context.run(() => context.isShuttingDown ? context.options.handler(itemsToProcess) : this.retryProvider.retry({
- ...context.options.retry,
- handler: context.options.handler
- }, itemsToProcess));
- for (const id of itemIdsToProcess) {
- const itemState = context.itemStates.get(id);
- if (itemState) {
- itemState.status = "completed";
- itemState.result = result;
- itemState.resolve?.(result);
- }
- }
- } catch (error) {
- this.log.error(`Batch handler failed`, error);
- for (const id of itemIdsToProcess) {
- const itemState = context.itemStates.get(id);
- if (itemState) {
- itemState.status = "failed";
- itemState.error = error;
- itemState.reject?.(error);
- }
- }
- } finally {
- promise.resolve();
- context.activeHandlers = context.activeHandlers.filter((it) => it !== promise);
- const currentPartition = context.partitions.get(partitionKey);
- if (currentPartition?.flushing && currentPartition.itemIds.length === 0) context.partitions.delete(partitionKey);
- else if (currentPartition) {
- currentPartition.flushing = false;
- if (currentPartition.itemIds.length > 0 && !currentPartition.timeout) currentPartition.timeout = this.dateTime.createTimeout(() => {
- this.log.trace(`Batch partition '${partitionKey}' timed out, flushing...`);
- this.flushPartition(context, partitionKey).catch((error) => this.log.error(`Failed to flush batch partition '${partitionKey}' on timeout`, error));
- }, maxDuration);
- }
- }
- }
- /**
- * Mark the context as ready and start processing buffered items.
- * Called after the "ready" hook.
- */
- async markReady(context) {
- this.log.debug("Batch processor is now ready, starting to process buffered items...");
- context.isReady = true;
- await this.startProcessing(context);
- }
- /**
- * Mark the context as shutting down and flush all remaining items.
- */
- async shutdown(context) {
- this.log.debug("Flushing all remaining batch partitions on shutdown...");
- context.isShuttingDown = true;
- await this.flush(context);
- this.log.debug("All batch partitions flushed");
- }
- /**
- * Called after the "ready" hook to start processing buffered items that were
- * pushed during startup. This checks all partitions and starts timeouts/flushes
- * for items that were accumulated before the app was ready.
- */
- async startProcessing(context) {
- const maxSize = this.getMaxSize(context);
- const maxDuration = this.getMaxDuration(context);
- for (const [partitionKey, partition] of context.partitions.entries()) {
- if (partition.itemIds.length === 0) continue;
- this.log.trace(`Starting processing for partition '${partitionKey}' with ${partition.itemIds.length} buffered items`);
- while (partition.itemIds.length >= maxSize) {
- this.log.trace(`Partition '${partitionKey}' has ${partition.itemIds.length} items, flushing batch of ${maxSize}...`);
- await this.flushPartition(context, partitionKey, maxSize);
- }
- if (partition.itemIds.length > 0 && !partition.timeout && !partition.flushing) {
- this.log.trace(`Starting timeout for partition '${partitionKey}' with ${partition.itemIds.length} remaining items`);
- partition.timeout = this.dateTime.createTimeout(() => {
- this.log.trace(`Batch partition '${partitionKey}' timed out, flushing...`);
- this.flushPartition(context, partitionKey).catch((error) => this.log.error(`Failed to flush partition '${partitionKey}' on timeout after startup`, error));
- }, maxDuration);
- }
- }
- }
- };
-
- //#endregion
- //#region src/batch/descriptors/$batch.ts
- /**
- * Creates a batch processing descriptor for efficient grouping and processing of multiple operations.
- */
- const $batch = (options) => (0, alepha.createDescriptor)(BatchDescriptor, options);
- var BatchDescriptor = class extends alepha.Descriptor {
- batchProvider = (0, alepha.$inject)(BatchProvider);
- context;
- constructor(...args) {
- super(...args);
- this.context = this.batchProvider.createContext(this.alepha, {
- handler: this.options.handler,
- maxSize: this.options.maxSize,
- maxQueueSize: this.options.maxQueueSize,
- maxDuration: this.options.maxDuration,
- partitionBy: this.options.partitionBy,
- concurrency: this.options.concurrency,
- retry: this.options.retry
- });
- }
- /**
- * Pushes an item into the batch and returns immediately with a unique ID.
- * The item will be processed asynchronously with other items when the batch is flushed.
- * Use wait(id) to get the processing result.
- */
- async push(item) {
- const validatedItem = this.alepha.codec.validate(this.options.schema, item);
- return this.batchProvider.push(this.context, validatedItem);
- }
- /**
- * Wait for a specific item to be processed and get its result.
- * @param id The item ID returned from push()
- * @returns The processing result
- * @throws If the item doesn't exist or processing failed
- */
- async wait(id) {
- return this.batchProvider.wait(this.context, id);
- }
- /**
- * Get the current status of an item.
- * @param id The item ID returned from push()
- * @returns Status information or undefined if item doesn't exist
- */
- status(id) {
- return this.batchProvider.status(this.context, id);
- }
- /**
- * Flush all partitions or a specific partition.
- */
- async flush(partitionKey) {
- return this.batchProvider.flush(this.context, partitionKey);
- }
- /**
- * Clears completed and failed items from memory.
- * Call this periodically in long-running applications to prevent memory leaks.
- *
- * @param status Optional: only clear items with this specific status ('completed' or 'failed')
- * @returns The number of items cleared
- */
- clearCompleted(status) {
- return this.batchProvider.clearCompleted(this.context, status);
- }
- onReady = (0, alepha.$hook)({
- on: "ready",
- handler: async () => {
- await this.batchProvider.markReady(this.context);
- }
- });
- dispose = (0, alepha.$hook)({
- on: "stop",
- priority: "first",
- handler: async () => {
- await this.batchProvider.shutdown(this.context);
- }
- });
- };
- $batch[alepha.KIND] = BatchDescriptor;
-
- //#endregion
- //#region src/batch/index.ts
- /**
- * This module allows you to group multiple asynchronous operations into a single "batch," which is then processed together.
- * This is an essential pattern for improving performance, reducing I/O, and interacting efficiently with rate-limited APIs or databases.
- *
- * ```ts
- * import { Alepha, $hook, run, t } from "alepha";
- * import { $batch } from "alepha/batch";
- *
- * class LoggingService {
- * // define the batch processor
- * logBatch = $batch({
- * schema: t.text(),
- * maxSize: 10,
- * maxDuration: [5, "seconds"],
- * handler: async (items) => {
- * console.log(`[BATCH LOG] Processing ${items.length} events:`, items);
- * },
- * });
- *
- * // example of how to use it
- * onReady = $hook({
- * on: "ready",
- * handler: async () => {
- * // push() returns an ID immediately
- * const id1 = await this.logBatch.push("Application started.");
- * const id2 = await this.logBatch.push("User authenticated.");
- *
- * // optionally wait for processing to complete
- * await this.logBatch.wait(id1);
- *
- * // or check the status
- * const status = this.logBatch.status(id2);
- * console.log(status?.status); // "pending" | "processing" | "completed" | "failed"
- * },
- * });
- * }
- * ```
- *
- * @see {@link $batch}
- * @see {@link BatchProvider}
- * @module alepha.batch
- */
- const AlephaBatch = (0, alepha.$module)({
- name: "alepha.batch",
- descriptors: [$batch],
- services: [BatchProvider]
- });
-
- //#endregion
- exports.$batch = $batch;
- exports.AlephaBatch = AlephaBatch;
- exports.BatchDescriptor = BatchDescriptor;
- exports.BatchProvider = BatchProvider;
- //# sourceMappingURL=index.cjs.map
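The removed sources above document several options that the module's inline example does not exercise (partitionBy, concurrency, maxQueueSize, and the retry block). As an editorial sketch only, assuming the same $batch API described in those doc comments (the AuditService class, its field name, and the partition-key convention are invented for illustration):

```ts
import { t } from "alepha";
import { $batch } from "alepha/batch";

class AuditService {
  // Items that share a partition key are grouped and flushed together.
  auditBatch = $batch({
    schema: t.text(),
    maxSize: 50, // flush once a partition holds 50 items
    maxQueueSize: 1000, // push() throws if a partition exceeds 1000 pending items
    maxDuration: [2, "seconds"], // or flush after 2 seconds, whichever comes first
    partitionBy: (line) => line.split(":")[0], // e.g. "user:..." vs "system:..." lines
    concurrency: 2, // at most two batch handlers run at once
    retry: { max: 3, backoff: { initial: 200, factor: 2, jitter: true } },
    handler: async (lines) => {
      // Persist the whole partition in one round-trip; the return value becomes
      // the result that wait(id) resolves with for every item in this batch.
      return lines.length;
    },
  });
}
```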
package/dist/batch/index.cjs.map (deleted)
@@ -1 +0,0 @@
- {"version":3,"file":"index.cjs","names":["DateTimeProvider","RetryProvider","partitionKey: string","itemState: BatchItemState<TItem, TResponse>","promises: Promise<void>[]","itemsToProcess: TItem[]","result: any","Descriptor","KIND"],"sources":["../../src/batch/providers/BatchProvider.ts","../../src/batch/descriptors/$batch.ts","../../src/batch/index.ts"],"sourcesContent":["import { randomUUID } from \"node:crypto\";\nimport { $inject, type Alepha } from \"alepha\";\nimport { DateTimeProvider, type DurationLike } from \"alepha/datetime\";\nimport { $logger } from \"alepha/logger\";\nimport { type RetryBackoffOptions, RetryProvider } from \"alepha/retry\";\n\n// ---------------------------------------------------------------------------------------------------------------------\n\nexport interface BatchOptions<TItem, TResponse = any> {\n /**\n * The batch processing handler function that processes arrays of validated items.\n */\n handler: (items: TItem[]) => TResponse;\n\n /**\n * Maximum number of items to collect before automatically flushing the batch.\n *\n * @default 10\n */\n maxSize?: number;\n\n /**\n * Maximum number of items that can be queued in a single partition.\n * If exceeded, push() will throw an error.\n */\n maxQueueSize?: number;\n\n /**\n * Maximum time to wait before flushing a batch, even if it hasn't reached maxSize.\n *\n * @default [1, \"second\"]\n */\n maxDuration?: DurationLike;\n\n /**\n * Function to determine partition keys for grouping items into separate batches.\n */\n partitionBy?: (item: TItem) => string;\n\n /**\n * Maximum number of batch handlers that can execute simultaneously.\n *\n * @default 1\n */\n concurrency?: number;\n\n /**\n * Retry configuration for failed batch processing operations.\n */\n retry?: {\n /**\n * The maximum number of attempts.\n *\n * @default 3\n */\n max?: number;\n\n /**\n * The backoff strategy for delays between retries.\n * Can be a fixed number (in ms) or a configuration object for exponential backoff.\n *\n * @default { initial: 200, factor: 2, jitter: true }\n */\n backoff?: number | RetryBackoffOptions;\n\n /**\n * An overall time limit for all retry attempts combined.\n *\n * e.g., `[5, 'seconds']`\n */\n maxDuration?: DurationLike;\n\n /**\n * A function that determines if a retry should be attempted based on the error.\n *\n * @default (error) => true (retries on any error)\n */\n when?: (error: Error) => boolean;\n\n /**\n * A custom callback for when a retry attempt fails.\n * This is called before the delay.\n */\n onError?: (error: Error, attempt: number) => void;\n };\n}\n\n// ---------------------------------------------------------------------------------------------------------------------\n\nexport type BatchItemStatus = \"pending\" | \"processing\" | \"completed\" | \"failed\";\n\nexport interface BatchItemState<TItem, TResponse> {\n id: string;\n item: TItem;\n partitionKey: string;\n status: BatchItemStatus;\n result?: TResponse;\n error?: Error;\n promise?: Promise<TResponse>;\n resolve?: (value: TResponse) => void;\n reject?: (error: Error) => void;\n}\n\nexport interface PartitionState {\n itemIds: string[];\n timeout?: { clear: () => void };\n flushing: boolean;\n}\n\n/**\n * Context object that holds all state for a batch processor instance.\n */\nexport interface BatchContext<TItem, TResponse> {\n options: BatchOptions<TItem, TResponse>;\n itemStates: Map<string, BatchItemState<TItem, TResponse>>;\n partitions: Map<string, PartitionState>;\n activeHandlers: PromiseWithResolvers<void>[];\n 
isShuttingDown: boolean;\n isReady: boolean;\n alepha: Alepha;\n}\n\n// ---------------------------------------------------------------------------------------------------------------------\n\n/**\n * Service for batch processing operations.\n * Provides methods to manage batches of items with automatic flushing based on size or time.\n */\nexport class BatchProvider {\n protected readonly log = $logger();\n protected readonly dateTime = $inject(DateTimeProvider);\n protected readonly retryProvider = $inject(RetryProvider);\n\n /**\n * Creates a new batch context with the given options.\n */\n createContext<TItem, TResponse>(\n alepha: Alepha,\n options: BatchOptions<TItem, TResponse>,\n ): BatchContext<TItem, TResponse> {\n return {\n options,\n itemStates: new Map(),\n partitions: new Map(),\n activeHandlers: [],\n isShuttingDown: false,\n isReady: false,\n alepha,\n };\n }\n\n /**\n * Get the effective maxSize for a context.\n */\n protected getMaxSize<TItem, TResponse>(\n context: BatchContext<TItem, TResponse>,\n ): number {\n return context.options.maxSize ?? 10;\n }\n\n /**\n * Get the effective concurrency for a context.\n */\n protected getConcurrency<TItem, TResponse>(\n context: BatchContext<TItem, TResponse>,\n ): number {\n return context.options.concurrency ?? 1;\n }\n\n /**\n * Get the effective maxDuration for a context.\n */\n protected getMaxDuration<TItem, TResponse>(\n context: BatchContext<TItem, TResponse>,\n ): DurationLike {\n return context.options.maxDuration ?? [1, \"second\"];\n }\n\n /**\n * Pushes an item into the batch and returns immediately with a unique ID.\n * The item will be processed asynchronously with other items when the batch is flushed.\n * Use wait(id) to get the processing result.\n *\n * @throws Error if maxQueueSize is exceeded\n */\n push<TItem, TResponse>(\n context: BatchContext<TItem, TResponse>,\n item: TItem,\n ): string {\n // 1. Generate unique ID\n const id = randomUUID();\n\n // 2. Determine the partition key (with error handling)\n let partitionKey: string;\n try {\n partitionKey = context.options.partitionBy\n ? context.options.partitionBy(item)\n : \"default\";\n } catch (error) {\n this.log.warn(\n \"partitionBy function threw an error, using 'default' partition\",\n { error },\n );\n partitionKey = \"default\";\n }\n\n // 3. Create item state\n const itemState: BatchItemState<TItem, TResponse> = {\n id,\n item,\n partitionKey,\n status: \"pending\",\n };\n\n // CAUTION: Do not log.debug/info here as it may cause infinite loops if logging is batched\n // log.trace is safe\n\n this.log.trace(\"Pushing item to batch\", {\n id,\n partitionKey,\n item,\n });\n\n context.itemStates.set(id, itemState);\n\n // 4. Get or create the partition state\n if (!context.partitions.has(partitionKey)) {\n context.partitions.set(partitionKey, {\n itemIds: [],\n flushing: false,\n });\n }\n const partition = context.partitions.get(partitionKey)!;\n\n // 5. Check maxQueueSize before adding\n if (\n context.options.maxQueueSize !== undefined &&\n partition.itemIds.length >= context.options.maxQueueSize\n ) {\n throw new Error(\n `Batch queue size exceeded for partition '${partitionKey}' (max: ${context.options.maxQueueSize})`,\n );\n }\n\n // 6. Add item ID to partition\n partition.itemIds.push(id);\n\n const maxSize = this.getMaxSize(context);\n const maxDuration = this.getMaxDuration(context);\n\n // 7. 
Only start processing if the app is ready (after \"ready\" hook)\n // During startup, items are just buffered in memory\n if (context.isReady) {\n // Check if the batch is full\n if (partition.itemIds.length >= maxSize) {\n this.log.trace(\n `Batch partition '${partitionKey}' is full, flushing...`,\n );\n this.flushPartition(context, partitionKey).catch((error) =>\n this.log.error(\n `Failed to flush batch partition '${partitionKey}' on max size`,\n error,\n ),\n );\n } else if (!partition.timeout && !partition.flushing) {\n // 8. Start the timeout if it's not already running for this partition and not currently flushing\n partition.timeout = this.dateTime.createTimeout(() => {\n this.log.trace(\n `Batch partition '${partitionKey}' timed out, flushing...`,\n );\n this.flushPartition(context, partitionKey).catch((error) =>\n this.log.error(\n `Failed to flush batch partition '${partitionKey}' on timeout`,\n error,\n ),\n );\n }, maxDuration);\n }\n } else {\n // Not ready yet - just buffer items, no size checks or timeouts\n this.log.trace(\n `Buffering item in partition '${partitionKey}' (app not ready yet, ${partition.itemIds.length} items buffered)`,\n );\n }\n\n // 9. Return ID immediately\n return id;\n }\n\n /**\n * Wait for a specific item to be processed and get its result.\n * @param id The item ID returned from push()\n * @returns The processing result\n * @throws If the item doesn't exist or processing failed\n */\n async wait<TItem, TResponse>(\n context: BatchContext<TItem, TResponse>,\n id: string,\n ): Promise<TResponse> {\n const itemState = context.itemStates.get(id);\n if (!itemState) {\n throw new Error(`Item with id '${id}' not found`);\n }\n\n // If already completed or failed, return immediately\n if (itemState.status === \"completed\") {\n return itemState.result!;\n }\n if (itemState.status === \"failed\") {\n throw itemState.error!;\n }\n\n // Create promise on-demand if not already created\n if (!itemState.promise) {\n itemState.promise = new Promise<TResponse>((resolve, reject) => {\n itemState.resolve = resolve;\n itemState.reject = reject;\n });\n }\n\n return itemState.promise;\n }\n\n /**\n * Get the current status of an item.\n * @param id The item ID returned from push()\n * @returns Status information or undefined if item doesn't exist\n */\n status<TItem, TResponse>(\n context: BatchContext<TItem, TResponse>,\n id: string,\n ):\n | { status: \"pending\" | \"processing\" }\n | { status: \"completed\"; result: TResponse }\n | { status: \"failed\"; error: Error }\n | undefined {\n const itemState = context.itemStates.get(id);\n if (!itemState) {\n return undefined;\n }\n\n if (itemState.status === \"completed\") {\n return { status: \"completed\", result: itemState.result! };\n }\n if (itemState.status === \"failed\") {\n return { status: \"failed\", error: itemState.error! 
};\n }\n return { status: itemState.status };\n }\n\n /**\n * Clears completed and failed items from the context to free memory.\n * Returns the number of items cleared.\n *\n * @param context The batch context\n * @param status Optional: only clear items with this specific status ('completed' or 'failed')\n * @returns The number of items cleared\n */\n clearCompleted<TItem, TResponse>(\n context: BatchContext<TItem, TResponse>,\n status?: \"completed\" | \"failed\",\n ): number {\n let count = 0;\n for (const [id, state] of context.itemStates) {\n if (status) {\n if (state.status === status) {\n context.itemStates.delete(id);\n count++;\n }\n } else if (state.status === \"completed\" || state.status === \"failed\") {\n context.itemStates.delete(id);\n count++;\n }\n }\n return count;\n }\n\n /**\n * Flush all partitions or a specific partition.\n */\n async flush<TItem, TResponse>(\n context: BatchContext<TItem, TResponse>,\n partitionKey?: string,\n ): Promise<void> {\n const promises: Promise<void>[] = [];\n if (partitionKey) {\n if (context.partitions.has(partitionKey)) {\n promises.push(this.flushPartition(context, partitionKey));\n }\n } else {\n for (const key of context.partitions.keys()) {\n promises.push(this.flushPartition(context, key));\n }\n }\n await Promise.all(promises);\n }\n\n /**\n * Flush a specific partition.\n */\n protected async flushPartition<TItem, TResponse>(\n context: BatchContext<TItem, TResponse>,\n partitionKey: string,\n limit?: number,\n ): Promise<void> {\n const partition = context.partitions.get(partitionKey);\n if (!partition || partition.itemIds.length === 0) {\n context.partitions.delete(partitionKey);\n return;\n }\n\n // Clear the timeout and grab the item IDs (up to limit if specified)\n partition.timeout?.clear();\n partition.timeout = undefined;\n const itemsToTake =\n limit !== undefined\n ? Math.min(limit, partition.itemIds.length)\n : partition.itemIds.length;\n const itemIdsToProcess = partition.itemIds.splice(0, itemsToTake);\n\n // Mark partition as flushing to prevent race conditions\n partition.flushing = true;\n\n // Get the items and mark them as processing\n const itemsToProcess: TItem[] = [];\n for (const id of itemIdsToProcess) {\n const itemState = context.itemStates.get(id);\n if (itemState) {\n itemState.status = \"processing\";\n itemsToProcess.push(itemState.item);\n }\n }\n\n const concurrency = this.getConcurrency(context);\n const maxDuration = this.getMaxDuration(context);\n\n // Wait until there's a free slot (if at concurrency limit)\n while (context.activeHandlers.length >= concurrency) {\n this.log.trace(\n `Batch handler is at concurrency limit, waiting for a slot...`,\n );\n // Wait for any single handler to complete, not all of them\n await Promise.race(context.activeHandlers.map((it) => it.promise));\n }\n\n const promise = Promise.withResolvers<void>();\n context.activeHandlers.push(promise);\n let result: any;\n try {\n result = await context.alepha.context.run(() =>\n // during shutdown, call handler directly to avoid retry cancellation\n context.isShuttingDown\n ? 
context.options.handler(itemsToProcess)\n : this.retryProvider.retry(\n {\n ...context.options.retry,\n handler: context.options.handler,\n },\n itemsToProcess,\n ),\n );\n\n // Mark all items as completed and resolve their promises\n for (const id of itemIdsToProcess) {\n const itemState = context.itemStates.get(id);\n if (itemState) {\n itemState.status = \"completed\";\n itemState.result = result;\n // Only resolve if someone is waiting\n itemState.resolve?.(result);\n }\n }\n } catch (error) {\n this.log.error(`Batch handler failed`, error);\n\n // Mark all items as failed and reject their promises\n for (const id of itemIdsToProcess) {\n const itemState = context.itemStates.get(id);\n if (itemState) {\n itemState.status = \"failed\";\n itemState.error = error as Error;\n // Only reject if someone is waiting (promise was created)\n itemState.reject?.(error as Error);\n }\n }\n } finally {\n promise.resolve();\n context.activeHandlers = context.activeHandlers.filter(\n (it) => it !== promise,\n );\n\n // Only delete partition if no new items arrived during processing\n const currentPartition = context.partitions.get(partitionKey);\n if (currentPartition?.flushing && currentPartition.itemIds.length === 0) {\n context.partitions.delete(partitionKey);\n } else if (currentPartition) {\n // Reset flushing flag if partition still exists with items\n currentPartition.flushing = false;\n\n // Restart timeout for items that arrived during flush\n if (currentPartition.itemIds.length > 0 && !currentPartition.timeout) {\n currentPartition.timeout = this.dateTime.createTimeout(() => {\n this.log.trace(\n `Batch partition '${partitionKey}' timed out, flushing...`,\n );\n this.flushPartition(context, partitionKey).catch((error) =>\n this.log.error(\n `Failed to flush batch partition '${partitionKey}' on timeout`,\n error,\n ),\n );\n }, maxDuration);\n }\n }\n }\n }\n\n /**\n * Mark the context as ready and start processing buffered items.\n * Called after the \"ready\" hook.\n */\n async markReady<TItem, TResponse>(\n context: BatchContext<TItem, TResponse>,\n ): Promise<void> {\n this.log.debug(\n \"Batch processor is now ready, starting to process buffered items...\",\n );\n context.isReady = true;\n await this.startProcessing(context);\n }\n\n /**\n * Mark the context as shutting down and flush all remaining items.\n */\n async shutdown<TItem, TResponse>(\n context: BatchContext<TItem, TResponse>,\n ): Promise<void> {\n this.log.debug(\"Flushing all remaining batch partitions on shutdown...\");\n context.isShuttingDown = true;\n await this.flush(context);\n this.log.debug(\"All batch partitions flushed\");\n }\n\n /**\n * Called after the \"ready\" hook to start processing buffered items that were\n * pushed during startup. 
This checks all partitions and starts timeouts/flushes\n * for items that were accumulated before the app was ready.\n */\n protected async startProcessing<TItem, TResponse>(\n context: BatchContext<TItem, TResponse>,\n ): Promise<void> {\n const maxSize = this.getMaxSize(context);\n const maxDuration = this.getMaxDuration(context);\n\n for (const [partitionKey, partition] of context.partitions.entries()) {\n if (partition.itemIds.length === 0) {\n continue;\n }\n\n this.log.trace(\n `Starting processing for partition '${partitionKey}' with ${partition.itemIds.length} buffered items`,\n );\n\n // Flush batches of maxSize while we have items >= maxSize\n while (partition.itemIds.length >= maxSize) {\n this.log.trace(\n `Partition '${partitionKey}' has ${partition.itemIds.length} items, flushing batch of ${maxSize}...`,\n );\n await this.flushPartition(context, partitionKey, maxSize);\n }\n\n // After flushing full batches, start timeout for any remaining items\n if (\n partition.itemIds.length > 0 &&\n !partition.timeout &&\n !partition.flushing\n ) {\n this.log.trace(\n `Starting timeout for partition '${partitionKey}' with ${partition.itemIds.length} remaining items`,\n );\n partition.timeout = this.dateTime.createTimeout(() => {\n this.log.trace(\n `Batch partition '${partitionKey}' timed out, flushing...`,\n );\n this.flushPartition(context, partitionKey).catch((error) =>\n this.log.error(\n `Failed to flush partition '${partitionKey}' on timeout after startup`,\n error,\n ),\n );\n }, maxDuration);\n }\n }\n }\n}\n","import {\n $hook,\n $inject,\n createDescriptor,\n Descriptor,\n KIND,\n type Static,\n type TSchema,\n} from \"alepha\";\nimport type { DurationLike } from \"alepha/datetime\";\nimport type { RetryDescriptorOptions } from \"alepha/retry\";\nimport {\n type BatchContext,\n type BatchItemState,\n type BatchItemStatus,\n BatchProvider,\n} from \"../providers/BatchProvider.ts\";\n\n/**\n * Creates a batch processing descriptor for efficient grouping and processing of multiple operations.\n */\nexport const $batch = <TItem extends TSchema, TResponse>(\n options: BatchDescriptorOptions<TItem, TResponse>,\n): BatchDescriptor<TItem, TResponse> =>\n createDescriptor(BatchDescriptor<TItem, TResponse>, options);\n\n// ---------------------------------------------------------------------------------------------------------------------\n\nexport interface BatchDescriptorOptions<\n TItem extends TSchema,\n TResponse = any,\n> {\n /**\n * TypeBox schema for validating each item added to the batch.\n */\n schema: TItem;\n\n /**\n * The batch processing handler function that processes arrays of validated items.\n */\n handler: (items: Static<TItem>[]) => TResponse;\n\n /**\n * Maximum number of items to collect before automatically flushing the batch.\n */\n maxSize?: number;\n\n /**\n * Maximum number of items that can be queued in a single partition.\n * If exceeded, push() will throw an error.\n */\n maxQueueSize?: number;\n\n /**\n * Maximum time to wait before flushing a batch, even if it hasn't reached maxSize.\n */\n maxDuration?: DurationLike;\n\n /**\n * Function to determine partition keys for grouping items into separate batches.\n */\n partitionBy?: (item: Static<TItem>) => string;\n\n /**\n * Maximum number of batch handlers that can execute simultaneously.\n */\n concurrency?: number;\n\n /**\n * Retry configuration for failed batch processing operations.\n */\n retry?: Omit<RetryDescriptorOptions<() => Array<Static<TItem>>>, \"handler\">;\n}\n\n// 
---------------------------------------------------------------------------------------------------------------------\n\nexport type { BatchItemState, BatchItemStatus };\n\n// ---------------------------------------------------------------------------------------------------------------------\n\nexport class BatchDescriptor<\n TItem extends TSchema,\n TResponse = any,\n> extends Descriptor<BatchDescriptorOptions<TItem, TResponse>> {\n protected readonly batchProvider = $inject(BatchProvider);\n protected readonly context: BatchContext<Static<TItem>, TResponse>;\n\n constructor(\n ...args: ConstructorParameters<\n typeof Descriptor<BatchDescriptorOptions<TItem, TResponse>>\n >\n ) {\n super(...args);\n this.context = this.batchProvider.createContext(this.alepha, {\n handler: this.options.handler,\n maxSize: this.options.maxSize,\n maxQueueSize: this.options.maxQueueSize,\n maxDuration: this.options.maxDuration,\n partitionBy: this.options.partitionBy,\n concurrency: this.options.concurrency,\n retry: this.options.retry,\n });\n }\n\n /**\n * Pushes an item into the batch and returns immediately with a unique ID.\n * The item will be processed asynchronously with other items when the batch is flushed.\n * Use wait(id) to get the processing result.\n */\n public async push(item: Static<TItem>): Promise<string> {\n // Validate the item against the schema\n const validatedItem = this.alepha.codec.validate(this.options.schema, item);\n return this.batchProvider.push(this.context, validatedItem);\n }\n\n /**\n * Wait for a specific item to be processed and get its result.\n * @param id The item ID returned from push()\n * @returns The processing result\n * @throws If the item doesn't exist or processing failed\n */\n public async wait(id: string): Promise<TResponse> {\n return this.batchProvider.wait(this.context, id);\n }\n\n /**\n * Get the current status of an item.\n * @param id The item ID returned from push()\n * @returns Status information or undefined if item doesn't exist\n */\n public status(\n id: string,\n ):\n | { status: \"pending\" | \"processing\" }\n | { status: \"completed\"; result: TResponse }\n | { status: \"failed\"; error: Error }\n | undefined {\n return this.batchProvider.status(this.context, id);\n }\n\n /**\n * Flush all partitions or a specific partition.\n */\n public async flush(partitionKey?: string): Promise<void> {\n return this.batchProvider.flush(this.context, partitionKey);\n }\n\n /**\n * Clears completed and failed items from memory.\n * Call this periodically in long-running applications to prevent memory leaks.\n *\n * @param status Optional: only clear items with this specific status ('completed' or 'failed')\n * @returns The number of items cleared\n */\n public clearCompleted(status?: \"completed\" | \"failed\"): number {\n return this.batchProvider.clearCompleted(this.context, status);\n }\n\n protected readonly onReady = $hook({\n on: \"ready\",\n handler: async () => {\n await this.batchProvider.markReady(this.context);\n },\n });\n\n protected readonly dispose = $hook({\n on: \"stop\",\n priority: \"first\",\n handler: async () => {\n await this.batchProvider.shutdown(this.context);\n },\n });\n}\n\n$batch[KIND] = BatchDescriptor;\n","import { $module } from \"alepha\";\nimport { $batch } from \"./descriptors/$batch.ts\";\nimport { BatchProvider } from \"./providers/BatchProvider.ts\";\n\n// ---------------------------------------------------------------------------------------------------------------------\n\nexport * from 
\"./descriptors/$batch.ts\";\nexport * from \"./providers/BatchProvider.ts\";\n\n// ---------------------------------------------------------------------------------------------------------------------\n\n/**\n * This module allows you to group multiple asynchronous operations into a single \"batch,\" which is then processed together.\n * This is an essential pattern for improving performance, reducing I/O, and interacting efficiently with rate-limited APIs or databases.\n *\n * ```ts\n * import { Alepha, $hook, run, t } from \"alepha\";\n * import { $batch } from \"alepha/batch\";\n *\n * class LoggingService {\n * // define the batch processor\n * logBatch = $batch({\n * schema: t.text(),\n * maxSize: 10,\n * maxDuration: [5, \"seconds\"],\n * handler: async (items) => {\n * console.log(`[BATCH LOG] Processing ${items.length} events:`, items);\n * },\n * });\n *\n * // example of how to use it\n * onReady = $hook({\n * on: \"ready\",\n * handler: async () => {\n * // push() returns an ID immediately\n * const id1 = await this.logBatch.push(\"Application started.\");\n * const id2 = await this.logBatch.push(\"User authenticated.\");\n *\n * // optionally wait for processing to complete\n * await this.logBatch.wait(id1);\n *\n * // or check the status\n * const status = this.logBatch.status(id2);\n * console.log(status?.status); // \"pending\" | \"processing\" | \"completed\" | \"failed\"\n * },\n * });\n * }\n * ```\n *\n * @see {@link $batch}\n * @see {@link BatchProvider}\n * @module alepha.batch\n */\nexport const AlephaBatch = $module({\n name: \"alepha.batch\",\n descriptors: [$batch],\n services: [BatchProvider],\n});\n"],"mappings":";;;;;;;;;;;AAgIA,IAAa,gBAAb,MAA2B;CACzB,AAAmB,kCAAe;CAClC,AAAmB,+BAAmBA,iCAAiB;CACvD,AAAmB,oCAAwBC,2BAAc;;;;CAKzD,cACE,UACA,SACgC;AAChC,SAAO;GACL;GACA,4BAAY,IAAI,KAAK;GACrB,4BAAY,IAAI,KAAK;GACrB,gBAAgB,EAAE;GAClB,gBAAgB;GAChB,SAAS;GACT;GACD;;;;;CAMH,AAAU,WACR,SACQ;AACR,SAAO,QAAQ,QAAQ,WAAW;;;;;CAMpC,AAAU,eACR,SACQ;AACR,SAAO,QAAQ,QAAQ,eAAe;;;;;CAMxC,AAAU,eACR,SACc;AACd,SAAO,QAAQ,QAAQ,eAAe,CAAC,GAAG,SAAS;;;;;;;;;CAUrD,KACE,SACA,MACQ;EAER,MAAM,kCAAiB;EAGvB,IAAIC;AACJ,MAAI;AACF,kBAAe,QAAQ,QAAQ,cAC3B,QAAQ,QAAQ,YAAY,KAAK,GACjC;WACG,OAAO;AACd,QAAK,IAAI,KACP,kEACA,EAAE,OAAO,CACV;AACD,kBAAe;;EAIjB,MAAMC,YAA8C;GAClD;GACA;GACA;GACA,QAAQ;GACT;AAKD,OAAK,IAAI,MAAM,yBAAyB;GACtC;GACA;GACA;GACD,CAAC;AAEF,UAAQ,WAAW,IAAI,IAAI,UAAU;AAGrC,MAAI,CAAC,QAAQ,WAAW,IAAI,aAAa,CACvC,SAAQ,WAAW,IAAI,cAAc;GACnC,SAAS,EAAE;GACX,UAAU;GACX,CAAC;EAEJ,MAAM,YAAY,QAAQ,WAAW,IAAI,aAAa;AAGtD,MACE,QAAQ,QAAQ,iBAAiB,UACjC,UAAU,QAAQ,UAAU,QAAQ,QAAQ,aAE5C,OAAM,IAAI,MACR,4CAA4C,aAAa,UAAU,QAAQ,QAAQ,aAAa,GACjG;AAIH,YAAU,QAAQ,KAAK,GAAG;EAE1B,MAAM,UAAU,KAAK,WAAW,QAAQ;EACxC,MAAM,cAAc,KAAK,eAAe,QAAQ;AAIhD,MAAI,QAAQ,SAEV;OAAI,UAAU,QAAQ,UAAU,SAAS;AACvC,SAAK,IAAI,MACP,oBAAoB,aAAa,wBAClC;AACD,SAAK,eAAe,SAAS,aAAa,CAAC,OAAO,UAChD,KAAK,IAAI,MACP,oCAAoC,aAAa,gBACjD,MACD,CACF;cACQ,CAAC,UAAU,WAAW,CAAC,UAAU,SAE1C,WAAU,UAAU,KAAK,SAAS,oBAAoB;AACpD,SAAK,IAAI,MACP,oBAAoB,aAAa,0BAClC;AACD,SAAK,eAAe,SAAS,aAAa,CAAC,OAAO,UAChD,KAAK,IAAI,MACP,oCAAoC,aAAa,eACjD,MACD,CACF;MACA,YAAY;QAIjB,MAAK,IAAI,MACP,gCAAgC,aAAa,wBAAwB,UAAU,QAAQ,OAAO,kBAC/F;AAIH,SAAO;;;;;;;;CAST,MAAM,KACJ,SACA,IACoB;EACpB,MAAM,YAAY,QAAQ,WAAW,IAAI,GAAG;AAC5C,MAAI,CAAC,UACH,OAAM,IAAI,MAAM,iBAAiB,GAAG,aAAa;AAInD,MAAI,UAAU,WAAW,YACvB,QAAO,UAAU;AAEnB,MAAI,UAAU,WAAW,SACvB,OAAM,UAAU;AAIlB,MAAI,CAAC,UAAU,QACb,WAAU,UAAU,IAAI,SAAoB,SAAS,WAAW;AAC9D,aAAU,UAAU;AACpB,aAAU,SAAS;IACnB;AAGJ,SAAO,UAAU;;;;;;;CAQnB,OACE,SACA,IAKY;EACZ,MAAM,YAAY,QAAQ,WAAW,IAAI,GAAG;AAC5C,MAAI,CAAC,UAC
H;AAGF,MAAI,UAAU,WAAW,YACvB,QAAO;GAAE,QAAQ;GAAa,QAAQ,UAAU;GAAS;AAE3D,MAAI,UAAU,WAAW,SACvB,QAAO;GAAE,QAAQ;GAAU,OAAO,UAAU;GAAQ;AAEtD,SAAO,EAAE,QAAQ,UAAU,QAAQ;;;;;;;;;;CAWrC,eACE,SACA,QACQ;EACR,IAAI,QAAQ;AACZ,OAAK,MAAM,CAAC,IAAI,UAAU,QAAQ,WAChC,KAAI,QACF;OAAI,MAAM,WAAW,QAAQ;AAC3B,YAAQ,WAAW,OAAO,GAAG;AAC7B;;aAEO,MAAM,WAAW,eAAe,MAAM,WAAW,UAAU;AACpE,WAAQ,WAAW,OAAO,GAAG;AAC7B;;AAGJ,SAAO;;;;;CAMT,MAAM,MACJ,SACA,cACe;EACf,MAAMC,WAA4B,EAAE;AACpC,MAAI,cACF;OAAI,QAAQ,WAAW,IAAI,aAAa,CACtC,UAAS,KAAK,KAAK,eAAe,SAAS,aAAa,CAAC;QAG3D,MAAK,MAAM,OAAO,QAAQ,WAAW,MAAM,CACzC,UAAS,KAAK,KAAK,eAAe,SAAS,IAAI,CAAC;AAGpD,QAAM,QAAQ,IAAI,SAAS;;;;;CAM7B,MAAgB,eACd,SACA,cACA,OACe;EACf,MAAM,YAAY,QAAQ,WAAW,IAAI,aAAa;AACtD,MAAI,CAAC,aAAa,UAAU,QAAQ,WAAW,GAAG;AAChD,WAAQ,WAAW,OAAO,aAAa;AACvC;;AAIF,YAAU,SAAS,OAAO;AAC1B,YAAU,UAAU;EACpB,MAAM,cACJ,UAAU,SACN,KAAK,IAAI,OAAO,UAAU,QAAQ,OAAO,GACzC,UAAU,QAAQ;EACxB,MAAM,mBAAmB,UAAU,QAAQ,OAAO,GAAG,YAAY;AAGjE,YAAU,WAAW;EAGrB,MAAMC,iBAA0B,EAAE;AAClC,OAAK,MAAM,MAAM,kBAAkB;GACjC,MAAM,YAAY,QAAQ,WAAW,IAAI,GAAG;AAC5C,OAAI,WAAW;AACb,cAAU,SAAS;AACnB,mBAAe,KAAK,UAAU,KAAK;;;EAIvC,MAAM,cAAc,KAAK,eAAe,QAAQ;EAChD,MAAM,cAAc,KAAK,eAAe,QAAQ;AAGhD,SAAO,QAAQ,eAAe,UAAU,aAAa;AACnD,QAAK,IAAI,MACP,+DACD;AAED,SAAM,QAAQ,KAAK,QAAQ,eAAe,KAAK,OAAO,GAAG,QAAQ,CAAC;;EAGpE,MAAM,UAAU,QAAQ,eAAqB;AAC7C,UAAQ,eAAe,KAAK,QAAQ;EACpC,IAAIC;AACJ,MAAI;AACF,YAAS,MAAM,QAAQ,OAAO,QAAQ,UAEpC,QAAQ,iBACJ,QAAQ,QAAQ,QAAQ,eAAe,GACvC,KAAK,cAAc,MACjB;IACE,GAAG,QAAQ,QAAQ;IACnB,SAAS,QAAQ,QAAQ;IAC1B,EACD,eACD,CACN;AAGD,QAAK,MAAM,MAAM,kBAAkB;IACjC,MAAM,YAAY,QAAQ,WAAW,IAAI,GAAG;AAC5C,QAAI,WAAW;AACb,eAAU,SAAS;AACnB,eAAU,SAAS;AAEnB,eAAU,UAAU,OAAO;;;WAGxB,OAAO;AACd,QAAK,IAAI,MAAM,wBAAwB,MAAM;AAG7C,QAAK,MAAM,MAAM,kBAAkB;IACjC,MAAM,YAAY,QAAQ,WAAW,IAAI,GAAG;AAC5C,QAAI,WAAW;AACb,eAAU,SAAS;AACnB,eAAU,QAAQ;AAElB,eAAU,SAAS,MAAe;;;YAG9B;AACR,WAAQ,SAAS;AACjB,WAAQ,iBAAiB,QAAQ,eAAe,QAC7C,OAAO,OAAO,QAChB;GAGD,MAAM,mBAAmB,QAAQ,WAAW,IAAI,aAAa;AAC7D,OAAI,kBAAkB,YAAY,iBAAiB,QAAQ,WAAW,EACpE,SAAQ,WAAW,OAAO,aAAa;YAC9B,kBAAkB;AAE3B,qBAAiB,WAAW;AAG5B,QAAI,iBAAiB,QAAQ,SAAS,KAAK,CAAC,iBAAiB,QAC3D,kBAAiB,UAAU,KAAK,SAAS,oBAAoB;AAC3D,UAAK,IAAI,MACP,oBAAoB,aAAa,0BAClC;AACD,UAAK,eAAe,SAAS,aAAa,CAAC,OAAO,UAChD,KAAK,IAAI,MACP,oCAAoC,aAAa,eACjD,MACD,CACF;OACA,YAAY;;;;;;;;CAUvB,MAAM,UACJ,SACe;AACf,OAAK,IAAI,MACP,sEACD;AACD,UAAQ,UAAU;AAClB,QAAM,KAAK,gBAAgB,QAAQ;;;;;CAMrC,MAAM,SACJ,SACe;AACf,OAAK,IAAI,MAAM,yDAAyD;AACxE,UAAQ,iBAAiB;AACzB,QAAM,KAAK,MAAM,QAAQ;AACzB,OAAK,IAAI,MAAM,+BAA+B;;;;;;;CAQhD,MAAgB,gBACd,SACe;EACf,MAAM,UAAU,KAAK,WAAW,QAAQ;EACxC,MAAM,cAAc,KAAK,eAAe,QAAQ;AAEhD,OAAK,MAAM,CAAC,cAAc,cAAc,QAAQ,WAAW,SAAS,EAAE;AACpE,OAAI,UAAU,QAAQ,WAAW,EAC/B;AAGF,QAAK,IAAI,MACP,sCAAsC,aAAa,SAAS,UAAU,QAAQ,OAAO,iBACtF;AAGD,UAAO,UAAU,QAAQ,UAAU,SAAS;AAC1C,SAAK,IAAI,MACP,cAAc,aAAa,QAAQ,UAAU,QAAQ,OAAO,4BAA4B,QAAQ,KACjG;AACD,UAAM,KAAK,eAAe,SAAS,cAAc,QAAQ;;AAI3D,OACE,UAAU,QAAQ,SAAS,KAC3B,CAAC,UAAU,WACX,CAAC,UAAU,UACX;AACA,SAAK,IAAI,MACP,mCAAmC,aAAa,SAAS,UAAU,QAAQ,OAAO,kBACnF;AACD,cAAU,UAAU,KAAK,SAAS,oBAAoB;AACpD,UAAK,IAAI,MACP,oBAAoB,aAAa,0BAClC;AACD,UAAK,eAAe,SAAS,aAAa,CAAC,OAAO,UAChD,KAAK,IAAI,MACP,8BAA8B,aAAa,6BAC3C,MACD,CACF;OACA,YAAY;;;;;;;;;;;ACzjBvB,MAAa,UACX,yCAEiB,iBAAmC,QAAQ;AAwD9D,IAAa,kBAAb,cAGUC,kBAAqD;CAC7D,AAAmB,oCAAwB,cAAc;CACzD,AAAmB;CAEnB,YACE,GAAG,MAGH;AACA,QAAM,GAAG,KAAK;AACd,OAAK,UAAU,KAAK,cAAc,cAAc,KAAK,QAAQ;GAC3D,SAAS,KAAK,QAAQ;GACtB,SAAS,KAAK,QAAQ;GACtB,cAAc,KAAK,QAAQ;GAC3B,aAAa,KAAK,QAAQ;GAC1B,aAAa,KAAK,QAAQ;GAC1B,aAAa,KAAK,QAAQ;GAC1B,OAAO,KAAK,QAAQ;GACrB,CAAC;;;;;;;CAQJ,MAAa,KAAK,MAAsC;EAEtD,MAAM,gBAAgB,KAAK,OAAO,MAAM,SAAS,KAAK,QAAQ,QAAQ,KAAK;AAC3E,SAAO,KAAK,cAAc,KAAK,KAAK,
SAAS,cAAc;;;;;;;;CAS7D,MAAa,KAAK,IAAgC;AAChD,SAAO,KAAK,cAAc,KAAK,KAAK,SAAS,GAAG;;;;;;;CAQlD,AAAO,OACL,IAKY;AACZ,SAAO,KAAK,cAAc,OAAO,KAAK,SAAS,GAAG;;;;;CAMpD,MAAa,MAAM,cAAsC;AACvD,SAAO,KAAK,cAAc,MAAM,KAAK,SAAS,aAAa;;;;;;;;;CAU7D,AAAO,eAAe,QAAyC;AAC7D,SAAO,KAAK,cAAc,eAAe,KAAK,SAAS,OAAO;;CAGhE,AAAmB,4BAAgB;EACjC,IAAI;EACJ,SAAS,YAAY;AACnB,SAAM,KAAK,cAAc,UAAU,KAAK,QAAQ;;EAEnD,CAAC;CAEF,AAAmB,4BAAgB;EACjC,IAAI;EACJ,UAAU;EACV,SAAS,YAAY;AACnB,SAAM,KAAK,cAAc,SAAS,KAAK,QAAQ;;EAElD,CAAC;;AAGJ,OAAOC,eAAQ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;ACzHf,MAAa,kCAAsB;CACjC,MAAM;CACN,aAAa,CAAC,OAAO;CACrB,UAAU,CAAC,cAAc;CAC1B,CAAC"}