@atproto/bsky 0.0.80 → 0.0.82

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. package/CHANGELOG.md +15 -0
  2. package/dist/data-plane/client.d.ts.map +1 -1
  3. package/dist/data-plane/client.js +2 -1
  4. package/dist/data-plane/client.js.map +1 -1
  5. package/dist/data-plane/server/db/migrations/20240829T211238293Z-simplify-actor-sync.d.ts +4 -0
  6. package/dist/data-plane/server/db/migrations/20240829T211238293Z-simplify-actor-sync.d.ts.map +1 -0
  7. package/dist/data-plane/server/db/migrations/20240829T211238293Z-simplify-actor-sync.js +26 -0
  8. package/dist/data-plane/server/db/migrations/20240829T211238293Z-simplify-actor-sync.js.map +1 -0
  9. package/dist/data-plane/server/db/migrations/index.d.ts +1 -0
  10. package/dist/data-plane/server/db/migrations/index.d.ts.map +1 -1
  11. package/dist/data-plane/server/db/migrations/index.js +2 -1
  12. package/dist/data-plane/server/db/migrations/index.js.map +1 -1
  13. package/dist/data-plane/server/db/tables/actor-sync.d.ts +0 -3
  14. package/dist/data-plane/server/db/tables/actor-sync.d.ts.map +1 -1
  15. package/dist/data-plane/server/db/tables/actor-sync.js.map +1 -1
  16. package/dist/data-plane/server/indexing/index.d.ts +2 -7
  17. package/dist/data-plane/server/indexing/index.d.ts.map +1 -1
  18. package/dist/data-plane/server/indexing/index.js +4 -21
  19. package/dist/data-plane/server/indexing/index.js.map +1 -1
  20. package/dist/data-plane/server/subscription.d.ts +26 -0
  21. package/dist/data-plane/server/subscription.d.ts.map +1 -0
  22. package/dist/data-plane/server/subscription.js +115 -0
  23. package/dist/data-plane/server/subscription.js.map +1 -0
  24. package/dist/lexicon/lexicons.d.ts +110 -3
  25. package/dist/lexicon/lexicons.d.ts.map +1 -1
  26. package/dist/lexicon/lexicons.js +117 -6
  27. package/dist/lexicon/lexicons.js.map +1 -1
  28. package/dist/lexicon/types/com/atproto/repo/applyWrites.d.ts +38 -4
  29. package/dist/lexicon/types/com/atproto/repo/applyWrites.d.ts.map +1 -1
  30. package/dist/lexicon/types/com/atproto/repo/applyWrites.js +31 -1
  31. package/dist/lexicon/types/com/atproto/repo/applyWrites.js.map +1 -1
  32. package/dist/lexicon/types/com/atproto/repo/createRecord.d.ts +5 -2
  33. package/dist/lexicon/types/com/atproto/repo/createRecord.d.ts.map +1 -1
  34. package/dist/lexicon/types/com/atproto/repo/defs.d.ts +12 -0
  35. package/dist/lexicon/types/com/atproto/repo/defs.d.ts.map +1 -0
  36. package/dist/lexicon/types/com/atproto/repo/defs.js +16 -0
  37. package/dist/lexicon/types/com/atproto/repo/defs.js.map +1 -0
  38. package/dist/lexicon/types/com/atproto/repo/deleteRecord.d.ts +14 -2
  39. package/dist/lexicon/types/com/atproto/repo/deleteRecord.d.ts.map +1 -1
  40. package/dist/lexicon/types/com/atproto/repo/putRecord.d.ts +5 -2
  41. package/dist/lexicon/types/com/atproto/repo/putRecord.d.ts.map +1 -1
  42. package/package.json +6 -5
  43. package/src/data-plane/client.ts +4 -1
  44. package/src/data-plane/server/db/migrations/20240829T211238293Z-simplify-actor-sync.ts +23 -0
  45. package/src/data-plane/server/db/migrations/index.ts +1 -0
  46. package/src/data-plane/server/db/tables/actor-sync.ts +0 -3
  47. package/src/data-plane/server/indexing/index.ts +4 -25
  48. package/src/data-plane/server/subscription.ts +104 -0
  49. package/src/lexicon/lexicons.ts +117 -6
  50. package/src/lexicon/types/com/atproto/repo/applyWrites.ts +70 -3
  51. package/src/lexicon/types/com/atproto/repo/createRecord.ts +5 -2
  52. package/src/lexicon/types/com/atproto/repo/defs.ts +25 -0
  53. package/src/lexicon/types/com/atproto/repo/deleteRecord.ts +13 -1
  54. package/src/lexicon/types/com/atproto/repo/putRecord.ts +5 -2
  55. package/tests/data-plane/indexing.test.ts +1 -1
  56. package/tests/data-plane/{subscription/repo.test.ts → subscription.test.ts} +4 -9
  57. package/tests/views/actor-search.test.ts +1 -1
  58. package/dist/data-plane/server/subscription/index.d.ts +0 -33
  59. package/dist/data-plane/server/subscription/index.d.ts.map +0 -1
  60. package/dist/data-plane/server/subscription/index.js +0 -341
  61. package/dist/data-plane/server/subscription/index.js.map +0 -1
  62. package/dist/data-plane/server/subscription/util.d.ts +0 -65
  63. package/dist/data-plane/server/subscription/util.d.ts.map +0 -1
  64. package/dist/data-plane/server/subscription/util.js +0 -215
  65. package/dist/data-plane/server/subscription/util.js.map +0 -1
  66. package/src/data-plane/server/subscription/index.ts +0 -352
  67. package/src/data-plane/server/subscription/util.ts +0 -156
  68. package/tests/data-plane/subscription/util.test.ts +0 -185
@@ -1,352 +0,0 @@
1
- import assert from 'node:assert'
2
- import { CID } from 'multiformats/cid'
3
- import { AtUri } from '@atproto/syntax'
4
- import { Subscription } from '@atproto/xrpc-server'
5
- import { cborDecode, handleAllSettledErrors } from '@atproto/common'
6
- import { ValidationError } from '@atproto/lexicon'
7
- import { IdResolver } from '@atproto/identity'
8
- import {
9
- WriteOpAction,
10
- readCarWithRoot,
11
- cborToLexRecord,
12
- def,
13
- Commit,
14
- } from '@atproto/repo'
15
- import { ids, lexicons } from '../../../lexicon/lexicons'
16
- import { OutputSchema as Message } from '../../../lexicon/types/com/atproto/sync/subscribeRepos'
17
- import * as message from '../../../lexicon/types/com/atproto/sync/subscribeRepos'
18
- import { subLogger as log } from '../../../logger'
19
- import { IndexingService } from '../indexing'
20
- import { Database } from '../db'
21
- import {
22
- ConsecutiveItem,
23
- ConsecutiveList,
24
- PartitionedQueue,
25
- ProcessableMessage,
26
- loggableMessage,
27
- } from './util'
28
- import { BackgroundQueue } from '../background'
29
-
30
- export class RepoSubscription {
31
- ac = new AbortController()
32
- running: Promise<void> | undefined
33
- cursor = 0
34
- seenSeq: number | null = null
35
- repoQueue = new PartitionedQueue({ concurrency: Infinity })
36
- consecutive = new ConsecutiveList<number>()
37
- background: BackgroundQueue
38
- indexingSvc: IndexingService
39
-
40
- constructor(
41
- private opts: {
42
- service: string
43
- db: Database
44
- idResolver: IdResolver
45
- background: BackgroundQueue
46
- },
47
- ) {
48
- this.background = new BackgroundQueue(this.opts.db)
49
- this.indexingSvc = new IndexingService(
50
- this.opts.db,
51
- this.opts.idResolver,
52
- this.background,
53
- )
54
- }
55
-
56
- run() {
57
- if (this.running) return
58
- this.ac = new AbortController()
59
- this.repoQueue = new PartitionedQueue({ concurrency: Infinity })
60
- this.consecutive = new ConsecutiveList<number>()
61
- this.running = this.process()
62
- .catch((err) => {
63
- if (err.name !== 'AbortError') {
64
- // allow this to cause an unhandled rejection, let deployment handle the crash.
65
- log.error({ err }, 'subscription crashed')
66
- throw err
67
- }
68
- })
69
- .finally(() => (this.running = undefined))
70
- }
71
-
72
- private async process() {
73
- const sub = this.getSubscription()
74
- for await (const msg of sub) {
75
- const details = getMessageDetails(msg)
76
- if ('info' in details) {
77
- // These messages are not sequenced, we just log them and carry on
78
- log.warn(
79
- { provider: this.opts.service, message: loggableMessage(msg) },
80
- `sub ${details.info ? 'info' : 'unknown'} message`,
81
- )
82
- continue
83
- }
84
- const item = this.consecutive.push(details.seq)
85
- this.repoQueue.add(details.repo, async () => {
86
- await this.handleMessage(item, details)
87
- })
88
- this.seenSeq = details.seq
89
- await this.repoQueue.main.onEmpty() // backpressure
90
- }
91
- }
92
-
93
- private async handleMessage(
94
- item: ConsecutiveItem<number>,
95
- envelope: Envelope,
96
- ) {
97
- const msg = envelope.message
98
- try {
99
- if (message.isCommit(msg)) {
100
- await this.handleCommit(msg)
101
- } else if (message.isHandle(msg)) {
102
- await this.handleUpdateHandle(msg)
103
- } else if (message.isIdentity(msg)) {
104
- await this.handleIdentityEvt(msg)
105
- } else if (message.isAccount(msg)) {
106
- await this.handleAccountEvt(msg)
107
- } else if (message.isTombstone(msg)) {
108
- // Ignore tombstones
109
- } else if (message.isMigrate(msg)) {
110
- // Ignore migrations
111
- } else {
112
- const exhaustiveCheck: never = msg
113
- throw new Error(`Unhandled message type: ${exhaustiveCheck['$type']}`)
114
- }
115
- } catch (err) {
116
- // We log messages we can't process and move on:
117
- // otherwise the cursor would get stuck on a poison message.
118
- log.error(
119
- { err, message: loggableMessage(msg) },
120
- 'indexer message processing error',
121
- )
122
- } finally {
123
- const latest = item.complete().at(-1)
124
- if (latest !== undefined) {
125
- this.cursor = latest
126
- }
127
- }
128
- }
129
-
130
- private async handleCommit(msg: message.Commit) {
131
- const indexRecords = async () => {
132
- const { root, rootCid, ops } = await getOps(msg)
133
- if (msg.tooBig) {
134
- await this.indexingSvc.indexRepo(msg.repo, rootCid.toString())
135
- await this.indexingSvc.setCommitLastSeen(root, msg)
136
- return
137
- }
138
- if (msg.rebase) {
139
- const needsReindex =
140
- await this.indexingSvc.checkCommitNeedsIndexing(root)
141
- if (needsReindex) {
142
- await this.indexingSvc.indexRepo(msg.repo, rootCid.toString())
143
- }
144
- await this.indexingSvc.setCommitLastSeen(root, msg)
145
- return
146
- }
147
- for (const op of ops) {
148
- if (op.action === WriteOpAction.Delete) {
149
- await this.indexingSvc.deleteRecord(op.uri)
150
- } else {
151
- try {
152
- await this.indexingSvc.indexRecord(
153
- op.uri,
154
- op.cid,
155
- op.record,
156
- op.action, // create or update
157
- msg.time,
158
- )
159
- } catch (err) {
160
- if (err instanceof ValidationError) {
161
- log.warn(
162
- {
163
- did: msg.repo,
164
- commit: msg.commit.toString(),
165
- uri: op.uri.toString(),
166
- cid: op.cid.toString(),
167
- },
168
- 'skipping indexing of invalid record',
169
- )
170
- } else {
171
- log.error(
172
- {
173
- err,
174
- did: msg.repo,
175
- commit: msg.commit.toString(),
176
- uri: op.uri.toString(),
177
- cid: op.cid.toString(),
178
- },
179
- 'skipping indexing due to error processing record',
180
- )
181
- }
182
- }
183
- }
184
- }
185
- await this.indexingSvc.setCommitLastSeen(root, msg)
186
- }
187
- const results = await Promise.allSettled([
188
- indexRecords(),
189
- this.indexingSvc.indexHandle(msg.repo, msg.time),
190
- ])
191
- handleAllSettledErrors(results)
192
- }
193
-
194
- private async handleUpdateHandle(msg: message.Handle) {
195
- await this.indexingSvc.indexHandle(msg.did, msg.time, true)
196
- }
197
-
198
- private async handleIdentityEvt(msg: message.Identity) {
199
- await this.indexingSvc.indexHandle(msg.did, msg.time, true)
200
- }
201
-
202
- private async handleAccountEvt(msg: message.Account) {
203
- if (msg.active === false && msg.status === 'deleted') {
204
- await this.indexingSvc.deleteActor(msg.did)
205
- } else {
206
- await this.indexingSvc.updateActorStatus(msg.did, msg.active, msg.status)
207
- }
208
- }
209
-
210
- private getSubscription() {
211
- return new Subscription({
212
- service: this.opts.service,
213
- method: ids.ComAtprotoSyncSubscribeRepos,
214
- signal: this.ac.signal,
215
- getParams: async () => {
216
- return { cursor: this.cursor }
217
- },
218
- onReconnectError: (err, reconnects, initial) => {
219
- log.warn({ err, reconnects, initial }, 'sub reconnect')
220
- },
221
- validate: (value) => {
222
- try {
223
- return lexicons.assertValidXrpcMessage<Message>(
224
- ids.ComAtprotoSyncSubscribeRepos,
225
- value,
226
- )
227
- } catch (err) {
228
- log.warn(
229
- {
230
- err,
231
- seq: ifNumber(value?.['seq']),
232
- repo: ifString(value?.['repo']),
233
- commit: ifString(value?.['commit']?.toString()),
234
- time: ifString(value?.['time']),
235
- provider: this.opts.service,
236
- },
237
- 'ingester sub skipped invalid message',
238
- )
239
- }
240
- },
241
- })
242
- }
243
-
244
- async destroy() {
245
- this.ac.abort()
246
- await this.running
247
- await this.repoQueue.destroy()
248
- await this.background.processAll()
249
- }
250
- }
251
-
252
- type Envelope = {
253
- repo: string
254
- message: ProcessableMessage
255
- }
256
-
257
- function ifString(val: unknown): string | undefined {
258
- return typeof val === 'string' ? val : undefined
259
- }
260
-
261
- function ifNumber(val: unknown): number | undefined {
262
- return typeof val === 'number' ? val : undefined
263
- }
264
-
265
- function getMessageDetails(msg: Message):
266
- | { info: message.Info | null }
267
- | {
268
- seq: number
269
- repo: string
270
- message: ProcessableMessage
271
- } {
272
- if (message.isCommit(msg)) {
273
- return { seq: msg.seq, repo: msg.repo, message: msg }
274
- } else if (message.isHandle(msg)) {
275
- return { seq: msg.seq, repo: msg.did, message: msg }
276
- } else if (message.isIdentity(msg)) {
277
- return { seq: msg.seq, repo: msg.did, message: msg }
278
- } else if (message.isAccount(msg)) {
279
- return { seq: msg.seq, repo: msg.did, message: msg }
280
- } else if (message.isMigrate(msg)) {
281
- return { seq: msg.seq, repo: msg.did, message: msg }
282
- } else if (message.isTombstone(msg)) {
283
- return { seq: msg.seq, repo: msg.did, message: msg }
284
- } else if (message.isInfo(msg)) {
285
- return { info: msg }
286
- }
287
- return { info: null }
288
- }
289
-
290
- async function getOps(
291
- msg: message.Commit,
292
- ): Promise<{ root: Commit; rootCid: CID; ops: PreparedWrite[] }> {
293
- const car = await readCarWithRoot(msg.blocks as Uint8Array)
294
- const rootBytes = car.blocks.get(car.root)
295
- assert(rootBytes, 'Missing commit block in car slice')
296
-
297
- const root = def.commit.schema.parse(cborDecode(rootBytes))
298
- const ops: PreparedWrite[] = msg.ops.map((op) => {
299
- const [collection, rkey] = op.path.split('/')
300
- assert(collection && rkey)
301
- if (
302
- op.action === WriteOpAction.Create ||
303
- op.action === WriteOpAction.Update
304
- ) {
305
- assert(op.cid)
306
- const record = car.blocks.get(op.cid)
307
- assert(record)
308
- return {
309
- action:
310
- op.action === WriteOpAction.Create
311
- ? WriteOpAction.Create
312
- : WriteOpAction.Update,
313
- cid: op.cid,
314
- record: cborToLexRecord(record),
315
- blobs: [],
316
- uri: AtUri.make(msg.repo, collection, rkey),
317
- }
318
- } else if (op.action === WriteOpAction.Delete) {
319
- return {
320
- action: WriteOpAction.Delete,
321
- uri: AtUri.make(msg.repo, collection, rkey),
322
- }
323
- } else {
324
- throw new Error(`Unknown repo op action: ${op.action}`)
325
- }
326
- })
327
-
328
- return { root, rootCid: car.root, ops }
329
- }
330
-
331
- type PreparedCreate = {
332
- action: WriteOpAction.Create
333
- uri: AtUri
334
- cid: CID
335
- record: Record<string, unknown>
336
- blobs: CID[] // differs from similar type in pds
337
- }
338
-
339
- type PreparedUpdate = {
340
- action: WriteOpAction.Update
341
- uri: AtUri
342
- cid: CID
343
- record: Record<string, unknown>
344
- blobs: CID[] // differs from similar type in pds
345
- }
346
-
347
- type PreparedDelete = {
348
- action: WriteOpAction.Delete
349
- uri: AtUri
350
- }
351
-
352
- type PreparedWrite = PreparedCreate | PreparedUpdate | PreparedDelete
@@ -1,156 +0,0 @@
1
- import assert from 'node:assert'
2
- import PQueue from 'p-queue'
3
- import { OutputSchema as RepoMessage } from '../../../lexicon/types/com/atproto/sync/subscribeRepos'
4
- import * as message from '../../../lexicon/types/com/atproto/sync/subscribeRepos'
5
-
6
- // A queue with arbitrarily many partitions, each processing work sequentially.
7
- // Partitions are created lazily and taken out of memory when they go idle.
8
- export class PartitionedQueue {
9
- main: PQueue
10
- partitions = new Map<string, PQueue>()
11
-
12
- constructor(opts: { concurrency: number }) {
13
- this.main = new PQueue({ concurrency: opts.concurrency })
14
- }
15
-
16
- async add(partitionId: string, task: () => Promise<void>) {
17
- if (this.main.isPaused) return
18
- return this.main.add(() => {
19
- return this.getPartition(partitionId).add(task)
20
- })
21
- }
22
-
23
- async destroy() {
24
- this.main.pause()
25
- this.main.clear()
26
- this.partitions.forEach((p) => p.clear())
27
- await this.main.onIdle() // All in-flight work completes
28
- }
29
-
30
- private getPartition(partitionId: string) {
31
- let partition = this.partitions.get(partitionId)
32
- if (!partition) {
33
- partition = new PQueue({ concurrency: 1 })
34
- partition.once('idle', () => this.partitions.delete(partitionId))
35
- this.partitions.set(partitionId, partition)
36
- }
37
- return partition
38
- }
39
- }
40
-
41
- export class LatestQueue {
42
- queue = new PQueue({ concurrency: 1 })
43
-
44
- async add(task: () => Promise<void>) {
45
- if (this.queue.isPaused) return
46
- this.queue.clear() // Only queue the latest task, invalidate any previous ones
47
- return this.queue.add(task)
48
- }
49
-
50
- async destroy() {
51
- this.queue.pause()
52
- this.queue.clear()
53
- await this.queue.onIdle() // All in-flight work completes
54
- }
55
- }
56
-
57
- /**
58
- * Add items to a list, and mark those items as
59
- * completed. Upon item completion, get list of consecutive
60
- * items completed at the head of the list. Example:
61
- *
62
- * const consecutive = new ConsecutiveList<number>()
63
- * const item1 = consecutive.push(1)
64
- * const item2 = consecutive.push(2)
65
- * const item3 = consecutive.push(3)
66
- * item2.complete() // []
67
- * item1.complete() // [1, 2]
68
- * item3.complete() // [3]
69
- *
70
- */
71
- export class ConsecutiveList<T> {
72
- list: ConsecutiveItem<T>[] = []
73
-
74
- push(value: T) {
75
- const item = new ConsecutiveItem<T>(this, value)
76
- this.list.push(item)
77
- return item
78
- }
79
-
80
- complete(): T[] {
81
- let i = 0
82
- while (this.list[i]?.isComplete) {
83
- i += 1
84
- }
85
- return this.list.splice(0, i).map((item) => item.value)
86
- }
87
- }
88
-
89
- export class ConsecutiveItem<T> {
90
- isComplete = false
91
- constructor(
92
- private consecutive: ConsecutiveList<T>,
93
- public value: T,
94
- ) {}
95
-
96
- complete() {
97
- this.isComplete = true
98
- return this.consecutive.complete()
99
- }
100
- }
101
-
102
- export class PerfectMap<K, V> extends Map<K, V> {
103
- get(key: K): V {
104
- const val = super.get(key)
105
- assert(val !== undefined, `Key not found in PerfectMap: ${key}`)
106
- return val
107
- }
108
- }
109
-
110
- // These are the message types that have a sequence number and a repo
111
- export type ProcessableMessage =
112
- | message.Commit
113
- | message.Handle
114
- | message.Identity
115
- | message.Migrate
116
- | message.Tombstone
117
-
118
- export function loggableMessage(msg: RepoMessage) {
119
- if (message.isCommit(msg)) {
120
- const { seq, rebase, prev, repo, commit, time, tooBig, blobs } = msg
121
- return {
122
- $type: msg.$type,
123
- seq,
124
- rebase,
125
- prev: prev?.toString(),
126
- repo,
127
- commit: commit.toString(),
128
- time,
129
- tooBig,
130
- hasBlobs: blobs.length > 0,
131
- }
132
- } else if (message.isHandle(msg)) {
133
- return msg
134
- } else if (message.isIdentity(msg)) {
135
- return msg
136
- } else if (message.isAccount(msg)) {
137
- return msg
138
- } else if (message.isMigrate(msg)) {
139
- return msg
140
- } else if (message.isTombstone(msg)) {
141
- return msg
142
- } else if (message.isInfo(msg)) {
143
- return msg
144
- }
145
- return msg
146
- }
147
-
148
- export function jitter(maxMs) {
149
- return Math.round((Math.random() - 0.5) * maxMs * 2)
150
- }
151
-
152
- export function strToInt(str: string) {
153
- const int = parseInt(str, 10)
154
- assert(!isNaN(int), 'string could not be parsed to an integer')
155
- return int
156
- }
@@ -1,185 +0,0 @@
1
- import { wait } from '@atproto/common'
2
- import { randomStr } from '@atproto/crypto'
3
- import {
4
- ConsecutiveList,
5
- LatestQueue,
6
- PartitionedQueue,
7
- } from '../../../src/data-plane/server/subscription/util'
8
-
9
- describe('subscription utils', () => {
10
- describe('ConsecutiveList', () => {
11
- it('tracks consecutive complete items.', () => {
12
- const consecutive = new ConsecutiveList<number>()
13
- // add items
14
- const item1 = consecutive.push(1)
15
- const item2 = consecutive.push(2)
16
- const item3 = consecutive.push(3)
17
- expect(item1.isComplete).toEqual(false)
18
- expect(item2.isComplete).toEqual(false)
19
- expect(item3.isComplete).toEqual(false)
20
- // complete items out of order
21
- expect(consecutive.list.length).toBe(3)
22
- expect(item2.complete()).toEqual([])
23
- expect(item2.isComplete).toEqual(true)
24
- expect(consecutive.list.length).toBe(3)
25
- expect(item1.complete()).toEqual([1, 2])
26
- expect(item1.isComplete).toEqual(true)
27
- expect(consecutive.list.length).toBe(1)
28
- expect(item3.complete()).toEqual([3])
29
- expect(consecutive.list.length).toBe(0)
30
- expect(item3.isComplete).toEqual(true)
31
- })
32
- })
33
-
34
- describe('LatestQueue', () => {
35
- it('only performs most recently queued item.', async () => {
36
- const latest = new LatestQueue()
37
- const complete: number[] = []
38
- latest.add(async () => {
39
- await wait(1)
40
- complete.push(1)
41
- })
42
- latest.add(async () => {
43
- await wait(1)
44
- complete.push(2)
45
- })
46
- latest.add(async () => {
47
- await wait(1)
48
- complete.push(3)
49
- })
50
- latest.add(async () => {
51
- await wait(1)
52
- complete.push(4)
53
- })
54
- await latest.queue.onIdle()
55
- expect(complete).toEqual([1, 4]) // skip 2, 3
56
- latest.add(async () => {
57
- await wait(1)
58
- complete.push(5)
59
- })
60
- latest.add(async () => {
61
- await wait(1)
62
- complete.push(6)
63
- })
64
- await latest.queue.onIdle()
65
- expect(complete).toEqual([1, 4, 5, 6])
66
- })
67
-
68
- it('stops processing queued messages on destroy.', async () => {
69
- const latest = new LatestQueue()
70
- const complete: number[] = []
71
- latest.add(async () => {
72
- await wait(1)
73
- complete.push(1)
74
- })
75
- latest.add(async () => {
76
- await wait(1)
77
- complete.push(2)
78
- })
79
- const destroyed = latest.destroy()
80
- latest.add(async () => {
81
- await wait(1)
82
- complete.push(3)
83
- })
84
- await destroyed
85
- expect(complete).toEqual([1]) // 2 was cleared, 3 was after destroy
86
- // show that waiting on destroyed above was already enough to reflect all complete items
87
- await latest.queue.onIdle()
88
- expect(complete).toEqual([1])
89
- })
90
- })
91
-
92
- describe('PartitionedQueue', () => {
93
- it('performs work in parallel across partitions, serial within a partition.', async () => {
94
- const partitioned = new PartitionedQueue({ concurrency: Infinity })
95
- const complete: number[] = []
96
- // partition 1 items start slow but get faster: slow should still complete first.
97
- partitioned.add('1', async () => {
98
- await wait(30)
99
- complete.push(11)
100
- })
101
- partitioned.add('1', async () => {
102
- await wait(20)
103
- complete.push(12)
104
- })
105
- partitioned.add('1', async () => {
106
- await wait(1)
107
- complete.push(13)
108
- })
109
- expect(partitioned.partitions.size).toEqual(1)
110
- // partition 2 items complete quickly except the last, which is slowest of all events.
111
- partitioned.add('2', async () => {
112
- await wait(1)
113
- complete.push(21)
114
- })
115
- partitioned.add('2', async () => {
116
- await wait(1)
117
- complete.push(22)
118
- })
119
- partitioned.add('2', async () => {
120
- await wait(1)
121
- complete.push(23)
122
- })
123
- partitioned.add('2', async () => {
124
- await wait(60)
125
- complete.push(24)
126
- })
127
- expect(partitioned.partitions.size).toEqual(2)
128
- await partitioned.main.onIdle()
129
- expect(complete).toEqual([21, 22, 23, 11, 12, 13, 24])
130
- expect(partitioned.partitions.size).toEqual(0)
131
- })
132
-
133
- it('limits overall concurrency.', async () => {
134
- const partitioned = new PartitionedQueue({ concurrency: 1 })
135
- const complete: number[] = []
136
- // if concurrency were not constrained, partition 1 would complete all items
137
- // before any items from partition 2. since it is constrained, the work is complete in the order added.
138
- partitioned.add('1', async () => {
139
- await wait(1)
140
- complete.push(11)
141
- })
142
- partitioned.add('2', async () => {
143
- await wait(10)
144
- complete.push(21)
145
- })
146
- partitioned.add('1', async () => {
147
- await wait(1)
148
- complete.push(12)
149
- })
150
- partitioned.add('2', async () => {
151
- await wait(10)
152
- complete.push(22)
153
- })
154
- // only partition 1 exists so far due to the concurrency
155
- expect(partitioned.partitions.size).toEqual(1)
156
- await partitioned.main.onIdle()
157
- expect(complete).toEqual([11, 21, 12, 22])
158
- expect(partitioned.partitions.size).toEqual(0)
159
- })
160
-
161
- it('settles with many items.', async () => {
162
- const partitioned = new PartitionedQueue({ concurrency: 100 })
163
- const complete: { partition: string; id: number }[] = []
164
- const partitions = new Set<string>()
165
- for (let i = 0; i < 500; ++i) {
166
- const partition = randomStr(1, 'base16').slice(0, 1)
167
- partitions.add(partition)
168
- partitioned.add(partition, async () => {
169
- await wait((i % 2) * 2)
170
- complete.push({ partition, id: i })
171
- })
172
- }
173
- expect(partitioned.partitions.size).toEqual(partitions.size)
174
- await partitioned.main.onIdle()
175
- expect(complete.length).toEqual(500)
176
- for (const partition of partitions) {
177
- const ids = complete
178
- .filter((item) => item.partition === partition)
179
- .map((item) => item.id)
180
- expect(ids).toEqual([...ids].sort((a, b) => a - b))
181
- }
182
- expect(partitioned.partitions.size).toEqual(0)
183
- })
184
- })
185
- })