@atproto/pds 0.4.176 → 0.4.178

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119) hide show
  1. package/CHANGELOG.md +42 -0
  2. package/dist/account-manager/db/migrations/007-lexicon-failures-index.d.ts +4 -0
  3. package/dist/account-manager/db/migrations/007-lexicon-failures-index.d.ts.map +1 -0
  4. package/dist/account-manager/db/migrations/007-lexicon-failures-index.js +17 -0
  5. package/dist/account-manager/db/migrations/007-lexicon-failures-index.js.map +1 -0
  6. package/dist/account-manager/db/migrations/index.d.ts +2 -0
  7. package/dist/account-manager/db/migrations/index.d.ts.map +1 -1
  8. package/dist/account-manager/db/migrations/index.js +2 -0
  9. package/dist/account-manager/db/migrations/index.js.map +1 -1
  10. package/dist/account-manager/helpers/lexicon.d.ts.map +1 -1
  11. package/dist/account-manager/helpers/lexicon.js +7 -0
  12. package/dist/account-manager/helpers/lexicon.js.map +1 -1
  13. package/dist/account-manager/helpers/token.d.ts +32 -32
  14. package/dist/account-manager/scope-reference-getter.d.ts +14 -0
  15. package/dist/account-manager/scope-reference-getter.d.ts.map +1 -0
  16. package/dist/account-manager/scope-reference-getter.js +69 -0
  17. package/dist/account-manager/scope-reference-getter.js.map +1 -0
  18. package/dist/actor-store/actor-store.d.ts.map +1 -1
  19. package/dist/actor-store/actor-store.js +4 -1
  20. package/dist/actor-store/actor-store.js.map +1 -1
  21. package/dist/actor-store/blob/transactor.d.ts +2 -2
  22. package/dist/actor-store/blob/transactor.d.ts.map +1 -1
  23. package/dist/actor-store/blob/transactor.js +73 -24
  24. package/dist/actor-store/blob/transactor.js.map +1 -1
  25. package/dist/actor-store/record/reader.d.ts.map +1 -1
  26. package/dist/actor-store/record/reader.js +12 -9
  27. package/dist/actor-store/record/reader.js.map +1 -1
  28. package/dist/actor-store/repo/sql-repo-reader.d.ts.map +1 -1
  29. package/dist/actor-store/repo/sql-repo-reader.js +2 -2
  30. package/dist/actor-store/repo/sql-repo-reader.js.map +1 -1
  31. package/dist/actor-store/repo/sql-repo-transactor.d.ts.map +1 -1
  32. package/dist/actor-store/repo/sql-repo-transactor.js +16 -19
  33. package/dist/actor-store/repo/sql-repo-transactor.js.map +1 -1
  34. package/dist/actor-store/repo/transactor.d.ts.map +1 -1
  35. package/dist/actor-store/repo/transactor.js +11 -15
  36. package/dist/actor-store/repo/transactor.js.map +1 -1
  37. package/dist/api/com/atproto/admin/updateSubjectStatus.js +6 -2
  38. package/dist/api/com/atproto/admin/updateSubjectStatus.js.map +1 -1
  39. package/dist/api/com/atproto/repo/importRepo.d.ts.map +1 -1
  40. package/dist/api/com/atproto/repo/importRepo.js +43 -51
  41. package/dist/api/com/atproto/repo/importRepo.js.map +1 -1
  42. package/dist/auth-verifier.d.ts.map +1 -1
  43. package/dist/auth-verifier.js +2 -12
  44. package/dist/auth-verifier.js.map +1 -1
  45. package/dist/context.d.ts.map +1 -1
  46. package/dist/context.js +20 -4
  47. package/dist/context.js.map +1 -1
  48. package/dist/disk-blobstore.d.ts.map +1 -1
  49. package/dist/disk-blobstore.js +10 -2
  50. package/dist/disk-blobstore.js.map +1 -1
  51. package/dist/lexicon/index.d.ts +49 -0
  52. package/dist/lexicon/index.d.ts.map +1 -1
  53. package/dist/lexicon/index.js +52 -1
  54. package/dist/lexicon/index.js.map +1 -1
  55. package/dist/lexicon/lexicons.d.ts +500 -24
  56. package/dist/lexicon/lexicons.d.ts.map +1 -1
  57. package/dist/lexicon/lexicons.js +344 -7
  58. package/dist/lexicon/lexicons.js.map +1 -1
  59. package/dist/lexicon/types/com/atproto/moderation/defs.d.ts +8 -8
  60. package/dist/lexicon/types/com/atproto/moderation/defs.d.ts.map +1 -1
  61. package/dist/lexicon/types/com/atproto/moderation/defs.js +7 -7
  62. package/dist/lexicon/types/com/atproto/moderation/defs.js.map +1 -1
  63. package/dist/lexicon/types/com/atproto/temp/dereferenceScope.d.ts +24 -0
  64. package/dist/lexicon/types/com/atproto/temp/dereferenceScope.d.ts.map +1 -0
  65. package/dist/lexicon/types/com/atproto/temp/dereferenceScope.js +7 -0
  66. package/dist/lexicon/types/com/atproto/temp/dereferenceScope.js.map +1 -0
  67. package/dist/lexicon/types/tools/ozone/moderation/defs.d.ts +10 -2
  68. package/dist/lexicon/types/tools/ozone/moderation/defs.d.ts.map +1 -1
  69. package/dist/lexicon/types/tools/ozone/moderation/defs.js +9 -0
  70. package/dist/lexicon/types/tools/ozone/moderation/defs.js.map +1 -1
  71. package/dist/lexicon/types/tools/ozone/moderation/emitEvent.d.ts +1 -1
  72. package/dist/lexicon/types/tools/ozone/moderation/emitEvent.d.ts.map +1 -1
  73. package/dist/lexicon/types/tools/ozone/moderation/getAccountTimeline.d.ts +1 -1
  74. package/dist/lexicon/types/tools/ozone/moderation/getAccountTimeline.d.ts.map +1 -1
  75. package/dist/lexicon/types/tools/ozone/moderation/getAccountTimeline.js.map +1 -1
  76. package/dist/lexicon/types/tools/ozone/report/defs.d.ts +92 -0
  77. package/dist/lexicon/types/tools/ozone/report/defs.d.ts.map +1 -0
  78. package/dist/lexicon/types/tools/ozone/report/defs.js +98 -0
  79. package/dist/lexicon/types/tools/ozone/report/defs.js.map +1 -0
  80. package/dist/logger.d.ts +1 -0
  81. package/dist/logger.d.ts.map +1 -1
  82. package/dist/logger.js +2 -1
  83. package/dist/logger.js.map +1 -1
  84. package/dist/scripts/rebuild-repo.d.ts.map +1 -1
  85. package/dist/scripts/rebuild-repo.js +3 -5
  86. package/dist/scripts/rebuild-repo.js.map +1 -1
  87. package/dist/scripts/sequencer-recovery/recoverer.js +8 -10
  88. package/dist/scripts/sequencer-recovery/recoverer.js.map +1 -1
  89. package/dist/sequencer/sequencer.js +2 -2
  90. package/dist/sequencer/sequencer.js.map +1 -1
  91. package/package.json +19 -16
  92. package/src/account-manager/db/migrations/007-lexicon-failures-index.ts +14 -0
  93. package/src/account-manager/db/migrations/index.ts +2 -0
  94. package/src/account-manager/helpers/lexicon.ts +14 -1
  95. package/src/account-manager/scope-reference-getter.ts +92 -0
  96. package/src/actor-store/actor-store.ts +5 -9
  97. package/src/actor-store/blob/transactor.ts +115 -42
  98. package/src/actor-store/record/reader.ts +14 -12
  99. package/src/actor-store/repo/sql-repo-reader.ts +12 -14
  100. package/src/actor-store/repo/sql-repo-transactor.ts +17 -23
  101. package/src/actor-store/repo/transactor.ts +29 -32
  102. package/src/api/com/atproto/admin/updateSubjectStatus.ts +7 -7
  103. package/src/api/com/atproto/repo/importRepo.ts +41 -55
  104. package/src/auth-verifier.ts +4 -20
  105. package/src/context.ts +26 -5
  106. package/src/disk-blobstore.ts +20 -3
  107. package/src/lexicon/index.ts +82 -0
  108. package/src/lexicon/lexicons.ts +357 -7
  109. package/src/lexicon/types/com/atproto/moderation/defs.ts +52 -7
  110. package/src/lexicon/types/com/atproto/temp/dereferenceScope.ts +42 -0
  111. package/src/lexicon/types/tools/ozone/moderation/defs.ts +23 -0
  112. package/src/lexicon/types/tools/ozone/moderation/emitEvent.ts +1 -0
  113. package/src/lexicon/types/tools/ozone/moderation/getAccountTimeline.ts +1 -0
  114. package/src/lexicon/types/tools/ozone/report/defs.ts +154 -0
  115. package/src/logger.ts +1 -0
  116. package/src/scripts/rebuild-repo.ts +4 -5
  117. package/src/scripts/sequencer-recovery/recoverer.ts +8 -12
  118. package/src/sequencer/sequencer.ts +3 -3
  119. package/tsconfig.build.tsbuildinfo +1 -1
@@ -3,7 +3,13 @@ import stream from 'node:stream'
3
3
  import bytes from 'bytes'
4
4
  import { fromStream as fileTypeFromStream } from 'file-type'
5
5
  import { CID } from 'multiformats/cid'
6
- import { cloneStream, sha256RawToCid, streamSize } from '@atproto/common'
6
+ import PQueue from 'p-queue'
7
+ import {
8
+ SECOND,
9
+ cloneStream,
10
+ sha256RawToCid,
11
+ streamSize,
12
+ } from '@atproto/common'
7
13
  import { BlobRef } from '@atproto/lexicon'
8
14
  import { BlobNotFoundError, BlobStore, WriteOpAction } from '@atproto/repo'
9
15
  import { AtUri } from '@atproto/syntax'
@@ -11,12 +17,8 @@ import { InvalidRequestError } from '@atproto/xrpc-server'
11
17
  import { BackgroundQueue } from '../../background'
12
18
  import * as img from '../../image'
13
19
  import { StatusAttr } from '../../lexicon/types/com/atproto/admin/defs'
14
- import {
15
- PreparedBlobRef,
16
- PreparedDelete,
17
- PreparedUpdate,
18
- PreparedWrite,
19
- } from '../../repo/types'
20
+ import { blobStoreLogger as log } from '../../logger'
21
+ import { PreparedBlobRef, PreparedWrite } from '../../repo/types'
20
22
  import { ActorDb, Blob as BlobTable } from '../db'
21
23
  import { BlobReader } from './reader'
22
24
 
@@ -113,38 +115,68 @@ export class BlobTransactor extends BlobReader {
113
115
  async processWriteBlobs(rev: string, writes: PreparedWrite[]) {
114
116
  await this.deleteDereferencedBlobs(writes)
115
117
 
116
- const blobPromises: Promise<void>[] = []
118
+ const ac = new AbortController()
119
+
120
+ // Limit the number of parallel requests made to the BlobStore by using a
121
+ // queue with concurrency management.
122
+ type Task = () => Promise<void>
123
+ const tasks: Task[] = []
124
+
117
125
  for (const write of writes) {
118
- if (
119
- write.action === WriteOpAction.Create ||
120
- write.action === WriteOpAction.Update
121
- ) {
126
+ if (isCreate(write) || isUpdate(write)) {
122
127
  for (const blob of write.blobs) {
123
- blobPromises.push(this.verifyBlobAndMakePermanent(blob))
124
- blobPromises.push(this.associateBlob(blob, write.uri))
128
+ tasks.push(async () => {
129
+ if (ac.signal.aborted) return
130
+ await this.associateBlob(blob, write.uri)
131
+ await this.verifyBlobAndMakePermanent(blob, ac.signal)
132
+ })
125
133
  }
126
134
  }
127
135
  }
128
- await Promise.all(blobPromises)
136
+
137
+ try {
138
+ const queue = new PQueue({
139
+ concurrency: 20,
140
+ // The blob store should already limit the time of every operation. We
141
+ // add a timeout here as an extra precaution.
142
+ timeout: 60 * SECOND,
143
+ throwOnTimeout: true,
144
+ })
145
+
146
+ // Will reject as soon as any task fails, causing the "finally" block
147
+ // below to run, aborting every other pending tasks.
148
+ await queue.addAll(tasks)
149
+ } finally {
150
+ ac.abort()
151
+ }
129
152
  }
130
153
 
131
- async updateBlobTakedownStatus(blob: CID, takedown: StatusAttr) {
154
+ async updateBlobTakedownStatus(cid: CID, takedown: StatusAttr) {
132
155
  const takedownRef = takedown.applied
133
156
  ? takedown.ref ?? new Date().toISOString()
134
157
  : null
135
158
  await this.db.db
136
159
  .updateTable('blob')
137
160
  .set({ takedownRef })
138
- .where('cid', '=', blob.toString())
161
+ .where('cid', '=', cid.toString())
139
162
  .executeTakeFirst()
163
+
140
164
  try {
165
+ // @NOTE find a way to not perform i/o operations during the transaction
166
+ // (typically by using a state in the "blob" table, and another process to
167
+ // handle the actual i/o)
141
168
  if (takedown.applied) {
142
- await this.blobstore.quarantine(blob)
169
+ await this.blobstore.quarantine(cid)
143
170
  } else {
144
- await this.blobstore.unquarantine(blob)
171
+ await this.blobstore.unquarantine(cid)
145
172
  }
146
173
  } catch (err) {
147
174
  if (!(err instanceof BlobNotFoundError)) {
175
+ log.error(
176
+ { err, cid: cid.toString() },
177
+ 'could not update blob takedown status',
178
+ )
179
+
148
180
  throw err
149
181
  }
150
182
  }
@@ -154,21 +186,17 @@ export class BlobTransactor extends BlobReader {
154
186
  writes: PreparedWrite[],
155
187
  skipBlobStore?: boolean,
156
188
  ) {
157
- const deletes = writes.filter(
158
- (w) => w.action === WriteOpAction.Delete,
159
- ) as PreparedDelete[]
160
- const updates = writes.filter(
161
- (w) => w.action === WriteOpAction.Update,
162
- ) as PreparedUpdate[]
189
+ const deletes = writes.filter(isDelete)
190
+ const updates = writes.filter(isUpdate)
163
191
  const uris = [...deletes, ...updates].map((w) => w.uri.toString())
164
192
  if (uris.length === 0) return
165
193
 
166
194
  const deletedRepoBlobs = await this.db.db
167
195
  .deleteFrom('record_blob')
168
196
  .where('recordUri', 'in', uris)
169
- .returningAll()
197
+ .returning('blobCid')
170
198
  .execute()
171
- if (deletedRepoBlobs.length < 1) return
199
+ if (deletedRepoBlobs.length === 0) return
172
200
 
173
201
  const deletedRepoBlobCids = deletedRepoBlobs.map((row) => row.blobCid)
174
202
  const duplicateCids = await this.db.db
@@ -178,53 +206,85 @@ export class BlobTransactor extends BlobReader {
178
206
  .execute()
179
207
 
180
208
  const newBlobCids = writes
181
- .map((w) =>
182
- w.action === WriteOpAction.Create || w.action === WriteOpAction.Update
183
- ? w.blobs
184
- : [],
185
- )
186
- .flat()
187
- .map((b) => b.cid.toString())
209
+ .filter((w) => isUpdate(w) || isCreate(w))
210
+ .flatMap((w) => w.blobs.map((b) => b.cid.toString()))
211
+
188
212
  const cidsToKeep = [
189
213
  ...newBlobCids,
190
214
  ...duplicateCids.map((row) => row.blobCid),
191
215
  ]
216
+
192
217
  const cidsToDelete = deletedRepoBlobCids.filter(
193
218
  (cid) => !cidsToKeep.includes(cid),
194
219
  )
195
- if (cidsToDelete.length < 1) return
220
+ if (cidsToDelete.length === 0) return
196
221
 
197
222
  await this.db.db
198
223
  .deleteFrom('blob')
199
224
  .where('cid', 'in', cidsToDelete)
200
225
  .execute()
226
+
201
227
  if (!skipBlobStore) {
202
228
  this.db.onCommit(() => {
203
229
  this.backgroundQueue.add(async () => {
204
- await Promise.allSettled(
205
- cidsToDelete.map((cid) => this.blobstore.delete(CID.parse(cid))),
206
- )
230
+ try {
231
+ const cids = cidsToDelete.map((cid) => CID.parse(cid))
232
+ await this.blobstore.deleteMany(cids)
233
+ } catch (err) {
234
+ log.error(
235
+ { err, cids: cidsToDelete },
236
+ 'could not delete blobs from blobstore',
237
+ )
238
+ }
207
239
  })
208
240
  })
209
241
  }
210
242
  }
211
243
 
212
- async verifyBlobAndMakePermanent(blob: PreparedBlobRef): Promise<void> {
244
+ async verifyBlobAndMakePermanent(
245
+ blob: PreparedBlobRef,
246
+ signal?: AbortSignal,
247
+ ): Promise<void> {
213
248
  const found = await this.db.db
214
249
  .selectFrom('blob')
215
- .selectAll()
250
+ .select(['tempKey', 'size', 'mimeType'])
216
251
  .where('cid', '=', blob.cid.toString())
217
252
  .where('takedownRef', 'is', null)
218
253
  .executeTakeFirst()
254
+
255
+ signal?.throwIfAborted()
256
+
219
257
  if (!found) {
220
258
  throw new InvalidRequestError(
221
259
  `Could not find blob: ${blob.cid.toString()}`,
222
260
  'BlobNotFound',
223
261
  )
224
262
  }
263
+
225
264
  if (found.tempKey) {
226
265
  verifyBlob(blob, found)
227
- await this.blobstore.makePermanent(found.tempKey, blob.cid)
266
+
267
+ // @NOTE it is less than ideal to perform async (i/o) operations during a
268
+ // transaction. Especially since there have been instances of the actor-db
269
+ // being locked, requiring the processes to be kicked.
270
+
271
+ // The better solution would be to update the blob state in the database
272
+ // (e.g. "makeItPermanent") and to process those updates outside of the
273
+ // transaction.
274
+
275
+ await this.blobstore
276
+ .makePermanent(found.tempKey, blob.cid)
277
+ .catch((err) => {
278
+ log.error(
279
+ { err, cid: blob.cid.toString() },
280
+ 'could not make blob permanent',
281
+ )
282
+
283
+ throw err
284
+ })
285
+
286
+ signal?.throwIfAborted()
287
+
228
288
  await this.db.db
229
289
  .updateTable('blob')
230
290
  .set({ tempKey: null })
@@ -300,7 +360,10 @@ function acceptedMime(mime: string, accepted: string[]): boolean {
300
360
  return accepted.includes(mime)
301
361
  }
302
362
 
303
- function verifyBlob(blob: PreparedBlobRef, found: BlobTable) {
363
+ function verifyBlob(
364
+ blob: PreparedBlobRef,
365
+ found: Pick<BlobTable, 'size' | 'mimeType'>,
366
+ ) {
304
367
  const throwInvalid = (msg: string, errName = 'InvalidBlob') => {
305
368
  throw new InvalidRequestError(msg, errName)
306
369
  }
@@ -328,3 +391,13 @@ function verifyBlob(blob: PreparedBlobRef, found: BlobTable) {
328
391
  )
329
392
  }
330
393
  }
394
+
395
+ function isCreate(write: PreparedWrite) {
396
+ return write.action === WriteOpAction.Create
397
+ }
398
+ function isUpdate(write: PreparedWrite) {
399
+ return write.action === WriteOpAction.Update
400
+ }
401
+ function isDelete(write: PreparedWrite) {
402
+ return write.action === WriteOpAction.Delete
403
+ }
@@ -213,19 +213,21 @@ export class RecordReader {
213
213
  // Ensures that we don't end-up with duplicate likes, reposts, and follows from race conditions.
214
214
 
215
215
  async getBacklinkConflicts(uri: AtUri, record: RepoRecord): Promise<AtUri[]> {
216
- const recordBacklinks = getBacklinks(uri, record)
217
- const conflicts = await Promise.all(
218
- recordBacklinks.map((backlink) =>
219
- this.getRecordBacklinks({
220
- collection: uri.collection,
221
- path: backlink.path,
222
- linkTo: backlink.linkTo,
223
- }),
224
- ),
225
- )
216
+ const conflicts: AtUri[] = []
217
+
218
+ for (const backlink of getBacklinks(uri, record)) {
219
+ const backlinks = await this.getRecordBacklinks({
220
+ collection: uri.collection,
221
+ path: backlink.path,
222
+ linkTo: backlink.linkTo,
223
+ })
224
+
225
+ for (const { rkey } of backlinks) {
226
+ conflicts.push(AtUri.make(uri.hostname, uri.collection, rkey))
227
+ }
228
+ }
229
+
226
230
  return conflicts
227
- .flat()
228
- .map(({ rkey }) => AtUri.make(uri.hostname, uri.collection, rkey))
229
231
  }
230
232
 
231
233
  async listExistingBlocks(): Promise<CidSet> {
@@ -59,20 +59,18 @@ export class SqlRepoReader extends ReadableBlockstore {
59
59
  const missing = new CidSet(cached.missing)
60
60
  const missingStr = cached.missing.map((c) => c.toString())
61
61
  const blocks = new BlockMap()
62
- await Promise.all(
63
- chunkArray(missingStr, 500).map(async (batch) => {
64
- const res = await this.db.db
65
- .selectFrom('repo_block')
66
- .where('repo_block.cid', 'in', batch)
67
- .select(['repo_block.cid as cid', 'repo_block.content as content'])
68
- .execute()
69
- for (const row of res) {
70
- const cid = CID.parse(row.cid)
71
- blocks.set(cid, row.content)
72
- missing.delete(cid)
73
- }
74
- }),
75
- )
62
+ for (const batch of chunkArray(missingStr, 500)) {
63
+ const res = await this.db.db
64
+ .selectFrom('repo_block')
65
+ .where('repo_block.cid', 'in', batch)
66
+ .select(['repo_block.cid as cid', 'repo_block.content as content'])
67
+ .execute()
68
+ for (const row of res) {
69
+ const cid = CID.parse(row.cid)
70
+ blocks.set(cid, row.content)
71
+ missing.delete(cid)
72
+ }
73
+ }
76
74
  this.cache.addMap(blocks)
77
75
  blocks.addMap(cached.blocks)
78
76
  return { blocks, missing: missing.toList() }
@@ -45,24 +45,20 @@ export class SqlRepoTransactor extends SqlRepoReader implements RepoStorage {
45
45
  }
46
46
 
47
47
  async putMany(toPut: BlockMap, rev: string): Promise<void> {
48
- const blocks: RepoBlock[] = []
49
- toPut.forEach((bytes, cid) => {
50
- blocks.push({
51
- cid: cid.toString(),
52
- repoRev: rev,
53
- size: bytes.length,
54
- content: bytes,
55
- })
56
- })
57
- await Promise.all(
58
- chunkArray(blocks, 50).map((batch) =>
59
- this.db.db
60
- .insertInto('repo_block')
61
- .values(batch)
62
- .onConflict((oc) => oc.doNothing())
63
- .execute(),
64
- ),
65
- )
48
+ const blocks: RepoBlock[] = Array.from(toPut, ([cid, bytes]) => ({
49
+ cid: cid.toString(),
50
+ repoRev: rev,
51
+ size: bytes.length,
52
+ content: bytes,
53
+ }))
54
+
55
+ for (const batch of chunkArray(blocks, 50)) {
56
+ await this.db.db
57
+ .insertInto('repo_block')
58
+ .values(batch)
59
+ .onConflict((oc) => oc.doNothing())
60
+ .execute()
61
+ }
66
62
  }
67
63
 
68
64
  async deleteMany(cids: CID[]) {
@@ -75,11 +71,9 @@ export class SqlRepoTransactor extends SqlRepoReader implements RepoStorage {
75
71
  }
76
72
 
77
73
  async applyCommit(commit: CommitData, isCreate?: boolean) {
78
- await Promise.all([
79
- this.updateRoot(commit.cid, commit.rev, isCreate),
80
- this.putMany(commit.newBlocks, commit.rev),
81
- this.deleteMany(commit.removedCids.toList()),
82
- ])
74
+ await this.updateRoot(commit.cid, commit.rev, isCreate)
75
+ await this.putMany(commit.newBlocks, commit.rev)
76
+ await this.deleteMany(commit.removedCids.toList())
83
77
  }
84
78
 
85
79
  async updateRoot(cid: CID, rev: string, isCreate = false): Promise<void> {
@@ -55,11 +55,10 @@ export class RepoTransactor extends RepoReader {
55
55
  this.signingKey,
56
56
  writes.map(createWriteToOp),
57
57
  )
58
- await Promise.all([
59
- this.storage.applyCommit(commit, true),
60
- this.indexWrites(writes, commit.rev),
61
- this.blob.processWriteBlobs(commit.rev, writes),
62
- ])
58
+ await this.storage.applyCommit(commit, true)
59
+ await this.indexWrites(writes, commit.rev)
60
+ await this.blob.processWriteBlobs(commit.rev, writes)
61
+
63
62
  const ops = writes.map((w) => ({
64
63
  action: 'create' as const,
65
64
  path: formatDataKey(w.uri.collection, w.uri.rkey),
@@ -87,14 +86,13 @@ export class RepoTransactor extends RepoReader {
87
86
  throw new InvalidRequestError('Too many writes. Max event size: 2MB')
88
87
  }
89
88
 
90
- await Promise.all([
91
- // persist the commit to repo storage
92
- this.storage.applyCommit(commit),
93
- // & send to indexing
94
- this.indexWrites(writes, commit.rev),
95
- // process blobs
96
- this.blob.processWriteBlobs(commit.rev, writes),
97
- ])
89
+ // persist the commit to repo storage
90
+ await this.storage.applyCommit(commit)
91
+ // & send to indexing
92
+ await this.indexWrites(writes, commit.rev)
93
+ // process blobs
94
+ await this.blob.processWriteBlobs(commit.rev, writes)
95
+
98
96
  return commit
99
97
  }
100
98
 
@@ -184,25 +182,24 @@ export class RepoTransactor extends RepoReader {
184
182
 
185
183
  async indexWrites(writes: PreparedWrite[], rev: string) {
186
184
  this.db.assertTransaction()
187
- await Promise.all(
188
- writes.map(async (write) => {
189
- if (
190
- write.action === WriteOpAction.Create ||
191
- write.action === WriteOpAction.Update
192
- ) {
193
- await this.record.indexRecord(
194
- write.uri,
195
- write.cid,
196
- write.record,
197
- write.action,
198
- rev,
199
- this.now,
200
- )
201
- } else if (write.action === WriteOpAction.Delete) {
202
- await this.record.deleteRecord(write.uri)
203
- }
204
- }),
205
- )
185
+
186
+ for (const write of writes) {
187
+ if (
188
+ write.action === WriteOpAction.Create ||
189
+ write.action === WriteOpAction.Update
190
+ ) {
191
+ await this.record.indexRecord(
192
+ write.uri,
193
+ write.cid,
194
+ write.record,
195
+ write.action,
196
+ rev,
197
+ this.now,
198
+ )
199
+ } else if (write.action === WriteOpAction.Delete) {
200
+ await this.record.deleteRecord(write.uri)
201
+ }
202
+ }
206
203
  }
207
204
 
208
205
  async getDuplicateRecordCids(
@@ -19,16 +19,16 @@ export default function (server: Server, ctx: AppContext) {
19
19
  await ctx.accountManager.takedownAccount(subject.did, takedown)
20
20
  } else if (isStrongRef(subject)) {
21
21
  const uri = new AtUri(subject.uri)
22
- await ctx.actorStore.transact(uri.hostname, (store) =>
23
- store.record.updateRecordTakedownStatus(uri, takedown),
24
- )
22
+ await ctx.actorStore.transact(uri.hostname, async (store) => {
23
+ await store.record.updateRecordTakedownStatus(uri, takedown)
24
+ })
25
25
  } else if (isRepoBlobRef(subject)) {
26
- await ctx.actorStore.transact(subject.did, (store) =>
27
- store.repo.blob.updateBlobTakedownStatus(
26
+ await ctx.actorStore.transact(subject.did, async (store) => {
27
+ await store.repo.blob.updateBlobTakedownStatus(
28
28
  CID.parse(subject.cid),
29
29
  takedown,
30
- ),
31
- )
30
+ )
31
+ })
32
32
  } else {
33
33
  throw new InvalidRequestError('Invalid subject')
34
34
  }
@@ -1,5 +1,4 @@
1
1
  import { CID } from 'multiformats/cid'
2
- import PQueue from 'p-queue'
3
2
  import { TID } from '@atproto/common'
4
3
  import { BlobRef, LexValue, RepoRecord } from '@atproto/lexicon'
5
4
  import {
@@ -11,7 +10,6 @@ import {
11
10
  } from '@atproto/repo'
12
11
  import { AtUri } from '@atproto/syntax'
13
12
  import { InvalidRequestError } from '@atproto/xrpc-server'
14
- import { ActorStoreTransactor } from '../../../../actor-store/actor-store-transactor'
15
13
  import { ACCESS_FULL } from '../../../../auth-scope'
16
14
  import { AppContext } from '../../../../context'
17
15
  import { Server } from '../../../../lexicon'
@@ -32,53 +30,47 @@ export default function (server: Server, ctx: AppContext) {
32
30
 
33
31
  const { did } = auth.credentials
34
32
 
35
- await ctx.actorStore.transact(did, (store) =>
36
- importRepo(store, input.body),
37
- )
38
- },
39
- })
40
- }
33
+ // @NOTE process as much as we can before the transaction, in particular
34
+ // the reading of the body stream.
35
+ const { roots, blocks } = await readCarStream(input.body)
36
+ if (roots.length !== 1) {
37
+ await blocks.dump()
38
+ throw new InvalidRequestError('expected one root')
39
+ }
41
40
 
42
- const importRepo = async (
43
- actorStore: ActorStoreTransactor,
44
- incomingCar: AsyncIterable<Uint8Array>,
45
- ) => {
46
- const now = new Date().toISOString()
47
- const rev = TID.nextStr()
48
- const did = actorStore.repo.did
41
+ const blockMap = new BlockMap()
42
+ for await (const block of blocks) {
43
+ blockMap.set(block.cid, block.bytes)
44
+ }
49
45
 
50
- const { roots, blocks } = await readCarStream(incomingCar)
51
- if (roots.length !== 1) {
52
- await blocks.dump()
53
- throw new InvalidRequestError('expected one root')
54
- }
55
- const blockMap = new BlockMap()
56
- for await (const block of blocks) {
57
- blockMap.set(block.cid, block.bytes)
58
- }
59
- const currRepo = await actorStore.repo.maybeLoadRepo()
60
- const diff = await verifyDiff(
61
- currRepo,
62
- blockMap,
63
- roots[0],
64
- undefined,
65
- undefined,
66
- { ensureLeaves: false },
67
- )
68
- diff.commit.rev = rev
69
- await actorStore.repo.storage.applyCommit(diff.commit, currRepo === null)
70
- const recordQueue = new PQueue({ concurrency: 50 })
71
- const controller = new AbortController()
72
- for (const write of diff.writes) {
73
- recordQueue
74
- .add(
75
- async () => {
46
+ await ctx.actorStore.transact(did, async (store) => {
47
+ const now = new Date().toISOString()
48
+ const rev = TID.nextStr()
49
+ const did = store.repo.did
50
+
51
+ const currRepo = await store.repo.maybeLoadRepo()
52
+ const diff = await verifyDiff(
53
+ currRepo,
54
+ blockMap,
55
+ roots[0],
56
+ undefined,
57
+ undefined,
58
+ { ensureLeaves: false },
59
+ )
60
+ diff.commit.rev = rev
61
+ await store.repo.storage.applyCommit(diff.commit, currRepo === null)
62
+
63
+ // @NOTE There is no point in performing the following concurrently
64
+ // since better-sqlite3 is synchronous.
65
+ for (const write of diff.writes) {
76
66
  const uri = AtUri.make(did, write.collection, write.rkey)
77
67
  if (write.action === WriteOpAction.Delete) {
78
- await actorStore.record.deleteRecord(uri)
68
+ await store.record.deleteRecord(uri)
79
69
  } else {
80
70
  let parsedRecord: RepoRecord
81
71
  try {
72
+ // @NOTE getAndParseRecord returns a promise for historical
73
+ // reasons but it's internal processing is actually synchronous.
82
74
  const parsed = await getAndParseRecord(blockMap, write.cid)
83
75
  parsedRecord = parsed.record
84
76
  } catch {
@@ -86,7 +78,8 @@ const importRepo = async (
86
78
  `Could not parse record at '${write.collection}/${write.rkey}'`,
87
79
  )
88
80
  }
89
- const indexRecord = actorStore.record.indexRecord(
81
+
82
+ await store.record.indexRecord(
90
83
  uri,
91
84
  write.cid,
92
85
  parsedRecord,
@@ -95,19 +88,12 @@ const importRepo = async (
95
88
  now,
96
89
  )
97
90
  const recordBlobs = findBlobRefs(parsedRecord)
98
- const indexRecordBlobs = actorStore.repo.blob.insertBlobs(
99
- uri.toString(),
100
- recordBlobs,
101
- )
102
- await Promise.all([indexRecord, indexRecordBlobs])
91
+ await store.repo.blob.insertBlobs(uri.toString(), recordBlobs)
103
92
  }
104
- },
105
- { signal: controller.signal },
106
- )
107
- .catch((err) => controller.abort(err))
108
- }
109
- await recordQueue.onIdle()
110
- controller.signal.throwIfAborted()
93
+ }
94
+ })
95
+ },
96
+ })
111
97
  }
112
98
 
113
99
  export const findBlobRefs = (val: LexValue, layer = 0): BlobRef[] => {