@voyantjs/distribution 0.20.0 → 0.21.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/dist/channel-push/admin-routes.d.ts +31 -0
  2. package/dist/channel-push/admin-routes.d.ts.map +1 -0
  3. package/dist/channel-push/admin-routes.js +165 -0
  4. package/dist/channel-push/availability-push.d.ts +76 -0
  5. package/dist/channel-push/availability-push.d.ts.map +1 -0
  6. package/dist/channel-push/availability-push.js +238 -0
  7. package/dist/channel-push/booking-push.d.ts +114 -0
  8. package/dist/channel-push/booking-push.d.ts.map +1 -0
  9. package/dist/channel-push/booking-push.js +503 -0
  10. package/dist/channel-push/content-push.d.ts +60 -0
  11. package/dist/channel-push/content-push.d.ts.map +1 -0
  12. package/dist/channel-push/content-push.js +256 -0
  13. package/dist/channel-push/index.d.ts +15 -0
  14. package/dist/channel-push/index.d.ts.map +1 -0
  15. package/dist/channel-push/index.js +18 -0
  16. package/dist/channel-push/plugin.d.ts +18 -0
  17. package/dist/channel-push/plugin.d.ts.map +1 -0
  18. package/dist/channel-push/plugin.js +21 -0
  19. package/dist/channel-push/reconciler.d.ts +85 -0
  20. package/dist/channel-push/reconciler.d.ts.map +1 -0
  21. package/dist/channel-push/reconciler.js +175 -0
  22. package/dist/channel-push/subscriber.d.ts +40 -0
  23. package/dist/channel-push/subscriber.d.ts.map +1 -0
  24. package/dist/channel-push/subscriber.js +174 -0
  25. package/dist/channel-push/types.d.ts +43 -0
  26. package/dist/channel-push/types.d.ts.map +1 -0
  27. package/dist/channel-push/types.js +32 -0
  28. package/dist/channel-push/workflows.d.ts +56 -0
  29. package/dist/channel-push/workflows.d.ts.map +1 -0
  30. package/dist/channel-push/workflows.js +100 -0
  31. package/dist/index.d.ts +4 -0
  32. package/dist/index.d.ts.map +1 -1
  33. package/dist/index.js +3 -0
  34. package/dist/rate-limit.d.ts +69 -0
  35. package/dist/rate-limit.d.ts.map +1 -0
  36. package/dist/rate-limit.js +135 -0
  37. package/dist/routes.d.ts +170 -10
  38. package/dist/routes.d.ts.map +1 -1
  39. package/dist/schema-core.d.ts +417 -1
  40. package/dist/schema-core.d.ts.map +1 -1
  41. package/dist/schema-core.js +98 -1
  42. package/dist/schema-push-intents.d.ts +387 -0
  43. package/dist/schema-push-intents.d.ts.map +1 -0
  44. package/dist/schema-push-intents.js +77 -0
  45. package/dist/schema.d.ts +1 -0
  46. package/dist/schema.d.ts.map +1 -1
  47. package/dist/schema.js +1 -0
  48. package/dist/service.d.ts +103 -7
  49. package/dist/service.d.ts.map +1 -1
  50. package/dist/validation.d.ts +5 -5
  51. package/dist/webhook-deliveries.d.ts +86 -0
  52. package/dist/webhook-deliveries.d.ts.map +1 -0
  53. package/dist/webhook-deliveries.js +293 -0
  54. package/package.json +16 -8
@@ -0,0 +1,503 @@
1
+ /**
2
+ * Booking-push pipeline.
3
+ *
4
+ * Triggered by `booking.confirmed`. The subscriber writes pending
5
+ * `channel_booking_links` rows and returns immediately (per the EventBus
6
+ * fire-and-forget contract). The durable processor (`processBookingPush`)
7
+ * drains those rows, calls `adapter.pushBooking()` per link, and marks
8
+ * each row `ok` or `failed`.
9
+ *
10
+ * The processor is a plain async function so it's callable from:
11
+ * - The `booking.confirmed` subscriber (inline, dev/single-process)
12
+ * - The `channel.booking.push` durable workflow's body (production)
13
+ * - The reconciler (Phase G) for catch-up after long outages
14
+ * - Tests / admin retry endpoints
15
+ *
16
+ * Per docs/architecture/channel-push-architecture.md §4 + §12.1.
17
+ */
18
import { bookingItems, bookings } from "@voyantjs/bookings/schema";
import { AdapterRateLimitedError, } from "@voyantjs/catalog";
import { newId } from "@voyantjs/db/lib/typeid";
import { channelBookingLinks, channelContracts, channelProductMappings, channels, } from "@voyantjs/distribution/schema";
import { and, asc, desc, eq, inArray, lte, or, sql } from "drizzle-orm";
import { acquireToken, channelScopeKey, drainBucket } from "../rate-limit.js";
import { prepareOutboundEnvelope } from "../webhook-deliveries.js";
import { defaultLogger, getChannelPushDepsOrThrow } from "./types.js";
26
/**
 * Stable string identifier for the booking-push workflow. Also used as
 * the `sourceEvent` tag on outbound delivery envelopes written by
 * `processBookingPush`.
 */
export const CHANNEL_BOOKING_PUSH_WORKFLOW_ID = "channel.booking.push";
28
/**
 * Build the stable idempotency key the upstream uses to dedupe pushes
 * across retries. A nullish booking-item id collapses to "*" (a
 * booking-level push). Per §3.
 */
export function bookingPushIdempotencyKey(bookingId, bookingItemId, channelId) {
    const itemSegment = bookingItemId ?? "*";
    return ["book", bookingId, itemSegment, channelId].join(":");
}
35
/**
 * Resolve the channels that want a push for this booking. One row per
 * (booking_item, channel) pair where the mapping has push_bookings =
 * true and the channel is active. Booking-level pushes (no item id) are
 * supported via a synthetic item id of null.
 *
 * Per §7.4 — booking push uses `channel_product_mappings` (not
 * `channel_inventory_allotments`) so channels mapped to a product
 * without a slot allotment still receive the push.
 */
export async function resolveBookingPushTargets(db, bookingId) {
    const items = await db
        .select({
            id: bookingItems.id,
            productId: bookingItems.productId,
        })
        .from(bookingItems)
        .where(eq(bookingItems.bookingId, bookingId));
    if (items.length === 0)
        return [];
    const productIds = [
        ...new Set(items.flatMap((item) => (item.productId ? [item.productId] : []))),
    ];
    if (productIds.length === 0)
        return [];
    const rows = await db
        .select({
            mapping: channelProductMappings,
            channel: channels,
        })
        .from(channelProductMappings)
        .innerJoin(channels, eq(channelProductMappings.channelId, channels.id))
        .where(and(eq(channelProductMappings.active, true), eq(channelProductMappings.pushBookings, true), inArray(channelProductMappings.productId, productIds), eq(channels.status, "active")));
    if (rows.length === 0)
        return [];
    // Index mapping rows by product so the per-item pass is a lookup
    // rather than a full scan; insertion order preserves row order.
    const rowsByProduct = new Map();
    for (const row of rows) {
        const bucket = rowsByProduct.get(row.mapping.productId);
        if (bucket) {
            bucket.push(row);
        }
        else {
            rowsByProduct.set(row.mapping.productId, [row]);
        }
    }
    const targets = [];
    for (const item of items) {
        if (!item.productId)
            continue;
        for (const row of rowsByProduct.get(item.productId) ?? []) {
            targets.push({
                bookingItemId: item.id,
                productId: item.productId,
                mapping: row.mapping,
                channel: row.channel,
            });
        }
    }
    return targets;
}
85
/**
 * Insert pending `channel_booking_links` rows for each push target.
 * `INSERT ... ON CONFLICT DO NOTHING` against the
 * `(channel_id, booking_id, COALESCE(booking_item_id, ''))` unique
 * index — durable handoff with no doubled-push risk per §7.1.
 *
 * Returns the count of newly-inserted rows. Subscribers don't strictly
 * need this — the processor reads pending rows by query — but tests
 * find it useful.
 */
export async function upsertPendingBookingLinks(db, bookingId, targets) {
    if (targets.length === 0)
        return 0;
    const pendingRows = targets.map((target) => {
        const itemId = target.bookingItemId;
        const channelId = target.channel.id;
        return {
            id: newId("channel_booking_links"),
            channelId,
            bookingId,
            bookingItemId: itemId,
            sourceKind: target.mapping.sourceKind ?? null,
            sourceConnectionId: target.mapping.sourceConnectionId ?? null,
            pushStatus: "pending",
            idempotencyKey: bookingPushIdempotencyKey(bookingId, itemId, channelId),
        };
    });
    // Drizzle's onConflictDoNothing without an explicit target falls back
    // to the (channel, booking, item) unique index we created in §7.1.
    const insertedRows = await db
        .insert(channelBookingLinks)
        .values(pendingRows)
        .onConflictDoNothing()
        .returning();
    return insertedRows.length;
}
117
/**
 * Drain pending `channel_booking_links` rows for one booking and call
 * `adapter.pushBooking()` per link. Idempotent: re-running the
 * processor against the same booking is safe — the `idempotency_key`
 * column ensures retries don't double-push upstream.
 *
 * Each adapter call:
 *   1. Acquires a token from the per-channel/connection bucket.
 *   2. Calls the adapter through `prepareOutboundEnvelope` so every
 *      attempt lands in `webhook_deliveries` with redacted headers.
 *   3. Updates the link to `ok` (with upstream_ref, hash) or `failed`
 *      (with last_error, attempts++).
 *
 * Per §4.2 + §12.1.
 *
 * @param input - `{ bookingId }` — the booking whose pending links to drain.
 * @param deps - Optional `{ db, registry, logger }`; falls back to
 *   `getChannelPushDepsOrThrow()` when omitted.
 * @returns Summary counts (attempted/succeeded/failed/compensated) plus a
 *   per-link `outcomes` list.
 */
export async function processBookingPush(input, deps) {
    const { db, registry, logger = defaultLogger } = deps ?? getChannelPushDepsOrThrow();
    const outcomes = [];
    // Only rows still `pending` are picked up — rows already marked
    // `ok`, `failed`, or `compensated` are untouched by this pass.
    const links = (await db
        .select({
            link: channelBookingLinks,
            channel: channels,
        })
        .from(channelBookingLinks)
        .innerJoin(channels, eq(channelBookingLinks.channelId, channels.id))
        .where(and(eq(channelBookingLinks.bookingId, input.bookingId), eq(channelBookingLinks.pushStatus, "pending"))));
    if (links.length === 0) {
        return {
            bookingId: input.bookingId,
            attempted: 0,
            succeeded: 0,
            failed: 0,
            compensated: 0,
            outcomes,
        };
    }
    const [booking] = (await db
        .select()
        .from(bookings)
        .where(eq(bookings.id, input.bookingId))
        .limit(1));
    if (!booking) {
        // Links exist but the booking row is gone — log and bail; the
        // pending links remain for the reconciler to inspect.
        logger.error?.(`processBookingPush: booking ${input.bookingId} not found`, {});
        return {
            bookingId: input.bookingId,
            attempted: 0,
            succeeded: 0,
            failed: 0,
            compensated: 0,
            outcomes,
        };
    }
    let succeeded = 0;
    let failed = 0;
    // Track succeeded links so we can compensate them if a sibling fails
    // and the contract policy demands strict-atomicity. Per §4.2.
    const successList = [];
    for (const { link, channel } of links) {
        const connectionId = link.sourceConnectionId ?? channel.id;
        const adapter = registry.resolveByConnection(connectionId);
        if (!adapter) {
            // Skip — no adapter wired for this connection. Mark the row
            // failed so ops sees it; the reconciler retries when the adapter
            // shows up.
            await markLinkFailed(db, link.id, link.pushAttempts + 1, "no_adapter_registered");
            outcomes.push({
                channelId: channel.id,
                bookingItemId: link.bookingItemId ?? null,
                status: "failed",
                error: "no_adapter_registered",
            });
            failed += 1;
            continue;
        }
        if (!adapter.capabilities.supportsBookingPush || !adapter.pushBooking) {
            // Adapter exists but can't push bookings — record and skip.
            await markLinkFailed(db, link.id, link.pushAttempts + 1, "adapter_unsupported");
            outcomes.push({
                channelId: channel.id,
                bookingItemId: link.bookingItemId ?? null,
                status: "failed",
                error: "adapter_unsupported",
            });
            failed += 1;
            continue;
        }
        // Resolve the per-(channel, item) mapping for the external ids.
        const mapping = await readMappingForLink(db, link, booking);
        if (!mapping) {
            await markLinkFailed(db, link.id, link.pushAttempts + 1, "no_mapping");
            outcomes.push({
                channelId: channel.id,
                bookingItemId: link.bookingItemId ?? null,
                status: "failed",
                error: "no_mapping",
            });
            failed += 1;
            continue;
        }
        // Rate limit before dispatching.
        const rlConfig = rateLimitConfigForChannel(channel);
        if (rlConfig) {
            const acq = await acquireToken(db, channelScopeKey(channel.id, connectionId), rlConfig, "booking");
            if (!acq.acquired) {
                // Bookings are supposed to pre-empt other flows; if we can't
                // acquire, the bucket is over-tight or the channel just got
                // 429'd. Mark the link failed and move on — reconciler retries.
                await markLinkFailed(db, link.id, link.pushAttempts + 1, "rate_limited");
                outcomes.push({
                    channelId: channel.id,
                    bookingItemId: link.bookingItemId ?? null,
                    status: "failed",
                    error: "rate_limited",
                });
                failed += 1;
                continue;
            }
        }
        const request = {
            // Prefer the key persisted on the link; rebuild deterministically
            // when absent so retries still dedupe upstream.
            idempotencyKey: link.idempotencyKey ??
                bookingPushIdempotencyKey(input.bookingId, link.bookingItemId ?? null, channel.id),
            bookingId: input.bookingId,
            bookingItemId: link.bookingItemId ?? undefined,
            externalProductId: mapping.externalProductId ?? "",
            externalRateId: mapping.externalRateId ?? undefined,
            externalCategoryId: mapping.externalCategoryId ?? undefined,
            channelId: channel.id,
            contractPolicy: undefined,
            payload: serializeBookingForPush(booking, link.bookingItemId ?? null),
        };
        const adapterCtx = {
            connection_id: connectionId,
        };
        // Every attempt writes a webhook_deliveries row through the
        // redactor — direct INSERTs are forbidden per §11.3.
        const envelope = await prepareOutboundEnvelope(db, {
            sourceModule: "distribution",
            sourceEvent: "channel.booking.push",
            sourceEntityModule: "bookings",
            sourceEntityId: input.bookingId,
            targetUrl: `adapter:${adapter.kind}`,
            targetKind: `channel:${adapter.kind}`,
            targetRef: channel.id,
            requestMethod: "POST",
            requestBody: request,
            attemptNumber: link.pushAttempts + 1,
            idempotencyKey: request.idempotencyKey,
        });
        try {
            const result = await adapter.pushBooking(adapterCtx, request);
            await envelope.complete({
                responseStatus: 200,
                responseBody: result,
            });
            await markLinkOk(db, link.id, link.pushAttempts + 1, result.upstreamRef, result.externalReference ?? null, result.externalStatus ?? null);
            outcomes.push({
                channelId: channel.id,
                bookingItemId: link.bookingItemId ?? null,
                status: "ok",
                upstreamRef: result.upstreamRef,
            });
            succeeded += 1;
            successList.push({ link, channel, adapter, adapterCtx, upstreamRef: result.upstreamRef });
        }
        catch (err) {
            const message = err instanceof Error ? err.message : String(err);
            // 429 from upstream — drain the bucket for the cooldown so
            // concurrent dispatchers also see "no tokens" until the channel
            // is ready, and stamp the delivery with the rate-limited class
            // (per §14.4).
            const isRateLimited = err instanceof AdapterRateLimitedError;
            if (isRateLimited) {
                await drainBucket(db, channelScopeKey(channel.id, connectionId), err.retryAfterMs);
            }
            await envelope.complete({
                errorClass: isRateLimited ? "rate_limited" : "adapter_error",
                errorMessage: message,
            });
            await markLinkFailed(db, link.id, link.pushAttempts + 1, message);
            outcomes.push({
                channelId: channel.id,
                bookingItemId: link.bookingItemId ?? null,
                status: "failed",
                error: message,
            });
            failed += 1;
            logger.error?.(`pushBooking failed for ${link.id}`, { error: message });
        }
    }
    // Compensation pass: if any link failed and the channel-contract
    // policy is strict-atomic, roll back succeeded siblings so all
    // channels see a consistent "no booking" state. Per §4.2.
    let compensated = 0;
    if (failed > 0 && successList.length > 0) {
        // NOTE(review): the policy is resolved from the FIRST link's
        // channel only — confirm that multi-channel bookings are expected
        // to share a single compensation policy.
        const policy = await resolveCompensationPolicy(db, links[0]?.channel.id ?? null);
        if (policy === "strict-atomic") {
            for (const entry of successList) {
                const success = await compensateSucceededLink(db, entry, input.bookingId, logger);
                if (success) {
                    compensated += 1;
                    // Update the existing outcome row to compensated.
                    for (const outcome of outcomes) {
                        if (outcome.channelId === entry.channel.id &&
                            outcome.bookingItemId === (entry.link.bookingItemId ?? null) &&
                            outcome.status === "ok") {
                            outcome.status = "compensated";
                            break;
                        }
                    }
                }
            }
            if (compensated > 0) {
                logger.warn?.(`processBookingPush: compensated ${compensated} succeeded link(s) under strict-atomic policy`, { bookingId: input.bookingId, compensated, failed });
            }
        }
    }
    return {
        bookingId: input.bookingId,
        attempted: links.length,
        succeeded,
        failed,
        compensated,
        outcomes,
    };
}
341
/**
 * Read the compensation policy for a channel by walking
 * `channel_contracts` (most-recent active contract wins). Returns
 * `eventually-consistent` when no contract exists or no compensation
 * key is set — that's the doc-default safe behavior for travel
 * inventory.
 *
 * @param db - Drizzle handle.
 * @param channelId - Channel to look up; nullish short-circuits to the
 *   default policy.
 * @returns "strict-atomic" | "eventually-consistent"
 */
async function resolveCompensationPolicy(db, channelId) {
    if (!channelId)
        return "eventually-consistent";
    const today = new Date().toISOString().slice(0, 10);
    const [contract] = (await db
        .select({ policy: channelContracts.policy })
        .from(channelContracts)
        // NOTE(review): this filter admits open-ended contracts (NULL
        // endsAt) even if they start in the future — confirm whether the
        // intent was startsAt <= today AND (endsAt IS NULL OR endsAt >= today).
        .where(and(eq(channelContracts.channelId, channelId), eq(channelContracts.status, "active"), or(sql `${channelContracts.endsAt} IS NULL`, lte(channelContracts.startsAt, today))))
        // Fix: order by startsAt DESC so the most-recent contract wins,
        // matching the documented contract above (asc picked the oldest).
        .orderBy(desc(channelContracts.startsAt))
        .limit(1));
    const raw = contract?.policy?.compensation;
    // Unknown/missing values collapse to the safe default.
    return raw === "strict-atomic" ? "strict-atomic" : "eventually-consistent";
}
361
/**
 * Roll back a succeeded link by calling `adapter.cancel` for the
 * upstream reference. Marks the link `compensated` regardless of the
 * cancel call's outcome — leaving it `ok` would lie to the operator
 * dashboard. Per §4.2.
 *
 * @param db - Drizzle handle.
 * @param entry - `{ link, channel, adapter, adapterCtx, upstreamRef }`
 *   captured when the push succeeded.
 * @param bookingId - Booking being compensated (stamped on the delivery log).
 * @param logger - Structured logger; `warn` is optional-chained.
 * @returns Always `true` — the link row is flipped to `compensated`
 *   even when the upstream cancel fails; any error is stored in
 *   `last_error` instead.
 */
async function compensateSucceededLink(db, entry, bookingId, logger) {
    let cancelError = null;
    if (entry.adapter.cancel) {
        // Cancel attempts are logged through the same redacting envelope
        // as pushes — direct webhook_deliveries INSERTs are forbidden.
        const envelope = await prepareOutboundEnvelope(db, {
            sourceModule: "distribution",
            sourceEvent: "channel.booking.compensate",
            sourceEntityModule: "bookings",
            sourceEntityId: bookingId,
            targetUrl: `adapter:${entry.adapter.kind}`,
            targetKind: `channel:${entry.adapter.kind}`,
            targetRef: entry.channel.id,
            requestMethod: "POST",
            requestBody: { upstream_ref: entry.upstreamRef, reason: "channel-push-compensation" },
            attemptNumber: 1,
            idempotencyKey: `compensate:${entry.link.id}`,
        });
        try {
            const result = await entry.adapter.cancel(entry.adapterCtx, {
                upstream_ref: entry.upstreamRef,
                reason: "channel-push-compensation",
            });
            await envelope.complete({ responseStatus: 200, responseBody: result });
        }
        catch (err) {
            // Cancel failed upstream — record it but still mark the link
            // compensated below so the dashboard reflects intent.
            cancelError = err instanceof Error ? err.message : String(err);
            await envelope.complete({ errorClass: "adapter_error", errorMessage: cancelError });
            logger.warn?.(`compensateSucceededLink: cancel failed for ${entry.link.id}`, {
                error: cancelError,
            });
        }
    }
    else {
        // No cancel capability — note it in last_error for ops.
        cancelError = "adapter does not implement cancel";
        logger.warn?.(`compensateSucceededLink: ${entry.adapter.kind} has no cancel method`, {
            linkId: entry.link.id,
        });
    }
    const now = new Date();
    await db
        .update(channelBookingLinks)
        .set({
            pushStatus: "compensated",
            lastPushAt: now,
            lastError: cancelError,
            updatedAt: now,
        })
        .where(eq(channelBookingLinks.id, entry.link.id));
    return true;
}
416
+ // ─────────────────────────────────────────────────────────────────────────────
417
+ // Helpers
418
+ // ─────────────────────────────────────────────────────────────────────────────
419
/**
 * Mark a booking link as successfully pushed: record the upstream
 * booking id / reference / status, bump the attempt counter, clear the
 * last error, and stamp the push/sync timestamps.
 */
async function markLinkOk(db, linkId, attempts, upstreamRef, externalReference, externalStatus) {
    const timestamp = new Date();
    const patch = {
        pushStatus: "ok",
        pushAttempts: attempts,
        lastPushAt: timestamp,
        lastError: null,
        externalBookingId: upstreamRef,
        externalReference,
        externalStatus,
        lastSyncedAt: timestamp,
        updatedAt: timestamp,
    };
    await db.update(channelBookingLinks).set(patch).where(eq(channelBookingLinks.id, linkId));
}
436
/**
 * Mark a booking link as failed: store the error message, bump the
 * attempt counter, and stamp the push timestamp.
 */
async function markLinkFailed(db, linkId, attempts, message) {
    const timestamp = new Date();
    const patch = {
        pushStatus: "failed",
        pushAttempts: attempts,
        lastPushAt: timestamp,
        lastError: message,
        updatedAt: timestamp,
    };
    await db.update(channelBookingLinks).set(patch).where(eq(channelBookingLinks.id, linkId));
}
449
/**
 * Resolve the (channel, product) mapping a link should push through.
 * Item-scoped links walk via the specific booking item's product_id;
 * booking-level links fall back to the booking's first item (used by
 * bookings that fully syndicate). Returns null when no product or no
 * mapping can be found.
 */
async function readMappingForLink(db, link, booking) {
    // Both branches run the same one-row query; only the filter differs.
    const itemFilter = link.bookingItemId
        ? eq(bookingItems.id, link.bookingItemId)
        : eq(bookingItems.bookingId, booking.id);
    const [itemRow] = await db
        .select({ productId: bookingItems.productId })
        .from(bookingItems)
        .where(itemFilter)
        .limit(1);
    const resolvedProductId = itemRow?.productId ?? null;
    if (!resolvedProductId)
        return null;
    const [mapping] = await db
        .select()
        .from(channelProductMappings)
        .where(and(eq(channelProductMappings.channelId, link.channelId), eq(channelProductMappings.productId, resolvedProductId)))
        .limit(1);
    return mapping ?? null;
}
479
/**
 * Build the token-bucket config for a channel, or null when the
 * channel's rate-limit settings are incomplete (both rps and burst
 * must be set and non-zero).
 */
function rateLimitConfigForChannel(channel) {
    const { rateLimitRps, rateLimitBurst, rateLimitPriorityGates } = channel;
    if (!rateLimitRps || !rateLimitBurst)
        return null;
    return {
        rps: rateLimitRps,
        burst: rateLimitBurst,
        priorityGates: rateLimitPriorityGates ?? undefined,
    };
}
488
/**
 * Project a booking row into the thin v1 push payload: identifiers,
 * status, dates, pax, and sell-side money — optionally narrowed to one
 * item via `bookingItemId`. PII redaction (per §15) is left to the
 * adapter; future iterations push redaction up here.
 */
function serializeBookingForPush(booking, bookingItemId) {
    const { id, bookingNumber, status, startDate, endDate, pax, sellCurrency, sellAmountCents } = booking;
    return {
        bookingId: id,
        bookingNumber,
        bookingItemId,
        status,
        startDate,
        endDate,
        pax,
        sellCurrency,
        sellAmountCents,
    };
}
@@ -0,0 +1,60 @@
1
/**
 * Content-push pipeline.
 *
 * Triggered by `product.content.changed`. The subscriber upserts a
 * `channel_content_push_intents` row per (channel, product); concurrent
 * edits collapse to one row. The processor drains intents, hashes the
 * current content, and skips when the hash equals
 * `channel_product_mappings.last_pushed_content_hash` — channel-side
 * idempotency per §6.1.
 *
 * Per docs/architecture/channel-push-architecture.md §6 + §12.3.
 */
import type { AnyDrizzleDb } from "@voyantjs/db";
import { channelProductMappings, channels } from "../schema.js";
import { type ChannelPushDeps } from "./types.js";
/** Stable string identifier for the content-push workflow. */
export declare const CHANNEL_CONTENT_PUSH_WORKFLOW_ID: "channel.content.push";
/**
 * Resolve the channels that want a content push for this product.
 * Per §7.4 — content push uses `channel_product_mappings` (content is
 * product-shaped, not slot-shaped).
 */
export declare function resolveContentPushTargets(db: AnyDrizzleDb, productId: string): Promise<Array<{
    channelId: string;
    sourceConnectionId: string;
    mapping: typeof channelProductMappings.$inferSelect;
    channel: typeof channels.$inferSelect;
}>>;
/**
 * Upsert the pending content intent for one (channel, product) pair so
 * concurrent edits collapse to a single row (see module header).
 */
export declare function upsertContentIntent(db: AnyDrizzleDb, input: {
    channelId: string;
    sourceConnectionId: string;
    productId: string;
}): Promise<void>;
/** Optional narrowing for one drain pass of the content processor. */
export interface ProcessContentPushInput {
    /** presumably restricts the drain to one channel's intents — confirm in content-push.js */
    channelId?: string;
    /** presumably caps how many intents one pass processes — confirm in content-push.js */
    limit?: number;
}
/** Counts summarizing one drain pass of `processContentPushIntents`. */
export interface ProcessContentPushResult {
    attempted: number;
    succeeded: number;
    failed: number;
    /** intents whose content hash matched the upstream's last-known hash */
    skipped: number;
}
/**
 * Drain pending content intents. Hashes current product content and
 * skips when the hash matches the upstream's last-known hash.
 *
 * v1 ships a minimal `content` payload (product row fields). Real
 * verticals supply richer payloads via a future content provider hook.
 *
 * Per §6 + §12.3.
 */
export declare function processContentPushIntents(input?: ProcessContentPushInput, deps?: ChannelPushDeps): Promise<ProcessContentPushResult>;
/**
 * Stable canonical-JSON hash. Mirrors the body-fingerprint behavior
 * of `webhook-deliveries.ts` — purely a "is this the same content as
 * before?" fingerprint, not a cryptographic hash.
 */
export declare function canonicalHash(value: unknown): string;
//# sourceMappingURL=content-push.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"content-push.d.ts","sourceRoot":"","sources":["../../src/channel-push/content-push.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;GAWG;AAOH,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,cAAc,CAAA;AAKhD,OAAO,EAA6B,sBAAsB,EAAE,QAAQ,EAAE,MAAM,cAAc,CAAA;AAG1F,OAAO,EAAE,KAAK,eAAe,EAA4C,MAAM,YAAY,CAAA;AAE3F,8DAA8D;AAC9D,eAAO,MAAM,gCAAgC,EAAG,sBAA+B,CAAA;AAE/E;;;;GAIG;AACH,wBAAsB,yBAAyB,CAC7C,EAAE,EAAE,YAAY,EAChB,SAAS,EAAE,MAAM,GAChB,OAAO,CACR,KAAK,CAAC;IACJ,SAAS,EAAE,MAAM,CAAA;IACjB,kBAAkB,EAAE,MAAM,CAAA;IAC1B,OAAO,EAAE,OAAO,sBAAsB,CAAC,YAAY,CAAA;IACnD,OAAO,EAAE,OAAO,QAAQ,CAAC,YAAY,CAAA;CACtC,CAAC,CACH,CA4BA;AAED,wBAAsB,mBAAmB,CACvC,EAAE,EAAE,YAAY,EAChB,KAAK,EAAE;IAAE,SAAS,EAAE,MAAM,CAAC;IAAC,kBAAkB,EAAE,MAAM,CAAC;IAAC,SAAS,EAAE,MAAM,CAAA;CAAE,GAC1E,OAAO,CAAC,IAAI,CAAC,CAkBf;AAED,MAAM,WAAW,uBAAuB;IACtC,SAAS,CAAC,EAAE,MAAM,CAAA;IAClB,KAAK,CAAC,EAAE,MAAM,CAAA;CACf;AAED,MAAM,WAAW,wBAAwB;IACvC,SAAS,EAAE,MAAM,CAAA;IACjB,SAAS,EAAE,MAAM,CAAA;IACjB,MAAM,EAAE,MAAM,CAAA;IACd,OAAO,EAAE,MAAM,CAAA;CAChB;AAED;;;;;;;;GAQG;AACH,wBAAsB,yBAAyB,CAC7C,KAAK,GAAE,uBAA4B,EACnC,IAAI,CAAC,EAAE,eAAe,GACrB,OAAO,CAAC,wBAAwB,CAAC,CAuKnC;AAkCD;;;;GAIG;AACH,wBAAgB,aAAa,CAAC,KAAK,EAAE,OAAO,GAAG,MAAM,CAepD"}