@voyantjs/distribution 0.20.0 → 0.21.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/channel-push/admin-routes.d.ts +31 -0
- package/dist/channel-push/admin-routes.d.ts.map +1 -0
- package/dist/channel-push/admin-routes.js +165 -0
- package/dist/channel-push/availability-push.d.ts +76 -0
- package/dist/channel-push/availability-push.d.ts.map +1 -0
- package/dist/channel-push/availability-push.js +238 -0
- package/dist/channel-push/booking-push.d.ts +114 -0
- package/dist/channel-push/booking-push.d.ts.map +1 -0
- package/dist/channel-push/booking-push.js +503 -0
- package/dist/channel-push/content-push.d.ts +60 -0
- package/dist/channel-push/content-push.d.ts.map +1 -0
- package/dist/channel-push/content-push.js +256 -0
- package/dist/channel-push/index.d.ts +15 -0
- package/dist/channel-push/index.d.ts.map +1 -0
- package/dist/channel-push/index.js +18 -0
- package/dist/channel-push/plugin.d.ts +18 -0
- package/dist/channel-push/plugin.d.ts.map +1 -0
- package/dist/channel-push/plugin.js +21 -0
- package/dist/channel-push/reconciler.d.ts +85 -0
- package/dist/channel-push/reconciler.d.ts.map +1 -0
- package/dist/channel-push/reconciler.js +175 -0
- package/dist/channel-push/subscriber.d.ts +40 -0
- package/dist/channel-push/subscriber.d.ts.map +1 -0
- package/dist/channel-push/subscriber.js +174 -0
- package/dist/channel-push/types.d.ts +43 -0
- package/dist/channel-push/types.d.ts.map +1 -0
- package/dist/channel-push/types.js +32 -0
- package/dist/channel-push/workflows.d.ts +56 -0
- package/dist/channel-push/workflows.d.ts.map +1 -0
- package/dist/channel-push/workflows.js +100 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +3 -0
- package/dist/rate-limit.d.ts +69 -0
- package/dist/rate-limit.d.ts.map +1 -0
- package/dist/rate-limit.js +135 -0
- package/dist/routes.d.ts +170 -10
- package/dist/routes.d.ts.map +1 -1
- package/dist/schema-core.d.ts +417 -1
- package/dist/schema-core.d.ts.map +1 -1
- package/dist/schema-core.js +98 -1
- package/dist/schema-push-intents.d.ts +387 -0
- package/dist/schema-push-intents.d.ts.map +1 -0
- package/dist/schema-push-intents.js +77 -0
- package/dist/schema.d.ts +1 -0
- package/dist/schema.d.ts.map +1 -1
- package/dist/schema.js +1 -0
- package/dist/service.d.ts +103 -7
- package/dist/service.d.ts.map +1 -1
- package/dist/validation.d.ts +5 -5
- package/dist/webhook-deliveries.d.ts +86 -0
- package/dist/webhook-deliveries.d.ts.map +1 -0
- package/dist/webhook-deliveries.js +293 -0
- package/package.json +16 -8
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Admin API for the channel-push operator dashboard.
|
|
3
|
+
*
|
|
4
|
+
* Ships the data layer for "channel sync" views per §9 + §10 (Phase D)
|
|
5
|
+
* + §14.5 — operators want to see (a) which booking links are stuck,
|
|
6
|
+
* (b) the delivery log per booking, (c) per-channel throttling, and
|
|
7
|
+
* (d) a one-click retry button. The React surface lives in templates;
|
|
8
|
+
* this file is the backing API.
|
|
9
|
+
*
|
|
10
|
+
* Routes are mounted under `/v1/admin/distribution/channel-push/*`.
|
|
11
|
+
*
|
|
12
|
+
* GET /links — counts + filterable list of channel_booking_links
|
|
13
|
+
* POST /retry/:bookingId — drain pending links for one booking
|
|
14
|
+
* GET /deliveries — webhook_deliveries scoped by booking/channel
|
|
15
|
+
* GET /throttling — per-channel rate-limited count in last hour
|
|
16
|
+
* POST /reconcile/:flow — manually trigger a reconciler scanner
|
|
17
|
+
*
|
|
18
|
+
* Per docs/architecture/channel-push-architecture.md §9 + §14.5.
|
|
19
|
+
*/
|
|
20
|
+
import type { PostgresJsDatabase } from "drizzle-orm/postgres-js";
|
|
21
|
+
import { Hono } from "hono";
|
|
22
|
+
type Env = {
|
|
23
|
+
Variables: {
|
|
24
|
+
db: PostgresJsDatabase;
|
|
25
|
+
userId?: string;
|
|
26
|
+
};
|
|
27
|
+
};
|
|
28
|
+
export type ChannelPushAdminRoutes = ReturnType<typeof createChannelPushAdminRoutes>;
|
|
29
|
+
export declare function createChannelPushAdminRoutes(): Hono<Env, import("hono/types").BlankSchema, "/">;
|
|
30
|
+
export {};
|
|
31
|
+
//# sourceMappingURL=admin-routes.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"admin-routes.d.ts","sourceRoot":"","sources":["../../src/channel-push/admin-routes.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;GAkBG;AAIH,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,yBAAyB,CAAA;AACjE,OAAO,EAAE,IAAI,EAAE,MAAM,MAAM,CAAA;AAM3B,KAAK,GAAG,GAAG;IACT,SAAS,EAAE;QACT,EAAE,EAAE,kBAAkB,CAAA;QACtB,MAAM,CAAC,EAAE,MAAM,CAAA;KAChB,CAAA;CACF,CAAA;AAED,MAAM,MAAM,sBAAsB,GAAG,UAAU,CAAC,OAAO,4BAA4B,CAAC,CAAA;AAEpF,wBAAgB,4BAA4B,qDA2J3C"}
|
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Admin API for the channel-push operator dashboard.
|
|
3
|
+
*
|
|
4
|
+
* Ships the data layer for "channel sync" views per §9 + §10 (Phase D)
|
|
5
|
+
* + §14.5 — operators want to see (a) which booking links are stuck,
|
|
6
|
+
* (b) the delivery log per booking, (c) per-channel throttling, and
|
|
7
|
+
* (d) a one-click retry button. The React surface lives in templates;
|
|
8
|
+
* this file is the backing API.
|
|
9
|
+
*
|
|
10
|
+
* Routes are mounted under `/v1/admin/distribution/channel-push/*`.
|
|
11
|
+
*
|
|
12
|
+
* GET /links — counts + filterable list of channel_booking_links
|
|
13
|
+
* POST /retry/:bookingId — drain pending links for one booking
|
|
14
|
+
* GET /deliveries — webhook_deliveries scoped by booking/channel
|
|
15
|
+
* GET /throttling — per-channel rate-limited count in last hour
|
|
16
|
+
* POST /reconcile/:flow — manually trigger a reconciler scanner
|
|
17
|
+
*
|
|
18
|
+
* Per docs/architecture/channel-push-architecture.md §9 + §14.5.
|
|
19
|
+
*/
|
|
20
|
+
import { infraWebhookDeliveriesTable } from "@voyantjs/db/schema/infra";
|
|
21
|
+
import { and, desc, eq, gte, sql } from "drizzle-orm";
|
|
22
|
+
import { Hono } from "hono";
|
|
23
|
+
import { channelBookingLinks, channels } from "../schema.js";
|
|
24
|
+
import { reconcileAvailability, reconcileBookingLinks, reconcileContent } from "./reconciler.js";
|
|
25
|
+
import { triggerBookingPushForBooking } from "./subscriber.js";
|
|
26
|
+
/**
 * Build the Hono sub-app that backs the channel-push operator dashboard.
 *
 * Mounted under `/v1/admin/distribution/channel-push/*`:
 *   GET  /links            — counts + filterable list of channel_booking_links
 *   POST /retry/:bookingId — drain pending links for one booking (idempotent)
 *   GET  /deliveries       — webhook_deliveries scoped by booking/channel
 *   GET  /throttling       — per-channel rate-limited count in a trailing window
 *   POST /reconcile/:flow  — manually trigger one reconciler scanner
 *
 * Expects `c.get("db")` to yield the drizzle database handle (see the Env
 * type declared in the accompanying .d.ts).
 */
export function createChannelPushAdminRoutes() {
    const app = new Hono();
    // ── GET /links ───────────────────────────────────────────────────
    // Status counts + filterable list of channel_booking_links. The
    // dashboard's "channel sync" view consumes this for both the summary
    // tiles ("X pending, Y failed, Z compensated") and the row table.
    app.get("/links", async (c) => {
        const db = c.get("db");
        const status = c.req.query("status");
        const channelId = c.req.query("channelId");
        const bookingId = c.req.query("bookingId");
        const limit = clampLimit(c.req.query("limit"));
        // sql`true` placeholders keep and(...) well-formed when a filter is absent.
        const filters = [
            status ? eq(channelBookingLinks.pushStatus, status) : sql `true`,
            channelId ? eq(channelBookingLinks.channelId, channelId) : sql `true`,
            bookingId ? eq(channelBookingLinks.bookingId, bookingId) : sql `true`,
        ];
        const rows = await db
            .select({
            link: channelBookingLinks,
            channelName: channels.name,
            channelKind: channels.kind,
        })
            .from(channelBookingLinks)
            .innerJoin(channels, eq(channelBookingLinks.channelId, channels.id))
            .where(and(...filters))
            .orderBy(desc(channelBookingLinks.lastPushAt), desc(channelBookingLinks.createdAt))
            .limit(limit);
        const counts = await db
            .select({
            status: channelBookingLinks.pushStatus,
            count: sql `count(*)::int`,
        })
            .from(channelBookingLinks)
            .where(and(...filters))
            .groupBy(channelBookingLinks.pushStatus);
        return c.json({
            data: rows,
            // `row`, not `c`: the previous callback parameter shadowed the
            // Hono context, an invitation to subtle bugs inside the closure.
            counts: Object.fromEntries(counts.map((row) => [row.status, row.count])),
        });
    });
    // ── POST /retry/:bookingId ───────────────────────────────────────
    // Operator-driven retry. Re-resolves push targets, upserts pending
    // intent rows, and runs processBookingPush inline. Idempotent on
    // the booking_links unique constraint, so accidental double-clicks
    // are safe.
    app.post("/retry/:bookingId", async (c) => {
        const bookingId = c.req.param("bookingId");
        try {
            await triggerBookingPushForBooking(bookingId);
            return c.json({ data: { ok: true, bookingId } });
        }
        catch (err) {
            const message = err instanceof Error ? err.message : String(err);
            return c.json({ error: message }, 500);
        }
    });
    // ── GET /deliveries ──────────────────────────────────────────────
    // Drilldown view: every webhook_deliveries row scoped to a booking,
    // channel, or both. Used by the "show me what we sent" link in the
    // dashboard's failure rows.
    app.get("/deliveries", async (c) => {
        const db = c.get("db");
        const bookingId = c.req.query("bookingId");
        const channelId = c.req.query("channelId");
        const limit = clampLimit(c.req.query("limit"));
        const filters = [
            eq(infraWebhookDeliveriesTable.sourceModule, "distribution"),
            bookingId
                ? and(eq(infraWebhookDeliveriesTable.sourceEntityModule, "bookings"), eq(infraWebhookDeliveriesTable.sourceEntityId, bookingId))
                : sql `true`,
            channelId ? eq(infraWebhookDeliveriesTable.targetRef, channelId) : sql `true`,
        ];
        const rows = await db
            .select()
            .from(infraWebhookDeliveriesTable)
            .where(and(...filters))
            .orderBy(desc(infraWebhookDeliveriesTable.createdAt))
            .limit(limit);
        return c.json({ data: rows });
    });
    // ── GET /throttling ──────────────────────────────────────────────
    // Per-channel rate-limited count in the last hour (or ?sinceMs=N).
    // The dashboard shows a yellow "throttled" badge when any channel
    // has > 0 rate_limited rows in the window. Per §14.5.
    app.get("/throttling", async (c) => {
        const db = c.get("db");
        const DEFAULT_WINDOW_MS = 60 * 60 * 1000;
        const parsed = Number.parseInt(c.req.query("sinceMs") ?? String(DEFAULT_WINDOW_MS), 10);
        // Normalize once so the echoed `sinceMs` always matches the window
        // actually queried. (Previously a non-numeric ?sinceMs echoed NaN —
        // serialized as null — while the query silently used the default,
        // and a negative value produced a cutoff in the future.)
        const sinceMs = Number.isFinite(parsed) && parsed > 0 ? parsed : DEFAULT_WINDOW_MS;
        const since = new Date(Date.now() - sinceMs);
        const rows = await db
            .select({
            channelId: infraWebhookDeliveriesTable.targetRef,
            count: sql `count(*)::int`,
        })
            .from(infraWebhookDeliveriesTable)
            .where(and(eq(infraWebhookDeliveriesTable.sourceModule, "distribution"), eq(infraWebhookDeliveriesTable.errorClass, "rate_limited"), gte(infraWebhookDeliveriesTable.createdAt, since)))
            .groupBy(infraWebhookDeliveriesTable.targetRef);
        return c.json({
            // Rows with a null targetRef can't be attributed to a channel.
            data: rows.filter((r) => r.channelId != null),
            sinceMs,
        });
    });
    // ── POST /reconcile/:flow ────────────────────────────────────────
    // Manual reconciler trigger for ops — useful when a channel comes
    // back up after a long outage and you don't want to wait for the
    // next scheduled run. `flow` is one of "bookings", "availability",
    // "content".
    app.post("/reconcile/:flow", async (c) => {
        const flow = c.req.param("flow");
        // Dispatch table replaces the previous copy-pasted switch cases.
        const reconcilers = {
            bookings: reconcileBookingLinks,
            availability: reconcileAvailability,
            content: reconcileContent,
        };
        const reconcile = reconcilers[flow];
        if (!reconcile) {
            return c.json({ error: `unknown flow "${flow}"` }, 400);
        }
        try {
            const result = await reconcile({});
            return c.json({ data: result });
        }
        catch (err) {
            const message = err instanceof Error ? err.message : String(err);
            return c.json({ error: message }, 500);
        }
    });
    return app;
}
|
|
160
|
+
/**
 * Parse a `limit` query parameter into a safe page size.
 * Missing, non-numeric, or non-positive input falls back to 50;
 * anything larger than 500 is capped at 500.
 */
function clampLimit(raw) {
    if (!raw)
        return 50;
    const n = Number.parseInt(raw, 10);
    return Number.isFinite(n) && n > 0 ? Math.min(n, 500) : 50;
}
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Availability-push pipeline.
|
|
3
|
+
*
|
|
4
|
+
* Triggered by `availability.slot.changed`. The subscriber upserts a
|
|
5
|
+
* `channel_availability_push_intents` row per (channel, slot) — concurrent
|
|
6
|
+
* supersession events collapse to one row via the unique constraint.
|
|
7
|
+
* The processor (`processAvailabilityPushIntents`) drains intents per
|
|
8
|
+
* channel, reads the *current* slot state, and dispatches via
|
|
9
|
+
* `adapter.pushAvailability()`. Stale-event protection comes from
|
|
10
|
+
* reading current state at processing time, not the event payload.
|
|
11
|
+
*
|
|
12
|
+
* Per docs/architecture/channel-push-architecture.md §5 + §12.2.
|
|
13
|
+
*/
|
|
14
|
+
import type { AnyDrizzleDb } from "@voyantjs/db";
|
|
15
|
+
import { channelProductMappings, channels } from "../schema.js";
|
|
16
|
+
import { type ChannelPushDeps } from "./types.js";
|
|
17
|
+
/** Stable string identifier for the availability-push workflow. */
|
|
18
|
+
export declare const CHANNEL_AVAILABILITY_PUSH_WORKFLOW_ID: "channel.availability.push";
|
|
19
|
+
export interface ResolveAllotmentTargetsForSlotInput {
|
|
20
|
+
slotId: string;
|
|
21
|
+
productId: string;
|
|
22
|
+
optionId: string | null;
|
|
23
|
+
}
|
|
24
|
+
/**
|
|
25
|
+
* Resolve the channels that hold an allotment for this slot/product/option.
|
|
26
|
+
* Per §7.4 — availability push uses `channel_inventory_allotments` (NOT
|
|
27
|
+
* `channel_product_mappings`), so channels mapped to the product but
|
|
28
|
+
* with no per-slot allotment don't receive pushes.
|
|
29
|
+
*
|
|
30
|
+
* v1 returns one row per channel that has an active allotment whose
|
|
31
|
+
* scope matches the slot (by product, optionally by option). Per-slot
|
|
32
|
+
* targeting via `channel_inventory_allotment_targets` is consulted in a
|
|
33
|
+
* future iteration; v1 dispatches at allotment-level so any allotment
|
|
34
|
+
* row covering the product/option triggers a push.
|
|
35
|
+
*/
|
|
36
|
+
export declare function resolveAllotmentTargetsForSlot(db: AnyDrizzleDb, input: ResolveAllotmentTargetsForSlotInput): Promise<Array<{
|
|
37
|
+
channelId: string;
|
|
38
|
+
sourceConnectionId: string;
|
|
39
|
+
mapping: typeof channelProductMappings.$inferSelect;
|
|
40
|
+
channel: typeof channels.$inferSelect;
|
|
41
|
+
}>>;
|
|
42
|
+
/**
|
|
43
|
+
* Insert/update an intent row per (channel, slot). The unique
|
|
44
|
+
* constraint on `(channel_id, slot_id)` collapses concurrent
|
|
45
|
+
* supersession events to one row; the worker reads the *current* slot
|
|
46
|
+
* state when it processes, so stale event payloads never propagate.
|
|
47
|
+
*/
|
|
48
|
+
export declare function upsertAvailabilityIntent(db: AnyDrizzleDb, input: {
|
|
49
|
+
channelId: string;
|
|
50
|
+
sourceConnectionId: string;
|
|
51
|
+
slotId: string;
|
|
52
|
+
productId: string;
|
|
53
|
+
optionId: string | null;
|
|
54
|
+
startsAt: Date;
|
|
55
|
+
}): Promise<void>;
|
|
56
|
+
export interface ProcessAvailabilityPushInput {
|
|
57
|
+
/** When set, drain intents only for this channel. Otherwise drain all. */
|
|
58
|
+
channelId?: string;
|
|
59
|
+
/** Max intents to process per call (across all channels). Default 100. */
|
|
60
|
+
limit?: number;
|
|
61
|
+
}
|
|
62
|
+
export interface ProcessAvailabilityPushResult {
|
|
63
|
+
attempted: number;
|
|
64
|
+
succeeded: number;
|
|
65
|
+
failed: number;
|
|
66
|
+
skipped: number;
|
|
67
|
+
}
|
|
68
|
+
/**
|
|
69
|
+
* Drain pending availability intents. Reads CURRENT slot state for each
|
|
70
|
+
* intent (so superseded values never propagate). On success, deletes the
|
|
71
|
+
* intent row. On failure, increments `attempts` and stamps `last_error`.
|
|
72
|
+
*
|
|
73
|
+
* Per §5.3 + §12.2.
|
|
74
|
+
*/
|
|
75
|
+
export declare function processAvailabilityPushIntents(input?: ProcessAvailabilityPushInput, deps?: ChannelPushDeps): Promise<ProcessAvailabilityPushResult>;
|
|
76
|
+
//# sourceMappingURL=availability-push.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"availability-push.d.ts","sourceRoot":"","sources":["../../src/channel-push/availability-push.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AAQH,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,cAAc,CAAA;AAIhD,OAAO,EAGL,sBAAsB,EACtB,QAAQ,EACT,MAAM,cAAc,CAAA;AAGrB,OAAO,EAAE,KAAK,eAAe,EAA4C,MAAM,YAAY,CAAA;AAE3F,mEAAmE;AACnE,eAAO,MAAM,qCAAqC,EAAG,2BAAoC,CAAA;AAEzF,MAAM,WAAW,mCAAmC;IAClD,MAAM,EAAE,MAAM,CAAA;IACd,SAAS,EAAE,MAAM,CAAA;IACjB,QAAQ,EAAE,MAAM,GAAG,IAAI,CAAA;CACxB;AAED;;;;;;;;;;;GAWG;AACH,wBAAsB,8BAA8B,CAClD,EAAE,EAAE,YAAY,EAChB,KAAK,EAAE,mCAAmC,GACzC,OAAO,CACR,KAAK,CAAC;IACJ,SAAS,EAAE,MAAM,CAAA;IACjB,kBAAkB,EAAE,MAAM,CAAA;IAC1B,OAAO,EAAE,OAAO,sBAAsB,CAAC,YAAY,CAAA;IACnD,OAAO,EAAE,OAAO,QAAQ,CAAC,YAAY,CAAA;CACtC,CAAC,CACH,CAkDA;AAED;;;;;GAKG;AACH,wBAAsB,wBAAwB,CAC5C,EAAE,EAAE,YAAY,EAChB,KAAK,EAAE;IACL,SAAS,EAAE,MAAM,CAAA;IACjB,kBAAkB,EAAE,MAAM,CAAA;IAC1B,MAAM,EAAE,MAAM,CAAA;IACd,SAAS,EAAE,MAAM,CAAA;IACjB,QAAQ,EAAE,MAAM,GAAG,IAAI,CAAA;IACvB,QAAQ,EAAE,IAAI,CAAA;CACf,GACA,OAAO,CAAC,IAAI,CAAC,CAsBf;AAED,MAAM,WAAW,4BAA4B;IAC3C,0EAA0E;IAC1E,SAAS,CAAC,EAAE,MAAM,CAAA;IAClB,0EAA0E;IAC1E,KAAK,CAAC,EAAE,MAAM,CAAA;CACf;AAED,MAAM,WAAW,6BAA6B;IAC5C,SAAS,EAAE,MAAM,CAAA;IACjB,SAAS,EAAE,MAAM,CAAA;IACjB,MAAM,EAAE,MAAM,CAAA;IACd,OAAO,EAAE,MAAM,CAAA;CAChB;AAED;;;;;;GAMG;AACH,wBAAsB,8BAA8B,CAClD,KAAK,GAAE,4BAAiC,EACxC,IAAI,CAAC,EAAE,eAAe,GACrB,OAAO,CAAC,6BAA6B,CAAC,CA+JxC"}
|
|
@@ -0,0 +1,238 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Availability-push pipeline.
|
|
3
|
+
*
|
|
4
|
+
* Triggered by `availability.slot.changed`. The subscriber upserts a
|
|
5
|
+
* `channel_availability_push_intents` row per (channel, slot) — concurrent
|
|
6
|
+
* supersession events collapse to one row via the unique constraint.
|
|
7
|
+
* The processor (`processAvailabilityPushIntents`) drains intents per
|
|
8
|
+
* channel, reads the *current* slot state, and dispatches via
|
|
9
|
+
* `adapter.pushAvailability()`. Stale-event protection comes from
|
|
10
|
+
* reading current state at processing time, not the event payload.
|
|
11
|
+
*
|
|
12
|
+
* Per docs/architecture/channel-push-architecture.md §5 + §12.2.
|
|
13
|
+
*/
|
|
14
|
+
import { availabilitySlots } from "@voyantjs/availability/schema";
|
|
15
|
+
import { AdapterRateLimitedError, } from "@voyantjs/catalog";
|
|
16
|
+
import { newId } from "@voyantjs/db/lib/typeid";
|
|
17
|
+
import { and, asc, eq, inArray, sql } from "drizzle-orm";
|
|
18
|
+
import { acquireToken, channelScopeKey, drainBucket } from "../rate-limit.js";
|
|
19
|
+
import { channelAvailabilityPushIntents, channelInventoryAllotments, channelProductMappings, channels, } from "../schema.js";
|
|
20
|
+
import { prepareOutboundEnvelope } from "../webhook-deliveries.js";
|
|
21
|
+
import { defaultLogger, getChannelPushDepsOrThrow } from "./types.js";
|
|
22
|
+
/** Stable string identifier for the availability-push workflow. */
|
|
23
|
+
export const CHANNEL_AVAILABILITY_PUSH_WORKFLOW_ID = "channel.availability.push";
|
|
24
|
+
/**
|
|
25
|
+
* Resolve the channels that hold an allotment for this slot/product/option.
|
|
26
|
+
* Per §7.4 — availability push uses `channel_inventory_allotments` (NOT
|
|
27
|
+
* `channel_product_mappings`), so channels mapped to the product but
|
|
28
|
+
* with no per-slot allotment don't receive pushes.
|
|
29
|
+
*
|
|
30
|
+
* v1 returns one row per channel that has an active allotment whose
|
|
31
|
+
* scope matches the slot (by product, optionally by option). Per-slot
|
|
32
|
+
* targeting via `channel_inventory_allotment_targets` is consulted in a
|
|
33
|
+
* future iteration; v1 dispatches at allotment-level so any allotment
|
|
34
|
+
* row covering the product/option triggers a push.
|
|
35
|
+
*/
|
|
36
|
+
export async function resolveAllotmentTargetsForSlot(db, input) {
    // Resolve allotments for this product (optionally option-scoped).
    // Step 1: channels holding an ACTIVE allotment covering this product.
    // When the slot has an option, an allotment matches if it is either
    // option-agnostic (option_id IS NULL) or scoped to that exact option;
    // when the slot has no option, only option-agnostic allotments match.
    // Inactive channels are excluded up front via channels.status.
    const allotmentRows = (await db
        .select({
        channelId: channelInventoryAllotments.channelId,
    })
        .from(channelInventoryAllotments)
        .innerJoin(channels, eq(channelInventoryAllotments.channelId, channels.id))
        .where(and(eq(channelInventoryAllotments.productId, input.productId), eq(channelInventoryAllotments.active, true), eq(channels.status, "active"), input.optionId
        ? sql `(${channelInventoryAllotments.optionId} IS NULL OR ${channelInventoryAllotments.optionId} = ${input.optionId})`
        : sql `${channelInventoryAllotments.optionId} IS NULL`)));
    if (allotmentRows.length === 0)
        return [];
    // Dedupe: several allotment rows can point at the same channel.
    const channelIds = Array.from(new Set(allotmentRows.map((r) => r.channelId)));
    // Step 2: of those channels, keep only the ones with an active product
    // mapping that has availability push enabled — the mapping supplies the
    // external ids and the source connection used to dispatch.
    const mappings = (await db
        .select({
        mapping: channelProductMappings,
        channel: channels,
    })
        .from(channelProductMappings)
        .innerJoin(channels, eq(channelProductMappings.channelId, channels.id))
        .where(and(eq(channelProductMappings.productId, input.productId), eq(channelProductMappings.active, true), eq(channelProductMappings.pushAvailability, true), inArray(channelProductMappings.channelId, channelIds))));
    // Mappings without a source connection can't be dispatched — drop them.
    return mappings
        .filter((row) => row.mapping.sourceConnectionId)
        .map((row) => ({
        channelId: row.channel.id,
        sourceConnectionId: row.mapping.sourceConnectionId,
        mapping: row.mapping,
        channel: row.channel,
    }));
}
|
|
67
|
+
/**
|
|
68
|
+
* Insert/update an intent row per (channel, slot). The unique
|
|
69
|
+
* constraint on `(channel_id, slot_id)` collapses concurrent
|
|
70
|
+
* supersession events to one row; the worker reads the *current* slot
|
|
71
|
+
* state when it processes, so stale event payloads never propagate.
|
|
72
|
+
*/
|
|
73
|
+
/**
 * Upsert one availability-push intent per (channel, slot).
 *
 * The unique constraint on (channel_id, slot_id) collapses concurrent
 * supersession events into a single row; the processor later reads the
 * CURRENT slot state, so nothing from the triggering event payload is
 * stored beyond identifiers.
 */
export async function upsertAvailabilityIntent(db, input) {
    const { channelId, sourceConnectionId, slotId, productId, optionId, startsAt } = input;
    const insertRow = {
        id: newId("channel_availability_push_intents"),
        channelId,
        sourceConnectionId,
        slotId,
        productId,
        optionId,
        startsAt,
    };
    // On conflict (same channel + slot) refresh the request timestamps and
    // give the intent a fresh retry budget — a new event supersedes any
    // prior failures for this key.
    const refresh = {
        requestedAt: new Date(),
        updatedAt: new Date(),
        attempts: 0,
        lastError: null,
    };
    await db
        .insert(channelAvailabilityPushIntents)
        .values(insertRow)
        .onConflictDoUpdate({
        target: [channelAvailabilityPushIntents.channelId, channelAvailabilityPushIntents.slotId],
        set: refresh,
    });
}
|
|
96
|
+
/**
|
|
97
|
+
* Drain pending availability intents. Reads CURRENT slot state for each
|
|
98
|
+
* intent (so superseded values never propagate). On success, deletes the
|
|
99
|
+
* intent row. On failure, increments `attempts` and stamps `last_error`.
|
|
100
|
+
*
|
|
101
|
+
* Per §5.3 + §12.2.
|
|
102
|
+
*/
|
|
103
|
+
export async function processAvailabilityPushIntents(input = {}, deps) {
    // Deps are injectable for tests; production falls back to the globally
    // registered channel-push deps (throws if the plugin wasn't installed).
    const { db, registry, logger = defaultLogger } = deps ?? getChannelPushDepsOrThrow();
    const limit = input.limit ?? 100;
    // Oldest-requested first, optionally scoped to one channel; inactive
    // channels are excluded so their intents wait until reactivation.
    const intents = (await db
        .select({
        intent: channelAvailabilityPushIntents,
        channel: channels,
    })
        .from(channelAvailabilityPushIntents)
        .innerJoin(channels, eq(channelAvailabilityPushIntents.channelId, channels.id))
        .where(and(input.channelId ? eq(channelAvailabilityPushIntents.channelId, input.channelId) : sql `true`, eq(channels.status, "active")))
        .orderBy(asc(channelAvailabilityPushIntents.requestedAt))
        .limit(limit));
    // Per-pass outcome counters, returned to the caller for diagnostics.
    let succeeded = 0;
    let failed = 0;
    let skipped = 0;
    for (const { intent, channel } of intents) {
        // Read current slot state — stale events naturally don't propagate.
        const [slot] = (await db
            .select()
            .from(availabilitySlots)
            .where(eq(availabilitySlots.id, intent.slotId))
            .limit(1));
        if (!slot) {
            // Slot deleted; drop the intent. Reconciler covers any drift.
            await db
                .delete(channelAvailabilityPushIntents)
                .where(eq(channelAvailabilityPushIntents.id, intent.id));
            skipped += 1;
            continue;
        }
        // Resolve the outbound adapter; intents for unsupported or
        // unregistered adapters are recorded as failures, not silently dropped.
        const adapter = registry.resolveByConnection(intent.sourceConnectionId);
        if (!adapter?.capabilities.supportsAvailabilityPush || !adapter.pushAvailability) {
            await stampIntentError(db, intent.id, intent.attempts + 1, adapter ? "adapter_unsupported" : "no_adapter_registered");
            failed += 1;
            continue;
        }
        // Look up external ids via channel_product_mappings.
        const [mapping] = (await db
            .select()
            .from(channelProductMappings)
            .where(and(eq(channelProductMappings.channelId, channel.id), eq(channelProductMappings.productId, intent.productId)))
            .limit(1));
        if (!mapping) {
            await stampIntentError(db, intent.id, intent.attempts + 1, "no_mapping");
            failed += 1;
            continue;
        }
        // Rate limit before dispatching. Availability uses the gated
        // priority (default 0.3) so bookings always pre-empt.
        const rlConfig = rateLimitConfigForChannel(channel);
        if (rlConfig) {
            const acq = await acquireToken(db, channelScopeKey(channel.id, intent.sourceConnectionId), rlConfig, "availability");
            if (!acq.acquired) {
                // Per §14.3: availability denials don't sleep. The next event
                // for the same key supersedes; intent stays for next pass.
                await stampIntentError(db, intent.id, intent.attempts + 1, "rate_limited");
                failed += 1;
                continue;
            }
        }
        // Build the push request from CURRENT slot + mapping state.
        // Unlimited slots are expressed as MAX_SAFE_INTEGER remaining pax.
        const request = {
            channelId: channel.id,
            externalProductId: mapping.externalProductId ?? "",
            externalRateId: mapping.externalRateId ?? undefined,
            externalCategoryId: mapping.externalCategoryId ?? undefined,
            slotId: slot.id,
            productId: slot.productId,
            optionId: slot.optionId ?? undefined,
            startsAt: slot.startsAt,
            remainingPax: slot.unlimited ? Number.MAX_SAFE_INTEGER : (slot.remainingPax ?? 0),
            source: "refresh",
        };
        const adapterCtx = {
            connection_id: intent.sourceConnectionId,
        };
        // Open a webhook_deliveries envelope BEFORE dispatching so even a
        // crash mid-push leaves an auditable row; completed below either way.
        const envelope = await prepareOutboundEnvelope(db, {
            sourceModule: "distribution",
            sourceEvent: "channel.availability.push",
            sourceEntityModule: "availability",
            sourceEntityId: slot.id,
            targetUrl: `adapter:${adapter.kind}`,
            targetKind: `channel:${adapter.kind}`,
            targetRef: channel.id,
            requestMethod: "POST",
            requestBody: request,
            attemptNumber: intent.attempts + 1,
        });
        try {
            const result = await adapter.pushAvailability(adapterCtx, request);
            await envelope.complete({ responseStatus: 200, responseBody: result });
            // Drain on success.
            await db
                .delete(channelAvailabilityPushIntents)
                .where(eq(channelAvailabilityPushIntents.id, intent.id));
            succeeded += 1;
        }
        catch (err) {
            const message = err instanceof Error ? err.message : String(err);
            const isRateLimited = err instanceof AdapterRateLimitedError;
            if (isRateLimited) {
                // Upstream told us to back off: empty the token bucket for
                // this channel/connection for the advertised retry window.
                await drainBucket(db, channelScopeKey(channel.id, intent.sourceConnectionId), err.retryAfterMs);
            }
            await envelope.complete({
                errorClass: isRateLimited ? "rate_limited" : "adapter_error",
                errorMessage: message,
            });
            // Keep the intent row for a later pass; record the failure.
            await stampIntentError(db, intent.id, intent.attempts + 1, message);
            failed += 1;
            logger.error?.(`pushAvailability failed for slot ${slot.id} channel ${channel.id}`, {
                error: message,
            });
        }
    }
    return {
        attempted: intents.length,
        succeeded,
        failed,
        skipped,
    };
}
|
|
224
|
+
/**
 * Record a failed push attempt on an intent row: bump `attempts`, stamp
 * `last_error`, and touch `updated_at`. The row is intentionally kept so
 * a later pass (or a superseding event) can retry it.
 */
async function stampIntentError(db, id, attempts, message) {
    const patch = { attempts, lastError: message, updatedAt: new Date() };
    await db
        .update(channelAvailabilityPushIntents)
        .set(patch)
        .where(eq(channelAvailabilityPushIntents.id, id));
}
|
|
230
|
+
/**
 * Translate a channel row's rate-limit columns into an acquireToken()
 * config. Returns null when either rps or burst is unset (or zero),
 * which disables rate limiting for that channel entirely.
 */
function rateLimitConfigForChannel(channel) {
    const { rateLimitRps: rps, rateLimitBurst: burst, rateLimitPriorityGates } = channel;
    if (!rps || !burst)
        return null;
    return { rps, burst, priorityGates: rateLimitPriorityGates ?? undefined };
}
|
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Booking-push pipeline.
|
|
3
|
+
*
|
|
4
|
+
* Triggered by `booking.confirmed`. The subscriber writes pending
|
|
5
|
+
* `channel_booking_links` rows and returns immediately (per the EventBus
|
|
6
|
+
* fire-and-forget contract). The durable processor (`processBookingPush`)
|
|
7
|
+
* drains those rows, calls `adapter.pushBooking()` per link, and marks
|
|
8
|
+
* each row `ok` or `failed`.
|
|
9
|
+
*
|
|
10
|
+
* The processor is a plain async function so it's callable from:
|
|
11
|
+
* - The `booking.confirmed` subscriber (inline, dev/single-process)
|
|
12
|
+
* - The `channel.booking.push` durable workflow's body (production)
|
|
13
|
+
* - The reconciler (Phase G) for catch-up after long outages
|
|
14
|
+
* - Tests / admin retry endpoints
|
|
15
|
+
*
|
|
16
|
+
* Per docs/architecture/channel-push-architecture.md §4 + §12.1.
|
|
17
|
+
*/
|
|
18
|
+
import type { AnyDrizzleDb } from "@voyantjs/db";
|
|
19
|
+
import { channelProductMappings, channels } from "@voyantjs/distribution/schema";
|
|
20
|
+
import { type ChannelPushDeps } from "./types.js";
|
|
21
|
+
/** Stable string identifier for the booking-push workflow. */
export declare const CHANNEL_BOOKING_PUSH_WORKFLOW_ID: "channel.booking.push";
export interface ProcessBookingPushInput {
    /** Primary key of the booking whose pending links should be drained. */
    bookingId: string;
}
export interface ProcessBookingPushResult {
    /** The booking the processor ran against (echoed from the input). */
    bookingId: string;
    /** Count of links the processor attempted to push in this run. */
    attempted: number;
    /** Count of links marked `ok` in this run. */
    succeeded: number;
    /** Count of links marked `failed` in this run. */
    failed: number;
    /**
     * Number of succeeded links that were compensated (rolled back via
     * `adapter.cancel`) because the contract's `compensation` policy is
     * `"strict-atomic"` and at least one sibling failed. Always 0 under
     * the default `"eventually-consistent"` policy.
     */
    compensated: number;
    /** Per-link outcomes for diagnostics. */
    outcomes: Array<{
        channelId: string;
        /** Null for booking-level pushes not tied to a single item. */
        bookingItemId: string | null;
        status: "ok" | "failed" | "skipped" | "compensated";
        /** Upstream's identifier for the pushed booking, when it returned one. */
        upstreamRef?: string;
        /** Failure detail for `failed`/`compensated` outcomes. */
        error?: string;
    }>;
}
/**
 * Compensation modes per `channel_contracts.policy.compensation`.
 *
 * - `eventually-consistent` (default): partial successes stay; ops gets
 *   alerted via `webhook_deliveries` and retries via the reconciler.
 *   Usually correct for travel inventory — succeeded channels know
 *   about the booking and will honor it; the failed ones converge.
 * - `strict-atomic`: on any per-link failure, the engine calls
 *   `adapter.cancel` for succeeded siblings and marks them
 *   `push_status = 'compensated'`. Use only when ALL channels MUST
 *   agree on the booking's existence (rare).
 *
 * Per docs/architecture/channel-push-architecture.md §4.2 + §9.
 */
export type CompensationPolicy = "strict-atomic" | "eventually-consistent";
/**
 * Build the stable idempotency key the upstream uses to dedupe pushes
 * across retries. Per §3.
 *
 * @param bookingId     Booking primary key.
 * @param bookingItemId Item primary key, or null for a booking-level push.
 * @param channelId     Target channel primary key.
 * @returns Deterministic key — identical inputs always yield the same key.
 */
export declare function bookingPushIdempotencyKey(bookingId: string, bookingItemId: string | null, channelId: string): string;
/**
 * Resolve the channels that want a push for this booking. One row per
 * (booking_item, channel) pair where the mapping has push_bookings =
 * true and the channel is active. Booking-level pushes (no item id) are
 * supported via a synthetic item id of null.
 *
 * Per §7.4 — booking push uses `channel_product_mappings` (not
 * `channel_inventory_allotments`) so channels mapped to a product
 * without a slot allotment still receive the push.
 *
 * @param db        Drizzle database handle.
 * @param bookingId Booking to resolve targets for.
 * @returns One target per (item, channel) pair, with the mapping and
 *          channel rows needed to perform the push.
 */
export declare function resolveBookingPushTargets(db: AnyDrizzleDb, bookingId: string): Promise<Array<{
    bookingItemId: string | null;
    productId: string;
    mapping: typeof channelProductMappings.$inferSelect;
    channel: typeof channels.$inferSelect;
}>>;
/**
 * Insert pending `channel_booking_links` rows for each push target.
 * `INSERT ... ON CONFLICT DO NOTHING` against the
 * `(channel_id, booking_id, COALESCE(booking_item_id, ''))` unique
 * index — durable handoff with no doubled-push risk per §7.1.
 *
 * Returns the count of newly-inserted rows. Subscribers don't strictly
 * need this — the processor reads pending rows by query — but tests
 * find it useful.
 *
 * @param db        Drizzle database handle.
 * @param bookingId Booking the links belong to.
 * @param targets   Push targets, typically from `resolveBookingPushTargets`.
 * @returns Number of rows actually inserted (conflicts are skipped).
 */
export declare function upsertPendingBookingLinks(db: AnyDrizzleDb, bookingId: string, targets: Array<{
    bookingItemId: string | null;
    mapping: typeof channelProductMappings.$inferSelect;
    channel: typeof channels.$inferSelect;
}>): Promise<number>;
/**
 * Drain pending `channel_booking_links` rows for one booking and call
 * `adapter.pushBooking()` per link. Idempotent: re-running the
 * processor against the same booking is safe — the `idempotency_key`
 * column ensures retries don't double-push upstream.
 *
 * Each adapter call:
 *  1. Acquires a token from the per-channel/connection bucket.
 *  2. Calls the adapter through `prepareOutboundEnvelope` so every
 *     attempt lands in `webhook_deliveries` with redacted headers.
 *  3. Updates the link to `ok` (with upstream_ref, hash) or `failed`
 *     (with last_error, attempts++).
 *
 * Per §4.2 + §12.1.
 *
 * @param input Identifies the booking to drain.
 * @param deps  Optional dependency overrides (db, adapters, clock — see
 *              `ChannelPushDeps`); defaults are used when omitted.
 * @returns Per-run counters plus per-link outcomes for diagnostics.
 */
export declare function processBookingPush(input: ProcessBookingPushInput, deps?: ChannelPushDeps): Promise<ProcessBookingPushResult>;
//# sourceMappingURL=booking-push.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"booking-push.d.ts","sourceRoot":"","sources":["../../src/channel-push/booking-push.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;GAgBG;AASH,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,cAAc,CAAA;AAEhD,OAAO,EAGL,sBAAsB,EACtB,QAAQ,EACT,MAAM,+BAA+B,CAAA;AAMtC,OAAO,EAAE,KAAK,eAAe,EAA4C,MAAM,YAAY,CAAA;AAE3F,8DAA8D;AAC9D,eAAO,MAAM,gCAAgC,EAAG,sBAA+B,CAAA;AAE/E,MAAM,WAAW,uBAAuB;IACtC,SAAS,EAAE,MAAM,CAAA;CAClB;AAED,MAAM,WAAW,wBAAwB;IACvC,SAAS,EAAE,MAAM,CAAA;IACjB,SAAS,EAAE,MAAM,CAAA;IACjB,SAAS,EAAE,MAAM,CAAA;IACjB,MAAM,EAAE,MAAM,CAAA;IACd;;;;;OAKG;IACH,WAAW,EAAE,MAAM,CAAA;IACnB,yCAAyC;IACzC,QAAQ,EAAE,KAAK,CAAC;QACd,SAAS,EAAE,MAAM,CAAA;QACjB,aAAa,EAAE,MAAM,GAAG,IAAI,CAAA;QAC5B,MAAM,EAAE,IAAI,GAAG,QAAQ,GAAG,SAAS,GAAG,aAAa,CAAA;QACnD,WAAW,CAAC,EAAE,MAAM,CAAA;QACpB,KAAK,CAAC,EAAE,MAAM,CAAA;KACf,CAAC,CAAA;CACH;AAED;;;;;;;;;;;;;GAaG;AACH,MAAM,MAAM,kBAAkB,GAAG,eAAe,GAAG,uBAAuB,CAAA;AAE1E;;;GAGG;AACH,wBAAgB,yBAAyB,CACvC,SAAS,EAAE,MAAM,EACjB,aAAa,EAAE,MAAM,GAAG,IAAI,EAC5B,SAAS,EAAE,MAAM,GAChB,MAAM,CAER;AAED;;;;;;;;;GASG;AACH,wBAAsB,yBAAyB,CAC7C,EAAE,EAAE,YAAY,EAChB,SAAS,EAAE,MAAM,GAChB,OAAO,CACR,KAAK,CAAC;IACJ,aAAa,EAAE,MAAM,GAAG,IAAI,CAAA;IAC5B,SAAS,EAAE,MAAM,CAAA;IACjB,OAAO,EAAE,OAAO,sBAAsB,CAAC,YAAY,CAAA;IACnD,OAAO,EAAE,OAAO,QAAQ,CAAC,YAAY,CAAA;CACtC,CAAC,CACH,CAyDA;AAED;;;;;;;;;GASG;AACH,wBAAsB,yBAAyB,CAC7C,EAAE,EAAE,YAAY,EAChB,SAAS,EAAE,MAAM,EACjB,OAAO,EAAE,KAAK,CAAC;IACb,aAAa,EAAE,MAAM,GAAG,IAAI,CAAA;IAC5B,OAAO,EAAE,OAAO,sBAAsB,CAAC,YAAY,CAAA;IACnD,OAAO,EAAE,OAAO,QAAQ,CAAC,YAAY,CAAA;CACtC,CAAC,GACD,OAAO,CAAC,MAAM,CAAC,CAuBjB;AAED;;;;;;;;;;;;;;GAcG;AACH,wBAAsB,kBAAkB,CACtC,KAAK,EAAE,uBAAuB,EAC9B,IAAI,CAAC,EAAE,eAAe,GACrB,OAAO,CAAC,wBAAwB,CAAC,CA8PnC"}
|