@paymentsdb/sync-engine 0.0.9 → 0.0.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{chunk-2N7737DI.js → chunk-CPILJLAL.js} +1 -1
- package/dist/{chunk-CVZHIBXG.js → chunk-CW2GRCQD.js} +1 -1
- package/dist/{chunk-5OQVNBPL.js → chunk-JYLI5JEV.js} +2 -2
- package/dist/{chunk-52SZMUB6.js → chunk-KN5PW4SC.js} +2 -2
- package/dist/cli/index.cjs +2 -2
- package/dist/cli/index.js +4 -4
- package/dist/cli/lib.cjs +2 -2
- package/dist/cli/lib.js +4 -4
- package/dist/index.cjs +1 -1
- package/dist/index.js +2 -2
- package/dist/supabase/index.cjs +2 -2
- package/dist/supabase/index.js +2 -2
- package/package.json +1 -1
@@ -1,6 +1,6 @@
import {
  package_default
-} from "./chunk-CVZHIBXG.js";
+} from "./chunk-CW2GRCQD.js";

// src/supabase/supabase.ts
import { SupabaseManagementAPI } from "supabase-management-js";
@@ -12,7 +12,7 @@ var stripe_setup_default = "import { StripeSync, runMigrations, VERSION } from '
var stripe_webhook_default = "import { StripeSync } from 'npm:@paymentsdb/sync-engine'\n\nDeno.serve(async (req) => {\n if (req.method !== 'POST') {\n return new Response('Method not allowed', { status: 405 })\n }\n\n const sig = req.headers.get('stripe-signature')\n if (!sig) {\n return new Response('Missing stripe-signature header', { status: 400 })\n }\n\n const rawDbUrl = Deno.env.get('SUPABASE_DB_URL')\n if (!rawDbUrl) {\n return new Response(JSON.stringify({ error: 'SUPABASE_DB_URL not set' }), { status: 500 })\n }\n const dbUrl = rawDbUrl.replace(/[?&]sslmode=[^&]*/g, '').replace(/[?&]$/, '')\n\n const stripeSync = new StripeSync({\n poolConfig: { connectionString: dbUrl, max: 1 },\n stripeSecretKey: Deno.env.get('STRIPE_SECRET_KEY')!,\n appName: Deno.env.get('STRIPE_APP_NAME') || 'PaymentsDB',\n })\n\n try {\n const rawBody = new Uint8Array(await req.arrayBuffer())\n await stripeSync.processWebhook(rawBody, sig)\n return new Response(JSON.stringify({ received: true }), {\n status: 200,\n headers: { 'Content-Type': 'application/json' },\n })\n } catch (error) {\n console.error('Webhook processing error:', error)\n const isSignatureError =\n error.message?.includes('signature') || error.type === 'StripeSignatureVerificationError'\n const status = isSignatureError ? 400 : 500\n return new Response(JSON.stringify({ error: error.message }), {\n status,\n headers: { 'Content-Type': 'application/json' },\n })\n } finally {\n await stripeSync.postgresClient.pool.end()\n }\n})\n";
|
// raw-ts:/Users/prasoon/work/paymentsdb-sync-engine/packages/sync-engine/src/supabase/edge-functions/stripe-worker.ts
-
var stripe_worker_default = "/**\n * Stripe Sync Worker\n *\n * Triggered by pg_cron at a configurable interval (default: 60 seconds). Uses pgmq for durable work queue.\n *\n * Flow:\n * 1. Read batch of messages from pgmq (qty=10, vt=60s)\n * 2. If queue empty: enqueue all objects (continuous sync), then self-trigger\n * 3. Process messages in parallel (Promise.all):\n * - processNext(object)\n * - Delete message on success\n * - Re-enqueue if hasMore\n * 4. Self-trigger if work was re-enqueued (with progress) and run isn't too old\n * 5. Return results summary\n *\n * Concurrency:\n * - Multiple workers can run concurrently via overlapping pg_cron triggers.\n * - Each worker processes its batch of messages in parallel (Promise.all).\n * - pgmq visibility timeout prevents duplicate message reads across workers.\n * - processNext() is idempotent (uses internal cursor tracking), so duplicate\n * processing on timeout/crash is safe.\n *\n * Self-triggering:\n * - Accelerates initial sync by not waiting for next cron tick.\n * - Only triggers when work was re-enqueued AND progress was made.\n * - Stops after MAX_RUN_AGE_MS to prevent runaway loops.\n * - Cron remains as fallback for errors/timeouts.\n */\n\nimport { StripeSync } from 'npm:@paymentsdb/sync-engine'\nimport postgres from 'npm:postgres'\n\nconst QUEUE_NAME = 'stripe_sync_work'\nconst VISIBILITY_TIMEOUT = 60 // seconds\nconst BATCH_SIZE = 10\nconst MAX_RUN_AGE_MS = 6 * 60 * 60 * 1000 // 6 hours - stop self-triggering after this\n\nDeno.serve(async (req) => {\n const authHeader = req.headers.get('Authorization')\n if (!authHeader?.startsWith('Bearer ')) {\n return new Response('Unauthorized', { status: 401 })\n }\n\n const token = authHeader.substring(7) // Remove 'Bearer '\n\n const rawDbUrl = Deno.env.get('SUPABASE_DB_URL')\n if (!rawDbUrl) {\n return new Response(JSON.stringify({ error: 'SUPABASE_DB_URL not set' }), { status: 500 })\n }\n const dbUrl = rawDbUrl.replace(/[?&]sslmode=[^&]*/g, '').replace(/[?&]$/, '')\n\n let sql\n let stripeSync\n\n try {\n sql = postgres(dbUrl, { max: 1, prepare: false })\n } catch (error) {\n return new Response(\n JSON.stringify({\n error: 'Failed to create postgres connection',\n details: error.message,\n stack: error.stack,\n }),\n { status: 500, headers: { 'Content-Type': 'application/json' } }\n )\n }\n\n try {\n // Validate that the token matches the unique worker secret stored in vault\n const vaultResult = await sql`\n SELECT decrypted_secret\n FROM vault.decrypted_secrets\n WHERE name = 'stripe_sync_worker_secret'\n `\n\n if (vaultResult.length === 0) {\n await sql.end()\n return new Response('Worker secret not configured in vault', { status: 500 })\n }\n\n const storedSecret = vaultResult[0].decrypted_secret\n if (token !== storedSecret) {\n await sql.end()\n return new Response('Forbidden: Invalid worker secret', { status: 403 })\n }\n\n stripeSync = new StripeSync({\n poolConfig: { connectionString: dbUrl, max: 1 },\n stripeSecretKey: Deno.env.get('STRIPE_SECRET_KEY')!,\n enableSigma: (Deno.env.get('ENABLE_SIGMA') ?? 
'false') === 'true',\n appName: Deno.env.get('STRIPE_APP_NAME') || 'PaymentsDB',\n })\n } catch (error) {\n await sql.end()\n return new Response(\n JSON.stringify({\n error: 'Failed to create StripeSync',\n details: error.message,\n stack: error.stack,\n }),\n { status: 500, headers: { 'Content-Type': 'application/json' } }\n )\n }\n\n try {\n // Read batch of messages from queue\n const messages = await sql`\n SELECT * FROM pgmq.read(${QUEUE_NAME}::text, ${VISIBILITY_TIMEOUT}::int, ${BATCH_SIZE}::int)\n `\n\n // If queue empty, enqueue all objects for continuous sync\n if (messages.length === 0) {\n // Create sync run to make enqueued work visible (status='pending')\n const { runKey, objects } = await stripeSync.joinOrCreateSyncRun('worker')\n const msgs = objects.map((object) => JSON.stringify({ object }))\n\n await sql`\n SELECT pgmq.send_batch(\n ${QUEUE_NAME}::text,\n ${sql.array(msgs)}::jsonb[]\n )\n `\n\n // Self-trigger to start processing immediately (don't wait for cron)\n // But only if run isn't too old (prevents runaway on stale runs)\n let selfTriggered = false\n if (objects.length > 0) {\n const runAgeMs = Date.now() - runKey.runStartedAt.getTime()\n if (runAgeMs <= MAX_RUN_AGE_MS) {\n try {\n await sql`SELECT stripe.trigger_worker()`\n selfTriggered = true\n } catch (err) {\n console.warn('Failed to self-trigger after enqueue:', err)\n }\n } else {\n console.warn(\n `Worker: run too old (${Math.round(runAgeMs / 1000 / 60)} min), not self-triggering after enqueue`\n )\n }\n }\n\n return new Response(JSON.stringify({ enqueued: objects.length, objects, selfTriggered }), {\n status: 200,\n headers: { 'Content-Type': 'application/json' },\n })\n }\n\n //
+
var stripe_worker_default = "/**\n * Stripe Sync Worker\n *\n * Triggered by pg_cron at a configurable interval (default: 60 seconds). Uses pgmq for durable work queue.\n *\n * Flow:\n * 1. Read batch of messages from pgmq (qty=10, vt=60s)\n * 2. If queue empty: enqueue all objects (continuous sync), then self-trigger\n * 3. Process messages in parallel (Promise.all):\n * - processNext(object)\n * - Delete message on success\n * - Re-enqueue if hasMore\n * 4. Self-trigger if work was re-enqueued (with progress) and run isn't too old\n * 5. Return results summary\n *\n * Concurrency:\n * - Multiple workers can run concurrently via overlapping pg_cron triggers.\n * - Each worker processes its batch of messages in parallel (Promise.all).\n * - pgmq visibility timeout prevents duplicate message reads across workers.\n * - processNext() is idempotent (uses internal cursor tracking), so duplicate\n * processing on timeout/crash is safe.\n *\n * Self-triggering:\n * - Accelerates initial sync by not waiting for next cron tick.\n * - Only triggers when work was re-enqueued AND progress was made.\n * - Stops after MAX_RUN_AGE_MS to prevent runaway loops.\n * - Cron remains as fallback for errors/timeouts.\n */\n\nimport { StripeSync } from 'npm:@paymentsdb/sync-engine'\nimport postgres from 'npm:postgres'\n\nconst QUEUE_NAME = 'stripe_sync_work'\nconst VISIBILITY_TIMEOUT = 60 // seconds\nconst BATCH_SIZE = 10\nconst MAX_RUN_AGE_MS = 6 * 60 * 60 * 1000 // 6 hours - stop self-triggering after this\n\nDeno.serve(async (req) => {\n const authHeader = req.headers.get('Authorization')\n if (!authHeader?.startsWith('Bearer ')) {\n return new Response('Unauthorized', { status: 401 })\n }\n\n const token = authHeader.substring(7) // Remove 'Bearer '\n\n const rawDbUrl = Deno.env.get('SUPABASE_DB_URL')\n if (!rawDbUrl) {\n return new Response(JSON.stringify({ error: 'SUPABASE_DB_URL not set' }), { status: 500 })\n }\n const dbUrl = rawDbUrl.replace(/[?&]sslmode=[^&]*/g, '').replace(/[?&]$/, '')\n\n let sql\n let stripeSync\n\n try {\n sql = postgres(dbUrl, { max: 1, prepare: false })\n } catch (error) {\n return new Response(\n JSON.stringify({\n error: 'Failed to create postgres connection',\n details: error.message,\n stack: error.stack,\n }),\n { status: 500, headers: { 'Content-Type': 'application/json' } }\n )\n }\n\n try {\n // Validate that the token matches the unique worker secret stored in vault\n const vaultResult = await sql`\n SELECT decrypted_secret\n FROM vault.decrypted_secrets\n WHERE name = 'stripe_sync_worker_secret'\n `\n\n if (vaultResult.length === 0) {\n await sql.end()\n return new Response('Worker secret not configured in vault', { status: 500 })\n }\n\n const storedSecret = vaultResult[0].decrypted_secret\n if (token !== storedSecret) {\n await sql.end()\n return new Response('Forbidden: Invalid worker secret', { status: 403 })\n }\n\n stripeSync = new StripeSync({\n poolConfig: { connectionString: dbUrl, max: 1 },\n stripeSecretKey: Deno.env.get('STRIPE_SECRET_KEY')!,\n enableSigma: (Deno.env.get('ENABLE_SIGMA') ?? 
'false') === 'true',\n appName: Deno.env.get('STRIPE_APP_NAME') || 'PaymentsDB',\n })\n } catch (error) {\n await sql.end()\n return new Response(\n JSON.stringify({\n error: 'Failed to create StripeSync',\n details: error.message,\n stack: error.stack,\n }),\n { status: 500, headers: { 'Content-Type': 'application/json' } }\n )\n }\n\n try {\n // Read batch of messages from queue\n const messages = await sql`\n SELECT * FROM pgmq.read(${QUEUE_NAME}::text, ${VISIBILITY_TIMEOUT}::int, ${BATCH_SIZE}::int)\n `\n\n // If queue empty, enqueue all objects for continuous sync\n if (messages.length === 0) {\n // Create sync run to make enqueued work visible (status='pending')\n const { runKey, objects } = await stripeSync.joinOrCreateSyncRun('worker')\n const msgs = objects.map((object) => JSON.stringify({ object }))\n\n await sql`\n SELECT pgmq.send_batch(\n ${QUEUE_NAME}::text,\n ${sql.array(msgs)}::jsonb[]\n )\n `\n\n // Self-trigger to start processing immediately (don't wait for cron)\n // But only if run isn't too old (prevents runaway on stale runs)\n let selfTriggered = false\n if (objects.length > 0) {\n const runAgeMs = Date.now() - runKey.runStartedAt.getTime()\n if (runAgeMs <= MAX_RUN_AGE_MS) {\n try {\n await sql`SELECT stripe.trigger_worker()`\n selfTriggered = true\n } catch (err) {\n console.warn('Failed to self-trigger after enqueue:', err)\n }\n } else {\n console.warn(\n `Worker: run too old (${Math.round(runAgeMs / 1000 / 60)} min), not self-triggering after enqueue`\n )\n }\n }\n\n return new Response(JSON.stringify({ enqueued: objects.length, objects, selfTriggered }), {\n status: 200,\n headers: { 'Content-Type': 'application/json' },\n })\n }\n\n // Deduplicate messages by object type to prevent parallel processing of the same object.\n // Multiple messages for the same object can accumulate if previous workers re-enqueued\n // before completing, leading to race conditions where the same page is fetched multiple times.\n const messagesByObject = new Map<string, typeof messages>()\n for (const msg of messages) {\n const { object } = msg.message as { object: string }\n if (!messagesByObject.has(object)) {\n messagesByObject.set(object, [])\n }\n messagesByObject.get(object)!.push(msg)\n }\n\n // Process one message per unique object type in parallel\n // Pass triggeredBy: 'worker' so all processing uses the same run as joinOrCreateSyncRun('worker')\n const results = await Promise.all(\n Array.from(messagesByObject.entries()).map(async ([object, msgs]) => {\n try {\n const result = await stripeSync.processNext(object, { triggeredBy: 'worker' })\n\n // Delete ALL messages for this object (not just one) to prevent duplicate processing\n const msgIds = msgs.map((m) => m.msg_id)\n await sql`SELECT pgmq.delete(${QUEUE_NAME}::text, ${sql.array(msgIds)}::bigint[])`\n\n // Re-enqueue only ONCE if more pages (not once per message)\n if (result.hasMore) {\n await sql`SELECT pgmq.send(${QUEUE_NAME}::text, ${sql.json({ object })}::jsonb)`\n }\n\n return { object, ...result, reenqueued: result.hasMore, deduplicatedCount: msgs.length }\n } catch (error) {\n // Log error but continue to next object\n // Messages will become visible again after visibility timeout\n console.error(`Error processing ${object}:`, error)\n return {\n object,\n processed: 0,\n hasMore: false,\n reenqueued: false,\n runStartedAt: null,\n deduplicatedCount: msgs.length,\n error: error.message,\n stack: error.stack,\n }\n }\n })\n )\n\n // Self-trigger if work was re-enqueued with progress, and run isn't too old\n 
let selfTriggered = false\n const successfulResults = results.filter((r) => r.reenqueued && r.processed > 0 && !r.error)\n\n if (successfulResults.length > 0) {\n // Use runStartedAt from the first successful result (all use same run)\n const runStartedAt = successfulResults[0].runStartedAt\n if (runStartedAt) {\n const runAgeMs = Date.now() - runStartedAt.getTime()\n if (runAgeMs <= MAX_RUN_AGE_MS) {\n try {\n await sql`SELECT stripe.trigger_worker()`\n selfTriggered = true\n } catch (err) {\n console.warn('Failed to self-trigger after processing:', err)\n }\n } else {\n console.warn(\n `Worker: run too old (${Math.round(runAgeMs / 1000 / 60)} min), not self-triggering`\n )\n }\n }\n }\n\n return new Response(JSON.stringify({ results, selfTriggered }), {\n status: 200,\n headers: { 'Content-Type': 'application/json' },\n })\n } catch (error) {\n console.error('Worker error:', error)\n return new Response(JSON.stringify({ error: error.message, stack: error.stack }), {\n status: 500,\n headers: { 'Content-Type': 'application/json' },\n })\n } finally {\n if (sql) await sql.end()\n if (stripeSync) await stripeSync.postgresClient.pool.end()\n }\n})\n";
|
// raw-ts:/Users/prasoon/work/paymentsdb-sync-engine/packages/sync-engine/src/supabase/edge-functions/sigma-data-worker.ts
var sigma_data_worker_default = "/**\n * Stripe Sigma Data Worker.\n *\n * Hourly cron starts a run; self-trigger continues until all objects finish.\n * Progress persists in _sync_runs and _sync_obj_runs across invocations.\n */\n\nimport { StripeSync } from 'npm:stripe-experiment-sync'\nimport postgres from 'npm:postgres'\n\nconst BATCH_SIZE = 1\nconst MAX_RUN_AGE_MS = 6 * 60 * 60 * 1000\nconst jsonResponse = (body: unknown, status = 200) =>\n new Response(JSON.stringify(body), {\n status,\n headers: { 'Content-Type': 'application/json' },\n })\n\nDeno.serve(async (req) => {\n const authHeader = req.headers.get('Authorization')\n if (!authHeader?.startsWith('Bearer ')) {\n return new Response('Unauthorized', { status: 401 })\n }\n\n const token = authHeader.substring(7)\n\n const rawDbUrl = Deno.env.get('SUPABASE_DB_URL')\n if (!rawDbUrl) {\n return jsonResponse({ error: 'SUPABASE_DB_URL not set' }, 500)\n }\n const dbUrl = rawDbUrl.replace(/[?&]sslmode=[^&]*/g, '').replace(/[?&]$/, '')\n\n let sql: ReturnType<typeof postgres> | undefined\n let stripeSync: StripeSync | undefined\n\n try {\n sql = postgres(dbUrl, { max: 1, prepare: false })\n } catch (error) {\n return jsonResponse(\n {\n error: 'Failed to create postgres connection',\n details: error.message,\n stack: error.stack,\n },\n 500\n )\n }\n\n try {\n // Validate the token against vault secret\n const vaultResult = await sql`\n SELECT decrypted_secret\n FROM vault.decrypted_secrets\n WHERE name = 'stripe_sigma_worker_secret'\n `\n\n if (vaultResult.length === 0) {\n await sql.end()\n return new Response('Sigma worker secret not configured in vault', { status: 500 })\n }\n\n const storedSecret = vaultResult[0].decrypted_secret\n if (token !== storedSecret) {\n await sql.end()\n return new Response('Forbidden: Invalid sigma worker secret', { status: 403 })\n }\n\n stripeSync = new StripeSync({\n poolConfig: { connectionString: dbUrl, max: 1 },\n stripeSecretKey: Deno.env.get('STRIPE_SECRET_KEY')!,\n enableSigma: true,\n sigmaPageSizeOverride: 1000,\n })\n } catch (error) {\n await sql.end()\n return jsonResponse(\n {\n error: 'Failed to create StripeSync',\n details: error.message,\n stack: error.stack,\n },\n 500\n )\n }\n\n try {\n const accountId = await stripeSync.getAccountId()\n const sigmaObjects = stripeSync.getSupportedSigmaObjects()\n\n if (sigmaObjects.length === 0) {\n return jsonResponse({ message: 'No Sigma objects configured for sync' })\n }\n\n // Get or create sync run for sigma-worker (isolated from stripe-worker)\n const runResult = await stripeSync.postgresClient.getOrCreateSyncRun(accountId, 'sigma-worker')\n const runStartedAt =\n runResult?.runStartedAt ??\n (await stripeSync.postgresClient.getActiveSyncRun(accountId, 'sigma-worker'))?.runStartedAt\n\n if (!runStartedAt) {\n throw new Error('Failed to get or create sync run for sigma worker')\n }\n\n // Legacy cleanup: remove any prefixed sigma object runs that can block concurrency.\n // Previous versions stored objects as \"sigma.<table>\" which no longer matches processNext.\n await stripeSync.postgresClient.query(\n `UPDATE \"stripe\".\"_sync_obj_runs\"\n SET status = 'error',\n error_message = 'Legacy sigma worker prefix run (sigma.*); superseded by unprefixed runs',\n completed_at = now()\n WHERE \"_account_id\" = $1\n AND run_started_at = $2\n AND object LIKE 'sigma.%'\n AND status IN ('pending', 'running')`,\n [accountId, runStartedAt]\n )\n\n // Stop self-triggering after MAX_RUN_AGE_MS.\n const runAgeMs = Date.now() - runStartedAt.getTime()\n if 
(runAgeMs > MAX_RUN_AGE_MS) {\n console.warn(\n `Sigma worker: run too old (${Math.round(runAgeMs / 1000 / 60)} min), closing without self-trigger`\n )\n await stripeSync.postgresClient.closeSyncRun(accountId, runStartedAt)\n return jsonResponse({\n message: 'Sigma run exceeded max age, closed without processing',\n runAgeMinutes: Math.round(runAgeMs / 1000 / 60),\n selfTriggered: false,\n })\n }\n\n // Create object runs for all sigma objects (idempotent).\n await stripeSync.postgresClient.createObjectRuns(accountId, runStartedAt, sigmaObjects)\n await stripeSync.postgresClient.ensureSyncRunMaxConcurrent(accountId, runStartedAt, BATCH_SIZE)\n\n // Prefer running objects; otherwise claim pending ones.\n const runningObjects = await stripeSync.postgresClient.listObjectsByStatus(\n accountId,\n runStartedAt,\n 'running',\n sigmaObjects\n )\n\n const objectsToProcess = runningObjects.slice(0, BATCH_SIZE)\n let pendingObjects: string[] = []\n\n if (objectsToProcess.length === 0) {\n pendingObjects = await stripeSync.postgresClient.listObjectsByStatus(\n accountId,\n runStartedAt,\n 'pending',\n sigmaObjects\n )\n\n for (const objectKey of pendingObjects) {\n if (objectsToProcess.length >= BATCH_SIZE) break\n const started = await stripeSync.postgresClient.tryStartObjectSync(\n accountId,\n runStartedAt,\n objectKey\n )\n if (started) {\n objectsToProcess.push(objectKey)\n }\n }\n }\n\n if (objectsToProcess.length === 0) {\n if (pendingObjects.length === 0) {\n console.info('Sigma worker: all objects complete or errored - run finished')\n return jsonResponse({ message: 'Sigma sync run complete', selfTriggered: false })\n }\n\n console.info('Sigma worker: at concurrency limit, will self-trigger', {\n pendingCount: pendingObjects.length,\n })\n let selfTriggered = false\n try {\n await sql`SELECT stripe.trigger_sigma_worker()`\n selfTriggered = true\n } catch (error) {\n console.warn('Failed to self-trigger sigma worker:', error.message)\n }\n\n return jsonResponse({\n message: 'At concurrency limit',\n pendingCount: pendingObjects.length,\n selfTriggered,\n })\n }\n\n // Process objects sequentially (one lifecycle per invocation).\n const results: Array<Record<string, unknown>> = []\n\n for (const object of objectsToProcess) {\n const objectKey = object\n try {\n console.info(`Sigma worker: processing ${object}`)\n\n // Process one sigma page and upsert results.\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const result = await stripeSync.processNext(object as any, {\n runStartedAt,\n triggeredBy: 'sigma-worker',\n })\n\n results.push({\n object,\n processed: result.processed,\n hasMore: result.hasMore,\n status: 'success',\n })\n\n if (result.hasMore) {\n console.info(\n `Sigma worker: ${object} has more pages, processed ${result.processed} rows so far`\n )\n } else {\n console.info(`Sigma worker: ${object} complete, processed ${result.processed} rows`)\n }\n } catch (error) {\n console.error(`Sigma worker: error processing ${object}:`, error)\n\n // Mark object as failed and move on (no retries)\n await stripeSync.postgresClient.failObjectSync(\n accountId,\n runStartedAt,\n objectKey,\n error.message ?? 
'Unknown error'\n )\n\n results.push({\n object,\n processed: 0,\n hasMore: false,\n status: 'error',\n error: error.message,\n })\n }\n }\n\n // Determine if self-trigger is needed\n const pendingAfter = await stripeSync.postgresClient.listObjectsByStatus(\n accountId,\n runStartedAt,\n 'pending',\n sigmaObjects\n )\n const runningAfter = await stripeSync.postgresClient.listObjectsByStatus(\n accountId,\n runStartedAt,\n 'running',\n sigmaObjects\n )\n\n // Calculate remaining run time for logging\n const remainingMs = MAX_RUN_AGE_MS - (Date.now() - runStartedAt.getTime())\n const remainingMinutes = Math.round(remainingMs / 1000 / 60)\n\n // Only self-trigger if there are pending or running objects AND run hasn't timed out\n const shouldSelfTrigger =\n (pendingAfter.length > 0 || runningAfter.length > 0) && remainingMs > 0\n\n let selfTriggered = false\n if (shouldSelfTrigger) {\n console.info('Sigma worker: more work remains, self-triggering', {\n pending: pendingAfter.length,\n running: runningAfter.length,\n remainingMinutes,\n })\n try {\n await sql`SELECT stripe.trigger_sigma_worker()`\n selfTriggered = true\n } catch (error) {\n console.warn('Failed to self-trigger sigma worker:', error.message)\n }\n } else if (pendingAfter.length > 0 || runningAfter.length > 0) {\n // Would self-trigger but run timed out\n console.warn('Sigma worker: work remains but run timed out, closing', {\n pending: pendingAfter.length,\n running: runningAfter.length,\n })\n await stripeSync.postgresClient.closeSyncRun(accountId, runStartedAt)\n } else {\n console.info('Sigma worker: no more work, run complete')\n }\n\n return jsonResponse({\n results,\n selfTriggered,\n remaining: { pending: pendingAfter.length, running: runningAfter.length },\n })\n } catch (error) {\n console.error('Sigma worker error:', error)\n return jsonResponse({ error: error.message, stack: error.stack }, 500)\n } finally {\n if (sql) await sql.end()\n if (stripeSync) await stripeSync.postgresClient.pool.end()\n }\n})\n";
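The visible change to the bundled stripe-worker.ts template in 0.0.10 (the removed line above is truncated by the diff viewer) is a deduplication step added before parallel processing: messages read from pgmq are grouped by their object field, so each invocation calls processNext() at most once per object type, deletes every queued message for that object together, and re-enqueues at most one follow-up message. A minimal sketch of that grouping step, assuming pgmq-style rows of shape { msg_id, message: { object } }; the helper name and types are illustrative, not part of the package's API:

// Sketch only: groups queue messages by object type so each type is processed once.
type QueueMessage = { msg_id: number; message: { object: string } }

function groupByObject(messages: QueueMessage[]): Map<string, QueueMessage[]> {
  const byObject = new Map<string, QueueMessage[]>()
  for (const msg of messages) {
    const { object } = msg.message
    const bucket = byObject.get(object)
    if (bucket) {
      bucket.push(msg)
    } else {
      byObject.set(object, [msg])
    }
  }
  return byObject
}

// Each map entry is then processed once; all msg_ids in the entry are deleted
// together, and a single message is re-enqueued only if more pages remain.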
@@ -3,11 +3,11 @@ import {
  StripeSync,
  createStripeWebSocketClient,
  runMigrations
-} from "./chunk-2N7737DI.js";
+} from "./chunk-CPILJLAL.js";
import {
  install,
  uninstall
-} from "./chunk-5OQVNBPL.js";
+} from "./chunk-JYLI5JEV.js";

// src/cli/config.ts
import dotenv from "dotenv";
package/dist/cli/index.cjs
CHANGED
@@ -33,7 +33,7 @@ var import_commander = require("commander");
// package.json
var package_default = {
  name: "@paymentsdb/sync-engine",
-  version: "0.0.9",
+  version: "0.0.10",
  private: false,
  description: "Stripe Sync Engine to sync Stripe data to Postgres",
  type: "module",
@@ -49127,7 +49127,7 @@ var stripe_setup_default = "import { StripeSync, runMigrations, VERSION } from '
var stripe_webhook_default = "import { StripeSync } from 'npm:@paymentsdb/sync-engine'\n\nDeno.serve(async (req) => {\n if (req.method !== 'POST') {\n return new Response('Method not allowed', { status: 405 })\n }\n\n const sig = req.headers.get('stripe-signature')\n if (!sig) {\n return new Response('Missing stripe-signature header', { status: 400 })\n }\n\n const rawDbUrl = Deno.env.get('SUPABASE_DB_URL')\n if (!rawDbUrl) {\n return new Response(JSON.stringify({ error: 'SUPABASE_DB_URL not set' }), { status: 500 })\n }\n const dbUrl = rawDbUrl.replace(/[?&]sslmode=[^&]*/g, '').replace(/[?&]$/, '')\n\n const stripeSync = new StripeSync({\n poolConfig: { connectionString: dbUrl, max: 1 },\n stripeSecretKey: Deno.env.get('STRIPE_SECRET_KEY')!,\n appName: Deno.env.get('STRIPE_APP_NAME') || 'PaymentsDB',\n })\n\n try {\n const rawBody = new Uint8Array(await req.arrayBuffer())\n await stripeSync.processWebhook(rawBody, sig)\n return new Response(JSON.stringify({ received: true }), {\n status: 200,\n headers: { 'Content-Type': 'application/json' },\n })\n } catch (error) {\n console.error('Webhook processing error:', error)\n const isSignatureError =\n error.message?.includes('signature') || error.type === 'StripeSignatureVerificationError'\n const status = isSignatureError ? 400 : 500\n return new Response(JSON.stringify({ error: error.message }), {\n status,\n headers: { 'Content-Type': 'application/json' },\n })\n } finally {\n await stripeSync.postgresClient.pool.end()\n }\n})\n";
|
// raw-ts:/Users/prasoon/work/paymentsdb-sync-engine/packages/sync-engine/src/supabase/edge-functions/stripe-worker.ts
-
var stripe_worker_default = "/**\n * Stripe Sync Worker\n *\n * Triggered by pg_cron at a configurable interval (default: 60 seconds). Uses pgmq for durable work queue.\n *\n * Flow:\n * 1. Read batch of messages from pgmq (qty=10, vt=60s)\n * 2. If queue empty: enqueue all objects (continuous sync), then self-trigger\n * 3. Process messages in parallel (Promise.all):\n * - processNext(object)\n * - Delete message on success\n * - Re-enqueue if hasMore\n * 4. Self-trigger if work was re-enqueued (with progress) and run isn't too old\n * 5. Return results summary\n *\n * Concurrency:\n * - Multiple workers can run concurrently via overlapping pg_cron triggers.\n * - Each worker processes its batch of messages in parallel (Promise.all).\n * - pgmq visibility timeout prevents duplicate message reads across workers.\n * - processNext() is idempotent (uses internal cursor tracking), so duplicate\n * processing on timeout/crash is safe.\n *\n * Self-triggering:\n * - Accelerates initial sync by not waiting for next cron tick.\n * - Only triggers when work was re-enqueued AND progress was made.\n * - Stops after MAX_RUN_AGE_MS to prevent runaway loops.\n * - Cron remains as fallback for errors/timeouts.\n */\n\nimport { StripeSync } from 'npm:@paymentsdb/sync-engine'\nimport postgres from 'npm:postgres'\n\nconst QUEUE_NAME = 'stripe_sync_work'\nconst VISIBILITY_TIMEOUT = 60 // seconds\nconst BATCH_SIZE = 10\nconst MAX_RUN_AGE_MS = 6 * 60 * 60 * 1000 // 6 hours - stop self-triggering after this\n\nDeno.serve(async (req) => {\n const authHeader = req.headers.get('Authorization')\n if (!authHeader?.startsWith('Bearer ')) {\n return new Response('Unauthorized', { status: 401 })\n }\n\n const token = authHeader.substring(7) // Remove 'Bearer '\n\n const rawDbUrl = Deno.env.get('SUPABASE_DB_URL')\n if (!rawDbUrl) {\n return new Response(JSON.stringify({ error: 'SUPABASE_DB_URL not set' }), { status: 500 })\n }\n const dbUrl = rawDbUrl.replace(/[?&]sslmode=[^&]*/g, '').replace(/[?&]$/, '')\n\n let sql\n let stripeSync\n\n try {\n sql = postgres(dbUrl, { max: 1, prepare: false })\n } catch (error) {\n return new Response(\n JSON.stringify({\n error: 'Failed to create postgres connection',\n details: error.message,\n stack: error.stack,\n }),\n { status: 500, headers: { 'Content-Type': 'application/json' } }\n )\n }\n\n try {\n // Validate that the token matches the unique worker secret stored in vault\n const vaultResult = await sql`\n SELECT decrypted_secret\n FROM vault.decrypted_secrets\n WHERE name = 'stripe_sync_worker_secret'\n `\n\n if (vaultResult.length === 0) {\n await sql.end()\n return new Response('Worker secret not configured in vault', { status: 500 })\n }\n\n const storedSecret = vaultResult[0].decrypted_secret\n if (token !== storedSecret) {\n await sql.end()\n return new Response('Forbidden: Invalid worker secret', { status: 403 })\n }\n\n stripeSync = new StripeSync({\n poolConfig: { connectionString: dbUrl, max: 1 },\n stripeSecretKey: Deno.env.get('STRIPE_SECRET_KEY')!,\n enableSigma: (Deno.env.get('ENABLE_SIGMA') ?? 
'false') === 'true',\n appName: Deno.env.get('STRIPE_APP_NAME') || 'PaymentsDB',\n })\n } catch (error) {\n await sql.end()\n return new Response(\n JSON.stringify({\n error: 'Failed to create StripeSync',\n details: error.message,\n stack: error.stack,\n }),\n { status: 500, headers: { 'Content-Type': 'application/json' } }\n )\n }\n\n try {\n // Read batch of messages from queue\n const messages = await sql`\n SELECT * FROM pgmq.read(${QUEUE_NAME}::text, ${VISIBILITY_TIMEOUT}::int, ${BATCH_SIZE}::int)\n `\n\n // If queue empty, enqueue all objects for continuous sync\n if (messages.length === 0) {\n // Create sync run to make enqueued work visible (status='pending')\n const { runKey, objects } = await stripeSync.joinOrCreateSyncRun('worker')\n const msgs = objects.map((object) => JSON.stringify({ object }))\n\n await sql`\n SELECT pgmq.send_batch(\n ${QUEUE_NAME}::text,\n ${sql.array(msgs)}::jsonb[]\n )\n `\n\n // Self-trigger to start processing immediately (don't wait for cron)\n // But only if run isn't too old (prevents runaway on stale runs)\n let selfTriggered = false\n if (objects.length > 0) {\n const runAgeMs = Date.now() - runKey.runStartedAt.getTime()\n if (runAgeMs <= MAX_RUN_AGE_MS) {\n try {\n await sql`SELECT stripe.trigger_worker()`\n selfTriggered = true\n } catch (err) {\n console.warn('Failed to self-trigger after enqueue:', err)\n }\n } else {\n console.warn(\n `Worker: run too old (${Math.round(runAgeMs / 1000 / 60)} min), not self-triggering after enqueue`\n )\n }\n }\n\n return new Response(JSON.stringify({ enqueued: objects.length, objects, selfTriggered }), {\n status: 200,\n headers: { 'Content-Type': 'application/json' },\n })\n }\n\n //
+
var stripe_worker_default = "/**\n * Stripe Sync Worker\n *\n * Triggered by pg_cron at a configurable interval (default: 60 seconds). Uses pgmq for durable work queue.\n *\n * Flow:\n * 1. Read batch of messages from pgmq (qty=10, vt=60s)\n * 2. If queue empty: enqueue all objects (continuous sync), then self-trigger\n * 3. Process messages in parallel (Promise.all):\n * - processNext(object)\n * - Delete message on success\n * - Re-enqueue if hasMore\n * 4. Self-trigger if work was re-enqueued (with progress) and run isn't too old\n * 5. Return results summary\n *\n * Concurrency:\n * - Multiple workers can run concurrently via overlapping pg_cron triggers.\n * - Each worker processes its batch of messages in parallel (Promise.all).\n * - pgmq visibility timeout prevents duplicate message reads across workers.\n * - processNext() is idempotent (uses internal cursor tracking), so duplicate\n * processing on timeout/crash is safe.\n *\n * Self-triggering:\n * - Accelerates initial sync by not waiting for next cron tick.\n * - Only triggers when work was re-enqueued AND progress was made.\n * - Stops after MAX_RUN_AGE_MS to prevent runaway loops.\n * - Cron remains as fallback for errors/timeouts.\n */\n\nimport { StripeSync } from 'npm:@paymentsdb/sync-engine'\nimport postgres from 'npm:postgres'\n\nconst QUEUE_NAME = 'stripe_sync_work'\nconst VISIBILITY_TIMEOUT = 60 // seconds\nconst BATCH_SIZE = 10\nconst MAX_RUN_AGE_MS = 6 * 60 * 60 * 1000 // 6 hours - stop self-triggering after this\n\nDeno.serve(async (req) => {\n const authHeader = req.headers.get('Authorization')\n if (!authHeader?.startsWith('Bearer ')) {\n return new Response('Unauthorized', { status: 401 })\n }\n\n const token = authHeader.substring(7) // Remove 'Bearer '\n\n const rawDbUrl = Deno.env.get('SUPABASE_DB_URL')\n if (!rawDbUrl) {\n return new Response(JSON.stringify({ error: 'SUPABASE_DB_URL not set' }), { status: 500 })\n }\n const dbUrl = rawDbUrl.replace(/[?&]sslmode=[^&]*/g, '').replace(/[?&]$/, '')\n\n let sql\n let stripeSync\n\n try {\n sql = postgres(dbUrl, { max: 1, prepare: false })\n } catch (error) {\n return new Response(\n JSON.stringify({\n error: 'Failed to create postgres connection',\n details: error.message,\n stack: error.stack,\n }),\n { status: 500, headers: { 'Content-Type': 'application/json' } }\n )\n }\n\n try {\n // Validate that the token matches the unique worker secret stored in vault\n const vaultResult = await sql`\n SELECT decrypted_secret\n FROM vault.decrypted_secrets\n WHERE name = 'stripe_sync_worker_secret'\n `\n\n if (vaultResult.length === 0) {\n await sql.end()\n return new Response('Worker secret not configured in vault', { status: 500 })\n }\n\n const storedSecret = vaultResult[0].decrypted_secret\n if (token !== storedSecret) {\n await sql.end()\n return new Response('Forbidden: Invalid worker secret', { status: 403 })\n }\n\n stripeSync = new StripeSync({\n poolConfig: { connectionString: dbUrl, max: 1 },\n stripeSecretKey: Deno.env.get('STRIPE_SECRET_KEY')!,\n enableSigma: (Deno.env.get('ENABLE_SIGMA') ?? 
'false') === 'true',\n appName: Deno.env.get('STRIPE_APP_NAME') || 'PaymentsDB',\n })\n } catch (error) {\n await sql.end()\n return new Response(\n JSON.stringify({\n error: 'Failed to create StripeSync',\n details: error.message,\n stack: error.stack,\n }),\n { status: 500, headers: { 'Content-Type': 'application/json' } }\n )\n }\n\n try {\n // Read batch of messages from queue\n const messages = await sql`\n SELECT * FROM pgmq.read(${QUEUE_NAME}::text, ${VISIBILITY_TIMEOUT}::int, ${BATCH_SIZE}::int)\n `\n\n // If queue empty, enqueue all objects for continuous sync\n if (messages.length === 0) {\n // Create sync run to make enqueued work visible (status='pending')\n const { runKey, objects } = await stripeSync.joinOrCreateSyncRun('worker')\n const msgs = objects.map((object) => JSON.stringify({ object }))\n\n await sql`\n SELECT pgmq.send_batch(\n ${QUEUE_NAME}::text,\n ${sql.array(msgs)}::jsonb[]\n )\n `\n\n // Self-trigger to start processing immediately (don't wait for cron)\n // But only if run isn't too old (prevents runaway on stale runs)\n let selfTriggered = false\n if (objects.length > 0) {\n const runAgeMs = Date.now() - runKey.runStartedAt.getTime()\n if (runAgeMs <= MAX_RUN_AGE_MS) {\n try {\n await sql`SELECT stripe.trigger_worker()`\n selfTriggered = true\n } catch (err) {\n console.warn('Failed to self-trigger after enqueue:', err)\n }\n } else {\n console.warn(\n `Worker: run too old (${Math.round(runAgeMs / 1000 / 60)} min), not self-triggering after enqueue`\n )\n }\n }\n\n return new Response(JSON.stringify({ enqueued: objects.length, objects, selfTriggered }), {\n status: 200,\n headers: { 'Content-Type': 'application/json' },\n })\n }\n\n // Deduplicate messages by object type to prevent parallel processing of the same object.\n // Multiple messages for the same object can accumulate if previous workers re-enqueued\n // before completing, leading to race conditions where the same page is fetched multiple times.\n const messagesByObject = new Map<string, typeof messages>()\n for (const msg of messages) {\n const { object } = msg.message as { object: string }\n if (!messagesByObject.has(object)) {\n messagesByObject.set(object, [])\n }\n messagesByObject.get(object)!.push(msg)\n }\n\n // Process one message per unique object type in parallel\n // Pass triggeredBy: 'worker' so all processing uses the same run as joinOrCreateSyncRun('worker')\n const results = await Promise.all(\n Array.from(messagesByObject.entries()).map(async ([object, msgs]) => {\n try {\n const result = await stripeSync.processNext(object, { triggeredBy: 'worker' })\n\n // Delete ALL messages for this object (not just one) to prevent duplicate processing\n const msgIds = msgs.map((m) => m.msg_id)\n await sql`SELECT pgmq.delete(${QUEUE_NAME}::text, ${sql.array(msgIds)}::bigint[])`\n\n // Re-enqueue only ONCE if more pages (not once per message)\n if (result.hasMore) {\n await sql`SELECT pgmq.send(${QUEUE_NAME}::text, ${sql.json({ object })}::jsonb)`\n }\n\n return { object, ...result, reenqueued: result.hasMore, deduplicatedCount: msgs.length }\n } catch (error) {\n // Log error but continue to next object\n // Messages will become visible again after visibility timeout\n console.error(`Error processing ${object}:`, error)\n return {\n object,\n processed: 0,\n hasMore: false,\n reenqueued: false,\n runStartedAt: null,\n deduplicatedCount: msgs.length,\n error: error.message,\n stack: error.stack,\n }\n }\n })\n )\n\n // Self-trigger if work was re-enqueued with progress, and run isn't too old\n 
let selfTriggered = false\n const successfulResults = results.filter((r) => r.reenqueued && r.processed > 0 && !r.error)\n\n if (successfulResults.length > 0) {\n // Use runStartedAt from the first successful result (all use same run)\n const runStartedAt = successfulResults[0].runStartedAt\n if (runStartedAt) {\n const runAgeMs = Date.now() - runStartedAt.getTime()\n if (runAgeMs <= MAX_RUN_AGE_MS) {\n try {\n await sql`SELECT stripe.trigger_worker()`\n selfTriggered = true\n } catch (err) {\n console.warn('Failed to self-trigger after processing:', err)\n }\n } else {\n console.warn(\n `Worker: run too old (${Math.round(runAgeMs / 1000 / 60)} min), not self-triggering`\n )\n }\n }\n }\n\n return new Response(JSON.stringify({ results, selfTriggered }), {\n status: 200,\n headers: { 'Content-Type': 'application/json' },\n })\n } catch (error) {\n console.error('Worker error:', error)\n return new Response(JSON.stringify({ error: error.message, stack: error.stack }), {\n status: 500,\n headers: { 'Content-Type': 'application/json' },\n })\n } finally {\n if (sql) await sql.end()\n if (stripeSync) await stripeSync.postgresClient.pool.end()\n }\n})\n";
|
// raw-ts:/Users/prasoon/work/paymentsdb-sync-engine/packages/sync-engine/src/supabase/edge-functions/sigma-data-worker.ts
var sigma_data_worker_default = "/**\n * Stripe Sigma Data Worker.\n *\n * Hourly cron starts a run; self-trigger continues until all objects finish.\n * Progress persists in _sync_runs and _sync_obj_runs across invocations.\n */\n\nimport { StripeSync } from 'npm:stripe-experiment-sync'\nimport postgres from 'npm:postgres'\n\nconst BATCH_SIZE = 1\nconst MAX_RUN_AGE_MS = 6 * 60 * 60 * 1000\nconst jsonResponse = (body: unknown, status = 200) =>\n new Response(JSON.stringify(body), {\n status,\n headers: { 'Content-Type': 'application/json' },\n })\n\nDeno.serve(async (req) => {\n const authHeader = req.headers.get('Authorization')\n if (!authHeader?.startsWith('Bearer ')) {\n return new Response('Unauthorized', { status: 401 })\n }\n\n const token = authHeader.substring(7)\n\n const rawDbUrl = Deno.env.get('SUPABASE_DB_URL')\n if (!rawDbUrl) {\n return jsonResponse({ error: 'SUPABASE_DB_URL not set' }, 500)\n }\n const dbUrl = rawDbUrl.replace(/[?&]sslmode=[^&]*/g, '').replace(/[?&]$/, '')\n\n let sql: ReturnType<typeof postgres> | undefined\n let stripeSync: StripeSync | undefined\n\n try {\n sql = postgres(dbUrl, { max: 1, prepare: false })\n } catch (error) {\n return jsonResponse(\n {\n error: 'Failed to create postgres connection',\n details: error.message,\n stack: error.stack,\n },\n 500\n )\n }\n\n try {\n // Validate the token against vault secret\n const vaultResult = await sql`\n SELECT decrypted_secret\n FROM vault.decrypted_secrets\n WHERE name = 'stripe_sigma_worker_secret'\n `\n\n if (vaultResult.length === 0) {\n await sql.end()\n return new Response('Sigma worker secret not configured in vault', { status: 500 })\n }\n\n const storedSecret = vaultResult[0].decrypted_secret\n if (token !== storedSecret) {\n await sql.end()\n return new Response('Forbidden: Invalid sigma worker secret', { status: 403 })\n }\n\n stripeSync = new StripeSync({\n poolConfig: { connectionString: dbUrl, max: 1 },\n stripeSecretKey: Deno.env.get('STRIPE_SECRET_KEY')!,\n enableSigma: true,\n sigmaPageSizeOverride: 1000,\n })\n } catch (error) {\n await sql.end()\n return jsonResponse(\n {\n error: 'Failed to create StripeSync',\n details: error.message,\n stack: error.stack,\n },\n 500\n )\n }\n\n try {\n const accountId = await stripeSync.getAccountId()\n const sigmaObjects = stripeSync.getSupportedSigmaObjects()\n\n if (sigmaObjects.length === 0) {\n return jsonResponse({ message: 'No Sigma objects configured for sync' })\n }\n\n // Get or create sync run for sigma-worker (isolated from stripe-worker)\n const runResult = await stripeSync.postgresClient.getOrCreateSyncRun(accountId, 'sigma-worker')\n const runStartedAt =\n runResult?.runStartedAt ??\n (await stripeSync.postgresClient.getActiveSyncRun(accountId, 'sigma-worker'))?.runStartedAt\n\n if (!runStartedAt) {\n throw new Error('Failed to get or create sync run for sigma worker')\n }\n\n // Legacy cleanup: remove any prefixed sigma object runs that can block concurrency.\n // Previous versions stored objects as \"sigma.<table>\" which no longer matches processNext.\n await stripeSync.postgresClient.query(\n `UPDATE \"stripe\".\"_sync_obj_runs\"\n SET status = 'error',\n error_message = 'Legacy sigma worker prefix run (sigma.*); superseded by unprefixed runs',\n completed_at = now()\n WHERE \"_account_id\" = $1\n AND run_started_at = $2\n AND object LIKE 'sigma.%'\n AND status IN ('pending', 'running')`,\n [accountId, runStartedAt]\n )\n\n // Stop self-triggering after MAX_RUN_AGE_MS.\n const runAgeMs = Date.now() - runStartedAt.getTime()\n if 
(runAgeMs > MAX_RUN_AGE_MS) {\n console.warn(\n `Sigma worker: run too old (${Math.round(runAgeMs / 1000 / 60)} min), closing without self-trigger`\n )\n await stripeSync.postgresClient.closeSyncRun(accountId, runStartedAt)\n return jsonResponse({\n message: 'Sigma run exceeded max age, closed without processing',\n runAgeMinutes: Math.round(runAgeMs / 1000 / 60),\n selfTriggered: false,\n })\n }\n\n // Create object runs for all sigma objects (idempotent).\n await stripeSync.postgresClient.createObjectRuns(accountId, runStartedAt, sigmaObjects)\n await stripeSync.postgresClient.ensureSyncRunMaxConcurrent(accountId, runStartedAt, BATCH_SIZE)\n\n // Prefer running objects; otherwise claim pending ones.\n const runningObjects = await stripeSync.postgresClient.listObjectsByStatus(\n accountId,\n runStartedAt,\n 'running',\n sigmaObjects\n )\n\n const objectsToProcess = runningObjects.slice(0, BATCH_SIZE)\n let pendingObjects: string[] = []\n\n if (objectsToProcess.length === 0) {\n pendingObjects = await stripeSync.postgresClient.listObjectsByStatus(\n accountId,\n runStartedAt,\n 'pending',\n sigmaObjects\n )\n\n for (const objectKey of pendingObjects) {\n if (objectsToProcess.length >= BATCH_SIZE) break\n const started = await stripeSync.postgresClient.tryStartObjectSync(\n accountId,\n runStartedAt,\n objectKey\n )\n if (started) {\n objectsToProcess.push(objectKey)\n }\n }\n }\n\n if (objectsToProcess.length === 0) {\n if (pendingObjects.length === 0) {\n console.info('Sigma worker: all objects complete or errored - run finished')\n return jsonResponse({ message: 'Sigma sync run complete', selfTriggered: false })\n }\n\n console.info('Sigma worker: at concurrency limit, will self-trigger', {\n pendingCount: pendingObjects.length,\n })\n let selfTriggered = false\n try {\n await sql`SELECT stripe.trigger_sigma_worker()`\n selfTriggered = true\n } catch (error) {\n console.warn('Failed to self-trigger sigma worker:', error.message)\n }\n\n return jsonResponse({\n message: 'At concurrency limit',\n pendingCount: pendingObjects.length,\n selfTriggered,\n })\n }\n\n // Process objects sequentially (one lifecycle per invocation).\n const results: Array<Record<string, unknown>> = []\n\n for (const object of objectsToProcess) {\n const objectKey = object\n try {\n console.info(`Sigma worker: processing ${object}`)\n\n // Process one sigma page and upsert results.\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const result = await stripeSync.processNext(object as any, {\n runStartedAt,\n triggeredBy: 'sigma-worker',\n })\n\n results.push({\n object,\n processed: result.processed,\n hasMore: result.hasMore,\n status: 'success',\n })\n\n if (result.hasMore) {\n console.info(\n `Sigma worker: ${object} has more pages, processed ${result.processed} rows so far`\n )\n } else {\n console.info(`Sigma worker: ${object} complete, processed ${result.processed} rows`)\n }\n } catch (error) {\n console.error(`Sigma worker: error processing ${object}:`, error)\n\n // Mark object as failed and move on (no retries)\n await stripeSync.postgresClient.failObjectSync(\n accountId,\n runStartedAt,\n objectKey,\n error.message ?? 
'Unknown error'\n )\n\n results.push({\n object,\n processed: 0,\n hasMore: false,\n status: 'error',\n error: error.message,\n })\n }\n }\n\n // Determine if self-trigger is needed\n const pendingAfter = await stripeSync.postgresClient.listObjectsByStatus(\n accountId,\n runStartedAt,\n 'pending',\n sigmaObjects\n )\n const runningAfter = await stripeSync.postgresClient.listObjectsByStatus(\n accountId,\n runStartedAt,\n 'running',\n sigmaObjects\n )\n\n // Calculate remaining run time for logging\n const remainingMs = MAX_RUN_AGE_MS - (Date.now() - runStartedAt.getTime())\n const remainingMinutes = Math.round(remainingMs / 1000 / 60)\n\n // Only self-trigger if there are pending or running objects AND run hasn't timed out\n const shouldSelfTrigger =\n (pendingAfter.length > 0 || runningAfter.length > 0) && remainingMs > 0\n\n let selfTriggered = false\n if (shouldSelfTrigger) {\n console.info('Sigma worker: more work remains, self-triggering', {\n pending: pendingAfter.length,\n running: runningAfter.length,\n remainingMinutes,\n })\n try {\n await sql`SELECT stripe.trigger_sigma_worker()`\n selfTriggered = true\n } catch (error) {\n console.warn('Failed to self-trigger sigma worker:', error.message)\n }\n } else if (pendingAfter.length > 0 || runningAfter.length > 0) {\n // Would self-trigger but run timed out\n console.warn('Sigma worker: work remains but run timed out, closing', {\n pending: pendingAfter.length,\n running: runningAfter.length,\n })\n await stripeSync.postgresClient.closeSyncRun(accountId, runStartedAt)\n } else {\n console.info('Sigma worker: no more work, run complete')\n }\n\n return jsonResponse({\n results,\n selfTriggered,\n remaining: { pending: pendingAfter.length, running: runningAfter.length },\n })\n } catch (error) {\n console.error('Sigma worker error:', error)\n return jsonResponse({ error: error.message, stack: error.stack }, 500)\n } finally {\n if (sql) await sql.end()\n if (stripeSync) await stripeSync.postgresClient.pool.end()\n }\n})\n";
package/dist/cli/index.js
CHANGED
@@ -5,12 +5,12 @@ import {
  migrateCommand,
  syncCommand,
  uninstallCommand
-} from "../chunk-52SZMUB6.js";
-import "../chunk-2N7737DI.js";
-import "../chunk-5OQVNBPL.js";
+} from "../chunk-KN5PW4SC.js";
+import "../chunk-CPILJLAL.js";
+import "../chunk-JYLI5JEV.js";
import {
  package_default
-} from "../chunk-CVZHIBXG.js";
+} from "../chunk-CW2GRCQD.js";

// src/cli/index.ts
|
package/dist/cli/lib.cjs
CHANGED
@@ -117,7 +117,7 @@ async function loadConfig(options) {
// package.json
var package_default = {
  name: "@paymentsdb/sync-engine",
-  version: "0.0.9",
+  version: "0.0.10",
  private: false,
  description: "Stripe Sync Engine to sync Stripe data to Postgres",
  type: "module",
@@ -49141,7 +49141,7 @@ var stripe_setup_default = "import { StripeSync, runMigrations, VERSION } from '
var stripe_webhook_default = "import { StripeSync } from 'npm:@paymentsdb/sync-engine'\n\nDeno.serve(async (req) => {\n if (req.method !== 'POST') {\n return new Response('Method not allowed', { status: 405 })\n }\n\n const sig = req.headers.get('stripe-signature')\n if (!sig) {\n return new Response('Missing stripe-signature header', { status: 400 })\n }\n\n const rawDbUrl = Deno.env.get('SUPABASE_DB_URL')\n if (!rawDbUrl) {\n return new Response(JSON.stringify({ error: 'SUPABASE_DB_URL not set' }), { status: 500 })\n }\n const dbUrl = rawDbUrl.replace(/[?&]sslmode=[^&]*/g, '').replace(/[?&]$/, '')\n\n const stripeSync = new StripeSync({\n poolConfig: { connectionString: dbUrl, max: 1 },\n stripeSecretKey: Deno.env.get('STRIPE_SECRET_KEY')!,\n appName: Deno.env.get('STRIPE_APP_NAME') || 'PaymentsDB',\n })\n\n try {\n const rawBody = new Uint8Array(await req.arrayBuffer())\n await stripeSync.processWebhook(rawBody, sig)\n return new Response(JSON.stringify({ received: true }), {\n status: 200,\n headers: { 'Content-Type': 'application/json' },\n })\n } catch (error) {\n console.error('Webhook processing error:', error)\n const isSignatureError =\n error.message?.includes('signature') || error.type === 'StripeSignatureVerificationError'\n const status = isSignatureError ? 400 : 500\n return new Response(JSON.stringify({ error: error.message }), {\n status,\n headers: { 'Content-Type': 'application/json' },\n })\n } finally {\n await stripeSync.postgresClient.pool.end()\n }\n})\n";
|
// raw-ts:/Users/prasoon/work/paymentsdb-sync-engine/packages/sync-engine/src/supabase/edge-functions/stripe-worker.ts
-
var stripe_worker_default = "/**\n * Stripe Sync Worker\n *\n * Triggered by pg_cron at a configurable interval (default: 60 seconds). Uses pgmq for durable work queue.\n *\n * Flow:\n * 1. Read batch of messages from pgmq (qty=10, vt=60s)\n * 2. If queue empty: enqueue all objects (continuous sync), then self-trigger\n * 3. Process messages in parallel (Promise.all):\n * - processNext(object)\n * - Delete message on success\n * - Re-enqueue if hasMore\n * 4. Self-trigger if work was re-enqueued (with progress) and run isn't too old\n * 5. Return results summary\n *\n * Concurrency:\n * - Multiple workers can run concurrently via overlapping pg_cron triggers.\n * - Each worker processes its batch of messages in parallel (Promise.all).\n * - pgmq visibility timeout prevents duplicate message reads across workers.\n * - processNext() is idempotent (uses internal cursor tracking), so duplicate\n * processing on timeout/crash is safe.\n *\n * Self-triggering:\n * - Accelerates initial sync by not waiting for next cron tick.\n * - Only triggers when work was re-enqueued AND progress was made.\n * - Stops after MAX_RUN_AGE_MS to prevent runaway loops.\n * - Cron remains as fallback for errors/timeouts.\n */\n\nimport { StripeSync } from 'npm:@paymentsdb/sync-engine'\nimport postgres from 'npm:postgres'\n\nconst QUEUE_NAME = 'stripe_sync_work'\nconst VISIBILITY_TIMEOUT = 60 // seconds\nconst BATCH_SIZE = 10\nconst MAX_RUN_AGE_MS = 6 * 60 * 60 * 1000 // 6 hours - stop self-triggering after this\n\nDeno.serve(async (req) => {\n const authHeader = req.headers.get('Authorization')\n if (!authHeader?.startsWith('Bearer ')) {\n return new Response('Unauthorized', { status: 401 })\n }\n\n const token = authHeader.substring(7) // Remove 'Bearer '\n\n const rawDbUrl = Deno.env.get('SUPABASE_DB_URL')\n if (!rawDbUrl) {\n return new Response(JSON.stringify({ error: 'SUPABASE_DB_URL not set' }), { status: 500 })\n }\n const dbUrl = rawDbUrl.replace(/[?&]sslmode=[^&]*/g, '').replace(/[?&]$/, '')\n\n let sql\n let stripeSync\n\n try {\n sql = postgres(dbUrl, { max: 1, prepare: false })\n } catch (error) {\n return new Response(\n JSON.stringify({\n error: 'Failed to create postgres connection',\n details: error.message,\n stack: error.stack,\n }),\n { status: 500, headers: { 'Content-Type': 'application/json' } }\n )\n }\n\n try {\n // Validate that the token matches the unique worker secret stored in vault\n const vaultResult = await sql`\n SELECT decrypted_secret\n FROM vault.decrypted_secrets\n WHERE name = 'stripe_sync_worker_secret'\n `\n\n if (vaultResult.length === 0) {\n await sql.end()\n return new Response('Worker secret not configured in vault', { status: 500 })\n }\n\n const storedSecret = vaultResult[0].decrypted_secret\n if (token !== storedSecret) {\n await sql.end()\n return new Response('Forbidden: Invalid worker secret', { status: 403 })\n }\n\n stripeSync = new StripeSync({\n poolConfig: { connectionString: dbUrl, max: 1 },\n stripeSecretKey: Deno.env.get('STRIPE_SECRET_KEY')!,\n enableSigma: (Deno.env.get('ENABLE_SIGMA') ?? 
'false') === 'true',\n appName: Deno.env.get('STRIPE_APP_NAME') || 'PaymentsDB',\n })\n } catch (error) {\n await sql.end()\n return new Response(\n JSON.stringify({\n error: 'Failed to create StripeSync',\n details: error.message,\n stack: error.stack,\n }),\n { status: 500, headers: { 'Content-Type': 'application/json' } }\n )\n }\n\n try {\n // Read batch of messages from queue\n const messages = await sql`\n SELECT * FROM pgmq.read(${QUEUE_NAME}::text, ${VISIBILITY_TIMEOUT}::int, ${BATCH_SIZE}::int)\n `\n\n // If queue empty, enqueue all objects for continuous sync\n if (messages.length === 0) {\n // Create sync run to make enqueued work visible (status='pending')\n const { runKey, objects } = await stripeSync.joinOrCreateSyncRun('worker')\n const msgs = objects.map((object) => JSON.stringify({ object }))\n\n await sql`\n SELECT pgmq.send_batch(\n ${QUEUE_NAME}::text,\n ${sql.array(msgs)}::jsonb[]\n )\n `\n\n // Self-trigger to start processing immediately (don't wait for cron)\n // But only if run isn't too old (prevents runaway on stale runs)\n let selfTriggered = false\n if (objects.length > 0) {\n const runAgeMs = Date.now() - runKey.runStartedAt.getTime()\n if (runAgeMs <= MAX_RUN_AGE_MS) {\n try {\n await sql`SELECT stripe.trigger_worker()`\n selfTriggered = true\n } catch (err) {\n console.warn('Failed to self-trigger after enqueue:', err)\n }\n } else {\n console.warn(\n `Worker: run too old (${Math.round(runAgeMs / 1000 / 60)} min), not self-triggering after enqueue`\n )\n }\n }\n\n return new Response(JSON.stringify({ enqueued: objects.length, objects, selfTriggered }), {\n status: 200,\n headers: { 'Content-Type': 'application/json' },\n })\n }\n\n //
+
var stripe_worker_default = "/**\n * Stripe Sync Worker\n *\n * Triggered by pg_cron at a configurable interval (default: 60 seconds). Uses pgmq for durable work queue.\n *\n * Flow:\n * 1. Read batch of messages from pgmq (qty=10, vt=60s)\n * 2. If queue empty: enqueue all objects (continuous sync), then self-trigger\n * 3. Process messages in parallel (Promise.all):\n * - processNext(object)\n * - Delete message on success\n * - Re-enqueue if hasMore\n * 4. Self-trigger if work was re-enqueued (with progress) and run isn't too old\n * 5. Return results summary\n *\n * Concurrency:\n * - Multiple workers can run concurrently via overlapping pg_cron triggers.\n * - Each worker processes its batch of messages in parallel (Promise.all).\n * - pgmq visibility timeout prevents duplicate message reads across workers.\n * - processNext() is idempotent (uses internal cursor tracking), so duplicate\n * processing on timeout/crash is safe.\n *\n * Self-triggering:\n * - Accelerates initial sync by not waiting for next cron tick.\n * - Only triggers when work was re-enqueued AND progress was made.\n * - Stops after MAX_RUN_AGE_MS to prevent runaway loops.\n * - Cron remains as fallback for errors/timeouts.\n */\n\nimport { StripeSync } from 'npm:@paymentsdb/sync-engine'\nimport postgres from 'npm:postgres'\n\nconst QUEUE_NAME = 'stripe_sync_work'\nconst VISIBILITY_TIMEOUT = 60 // seconds\nconst BATCH_SIZE = 10\nconst MAX_RUN_AGE_MS = 6 * 60 * 60 * 1000 // 6 hours - stop self-triggering after this\n\nDeno.serve(async (req) => {\n const authHeader = req.headers.get('Authorization')\n if (!authHeader?.startsWith('Bearer ')) {\n return new Response('Unauthorized', { status: 401 })\n }\n\n const token = authHeader.substring(7) // Remove 'Bearer '\n\n const rawDbUrl = Deno.env.get('SUPABASE_DB_URL')\n if (!rawDbUrl) {\n return new Response(JSON.stringify({ error: 'SUPABASE_DB_URL not set' }), { status: 500 })\n }\n const dbUrl = rawDbUrl.replace(/[?&]sslmode=[^&]*/g, '').replace(/[?&]$/, '')\n\n let sql\n let stripeSync\n\n try {\n sql = postgres(dbUrl, { max: 1, prepare: false })\n } catch (error) {\n return new Response(\n JSON.stringify({\n error: 'Failed to create postgres connection',\n details: error.message,\n stack: error.stack,\n }),\n { status: 500, headers: { 'Content-Type': 'application/json' } }\n )\n }\n\n try {\n // Validate that the token matches the unique worker secret stored in vault\n const vaultResult = await sql`\n SELECT decrypted_secret\n FROM vault.decrypted_secrets\n WHERE name = 'stripe_sync_worker_secret'\n `\n\n if (vaultResult.length === 0) {\n await sql.end()\n return new Response('Worker secret not configured in vault', { status: 500 })\n }\n\n const storedSecret = vaultResult[0].decrypted_secret\n if (token !== storedSecret) {\n await sql.end()\n return new Response('Forbidden: Invalid worker secret', { status: 403 })\n }\n\n stripeSync = new StripeSync({\n poolConfig: { connectionString: dbUrl, max: 1 },\n stripeSecretKey: Deno.env.get('STRIPE_SECRET_KEY')!,\n enableSigma: (Deno.env.get('ENABLE_SIGMA') ?? 
'false') === 'true',\n appName: Deno.env.get('STRIPE_APP_NAME') || 'PaymentsDB',\n })\n } catch (error) {\n await sql.end()\n return new Response(\n JSON.stringify({\n error: 'Failed to create StripeSync',\n details: error.message,\n stack: error.stack,\n }),\n { status: 500, headers: { 'Content-Type': 'application/json' } }\n )\n }\n\n try {\n // Read batch of messages from queue\n const messages = await sql`\n SELECT * FROM pgmq.read(${QUEUE_NAME}::text, ${VISIBILITY_TIMEOUT}::int, ${BATCH_SIZE}::int)\n `\n\n // If queue empty, enqueue all objects for continuous sync\n if (messages.length === 0) {\n // Create sync run to make enqueued work visible (status='pending')\n const { runKey, objects } = await stripeSync.joinOrCreateSyncRun('worker')\n const msgs = objects.map((object) => JSON.stringify({ object }))\n\n await sql`\n SELECT pgmq.send_batch(\n ${QUEUE_NAME}::text,\n ${sql.array(msgs)}::jsonb[]\n )\n `\n\n // Self-trigger to start processing immediately (don't wait for cron)\n // But only if run isn't too old (prevents runaway on stale runs)\n let selfTriggered = false\n if (objects.length > 0) {\n const runAgeMs = Date.now() - runKey.runStartedAt.getTime()\n if (runAgeMs <= MAX_RUN_AGE_MS) {\n try {\n await sql`SELECT stripe.trigger_worker()`\n selfTriggered = true\n } catch (err) {\n console.warn('Failed to self-trigger after enqueue:', err)\n }\n } else {\n console.warn(\n `Worker: run too old (${Math.round(runAgeMs / 1000 / 60)} min), not self-triggering after enqueue`\n )\n }\n }\n\n return new Response(JSON.stringify({ enqueued: objects.length, objects, selfTriggered }), {\n status: 200,\n headers: { 'Content-Type': 'application/json' },\n })\n }\n\n // Deduplicate messages by object type to prevent parallel processing of the same object.\n // Multiple messages for the same object can accumulate if previous workers re-enqueued\n // before completing, leading to race conditions where the same page is fetched multiple times.\n const messagesByObject = new Map<string, typeof messages>()\n for (const msg of messages) {\n const { object } = msg.message as { object: string }\n if (!messagesByObject.has(object)) {\n messagesByObject.set(object, [])\n }\n messagesByObject.get(object)!.push(msg)\n }\n\n // Process one message per unique object type in parallel\n // Pass triggeredBy: 'worker' so all processing uses the same run as joinOrCreateSyncRun('worker')\n const results = await Promise.all(\n Array.from(messagesByObject.entries()).map(async ([object, msgs]) => {\n try {\n const result = await stripeSync.processNext(object, { triggeredBy: 'worker' })\n\n // Delete ALL messages for this object (not just one) to prevent duplicate processing\n const msgIds = msgs.map((m) => m.msg_id)\n await sql`SELECT pgmq.delete(${QUEUE_NAME}::text, ${sql.array(msgIds)}::bigint[])`\n\n // Re-enqueue only ONCE if more pages (not once per message)\n if (result.hasMore) {\n await sql`SELECT pgmq.send(${QUEUE_NAME}::text, ${sql.json({ object })}::jsonb)`\n }\n\n return { object, ...result, reenqueued: result.hasMore, deduplicatedCount: msgs.length }\n } catch (error) {\n // Log error but continue to next object\n // Messages will become visible again after visibility timeout\n console.error(`Error processing ${object}:`, error)\n return {\n object,\n processed: 0,\n hasMore: false,\n reenqueued: false,\n runStartedAt: null,\n deduplicatedCount: msgs.length,\n error: error.message,\n stack: error.stack,\n }\n }\n })\n )\n\n // Self-trigger if work was re-enqueued with progress, and run isn't too old\n 
let selfTriggered = false\n const successfulResults = results.filter((r) => r.reenqueued && r.processed > 0 && !r.error)\n\n if (successfulResults.length > 0) {\n // Use runStartedAt from the first successful result (all use same run)\n const runStartedAt = successfulResults[0].runStartedAt\n if (runStartedAt) {\n const runAgeMs = Date.now() - runStartedAt.getTime()\n if (runAgeMs <= MAX_RUN_AGE_MS) {\n try {\n await sql`SELECT stripe.trigger_worker()`\n selfTriggered = true\n } catch (err) {\n console.warn('Failed to self-trigger after processing:', err)\n }\n } else {\n console.warn(\n `Worker: run too old (${Math.round(runAgeMs / 1000 / 60)} min), not self-triggering`\n )\n }\n }\n }\n\n return new Response(JSON.stringify({ results, selfTriggered }), {\n status: 200,\n headers: { 'Content-Type': 'application/json' },\n })\n } catch (error) {\n console.error('Worker error:', error)\n return new Response(JSON.stringify({ error: error.message, stack: error.stack }), {\n status: 500,\n headers: { 'Content-Type': 'application/json' },\n })\n } finally {\n if (sql) await sql.end()\n if (stripeSync) await stripeSync.postgresClient.pool.end()\n }\n})\n";
// raw-ts:/Users/prasoon/work/paymentsdb-sync-engine/packages/sync-engine/src/supabase/edge-functions/sigma-data-worker.ts
var sigma_data_worker_default = "/**\n * Stripe Sigma Data Worker.\n *\n * Hourly cron starts a run; self-trigger continues until all objects finish.\n * Progress persists in _sync_runs and _sync_obj_runs across invocations.\n */\n\nimport { StripeSync } from 'npm:stripe-experiment-sync'\nimport postgres from 'npm:postgres'\n\nconst BATCH_SIZE = 1\nconst MAX_RUN_AGE_MS = 6 * 60 * 60 * 1000\nconst jsonResponse = (body: unknown, status = 200) =>\n new Response(JSON.stringify(body), {\n status,\n headers: { 'Content-Type': 'application/json' },\n })\n\nDeno.serve(async (req) => {\n const authHeader = req.headers.get('Authorization')\n if (!authHeader?.startsWith('Bearer ')) {\n return new Response('Unauthorized', { status: 401 })\n }\n\n const token = authHeader.substring(7)\n\n const rawDbUrl = Deno.env.get('SUPABASE_DB_URL')\n if (!rawDbUrl) {\n return jsonResponse({ error: 'SUPABASE_DB_URL not set' }, 500)\n }\n const dbUrl = rawDbUrl.replace(/[?&]sslmode=[^&]*/g, '').replace(/[?&]$/, '')\n\n let sql: ReturnType<typeof postgres> | undefined\n let stripeSync: StripeSync | undefined\n\n try {\n sql = postgres(dbUrl, { max: 1, prepare: false })\n } catch (error) {\n return jsonResponse(\n {\n error: 'Failed to create postgres connection',\n details: error.message,\n stack: error.stack,\n },\n 500\n )\n }\n\n try {\n // Validate the token against vault secret\n const vaultResult = await sql`\n SELECT decrypted_secret\n FROM vault.decrypted_secrets\n WHERE name = 'stripe_sigma_worker_secret'\n `\n\n if (vaultResult.length === 0) {\n await sql.end()\n return new Response('Sigma worker secret not configured in vault', { status: 500 })\n }\n\n const storedSecret = vaultResult[0].decrypted_secret\n if (token !== storedSecret) {\n await sql.end()\n return new Response('Forbidden: Invalid sigma worker secret', { status: 403 })\n }\n\n stripeSync = new StripeSync({\n poolConfig: { connectionString: dbUrl, max: 1 },\n stripeSecretKey: Deno.env.get('STRIPE_SECRET_KEY')!,\n enableSigma: true,\n sigmaPageSizeOverride: 1000,\n })\n } catch (error) {\n await sql.end()\n return jsonResponse(\n {\n error: 'Failed to create StripeSync',\n details: error.message,\n stack: error.stack,\n },\n 500\n )\n }\n\n try {\n const accountId = await stripeSync.getAccountId()\n const sigmaObjects = stripeSync.getSupportedSigmaObjects()\n\n if (sigmaObjects.length === 0) {\n return jsonResponse({ message: 'No Sigma objects configured for sync' })\n }\n\n // Get or create sync run for sigma-worker (isolated from stripe-worker)\n const runResult = await stripeSync.postgresClient.getOrCreateSyncRun(accountId, 'sigma-worker')\n const runStartedAt =\n runResult?.runStartedAt ??\n (await stripeSync.postgresClient.getActiveSyncRun(accountId, 'sigma-worker'))?.runStartedAt\n\n if (!runStartedAt) {\n throw new Error('Failed to get or create sync run for sigma worker')\n }\n\n // Legacy cleanup: remove any prefixed sigma object runs that can block concurrency.\n // Previous versions stored objects as \"sigma.<table>\" which no longer matches processNext.\n await stripeSync.postgresClient.query(\n `UPDATE \"stripe\".\"_sync_obj_runs\"\n SET status = 'error',\n error_message = 'Legacy sigma worker prefix run (sigma.*); superseded by unprefixed runs',\n completed_at = now()\n WHERE \"_account_id\" = $1\n AND run_started_at = $2\n AND object LIKE 'sigma.%'\n AND status IN ('pending', 'running')`,\n [accountId, runStartedAt]\n )\n\n // Stop self-triggering after MAX_RUN_AGE_MS.\n const runAgeMs = Date.now() - runStartedAt.getTime()\n if 
(runAgeMs > MAX_RUN_AGE_MS) {\n console.warn(\n `Sigma worker: run too old (${Math.round(runAgeMs / 1000 / 60)} min), closing without self-trigger`\n )\n await stripeSync.postgresClient.closeSyncRun(accountId, runStartedAt)\n return jsonResponse({\n message: 'Sigma run exceeded max age, closed without processing',\n runAgeMinutes: Math.round(runAgeMs / 1000 / 60),\n selfTriggered: false,\n })\n }\n\n // Create object runs for all sigma objects (idempotent).\n await stripeSync.postgresClient.createObjectRuns(accountId, runStartedAt, sigmaObjects)\n await stripeSync.postgresClient.ensureSyncRunMaxConcurrent(accountId, runStartedAt, BATCH_SIZE)\n\n // Prefer running objects; otherwise claim pending ones.\n const runningObjects = await stripeSync.postgresClient.listObjectsByStatus(\n accountId,\n runStartedAt,\n 'running',\n sigmaObjects\n )\n\n const objectsToProcess = runningObjects.slice(0, BATCH_SIZE)\n let pendingObjects: string[] = []\n\n if (objectsToProcess.length === 0) {\n pendingObjects = await stripeSync.postgresClient.listObjectsByStatus(\n accountId,\n runStartedAt,\n 'pending',\n sigmaObjects\n )\n\n for (const objectKey of pendingObjects) {\n if (objectsToProcess.length >= BATCH_SIZE) break\n const started = await stripeSync.postgresClient.tryStartObjectSync(\n accountId,\n runStartedAt,\n objectKey\n )\n if (started) {\n objectsToProcess.push(objectKey)\n }\n }\n }\n\n if (objectsToProcess.length === 0) {\n if (pendingObjects.length === 0) {\n console.info('Sigma worker: all objects complete or errored - run finished')\n return jsonResponse({ message: 'Sigma sync run complete', selfTriggered: false })\n }\n\n console.info('Sigma worker: at concurrency limit, will self-trigger', {\n pendingCount: pendingObjects.length,\n })\n let selfTriggered = false\n try {\n await sql`SELECT stripe.trigger_sigma_worker()`\n selfTriggered = true\n } catch (error) {\n console.warn('Failed to self-trigger sigma worker:', error.message)\n }\n\n return jsonResponse({\n message: 'At concurrency limit',\n pendingCount: pendingObjects.length,\n selfTriggered,\n })\n }\n\n // Process objects sequentially (one lifecycle per invocation).\n const results: Array<Record<string, unknown>> = []\n\n for (const object of objectsToProcess) {\n const objectKey = object\n try {\n console.info(`Sigma worker: processing ${object}`)\n\n // Process one sigma page and upsert results.\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const result = await stripeSync.processNext(object as any, {\n runStartedAt,\n triggeredBy: 'sigma-worker',\n })\n\n results.push({\n object,\n processed: result.processed,\n hasMore: result.hasMore,\n status: 'success',\n })\n\n if (result.hasMore) {\n console.info(\n `Sigma worker: ${object} has more pages, processed ${result.processed} rows so far`\n )\n } else {\n console.info(`Sigma worker: ${object} complete, processed ${result.processed} rows`)\n }\n } catch (error) {\n console.error(`Sigma worker: error processing ${object}:`, error)\n\n // Mark object as failed and move on (no retries)\n await stripeSync.postgresClient.failObjectSync(\n accountId,\n runStartedAt,\n objectKey,\n error.message ?? 
'Unknown error'\n )\n\n results.push({\n object,\n processed: 0,\n hasMore: false,\n status: 'error',\n error: error.message,\n })\n }\n }\n\n // Determine if self-trigger is needed\n const pendingAfter = await stripeSync.postgresClient.listObjectsByStatus(\n accountId,\n runStartedAt,\n 'pending',\n sigmaObjects\n )\n const runningAfter = await stripeSync.postgresClient.listObjectsByStatus(\n accountId,\n runStartedAt,\n 'running',\n sigmaObjects\n )\n\n // Calculate remaining run time for logging\n const remainingMs = MAX_RUN_AGE_MS - (Date.now() - runStartedAt.getTime())\n const remainingMinutes = Math.round(remainingMs / 1000 / 60)\n\n // Only self-trigger if there are pending or running objects AND run hasn't timed out\n const shouldSelfTrigger =\n (pendingAfter.length > 0 || runningAfter.length > 0) && remainingMs > 0\n\n let selfTriggered = false\n if (shouldSelfTrigger) {\n console.info('Sigma worker: more work remains, self-triggering', {\n pending: pendingAfter.length,\n running: runningAfter.length,\n remainingMinutes,\n })\n try {\n await sql`SELECT stripe.trigger_sigma_worker()`\n selfTriggered = true\n } catch (error) {\n console.warn('Failed to self-trigger sigma worker:', error.message)\n }\n } else if (pendingAfter.length > 0 || runningAfter.length > 0) {\n // Would self-trigger but run timed out\n console.warn('Sigma worker: work remains but run timed out, closing', {\n pending: pendingAfter.length,\n running: runningAfter.length,\n })\n await stripeSync.postgresClient.closeSyncRun(accountId, runStartedAt)\n } else {\n console.info('Sigma worker: no more work, run complete')\n }\n\n return jsonResponse({\n results,\n selfTriggered,\n remaining: { pending: pendingAfter.length, running: runningAfter.length },\n })\n } catch (error) {\n console.error('Sigma worker error:', error)\n return jsonResponse({ error: error.message, stack: error.stack }, 500)\n } finally {\n if (sql) await sql.end()\n if (stripeSync) await stripeSync.postgresClient.pool.end()\n }\n})\n";
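The stripe_worker_default string above embeds the worker's queue handling: messages read from pgmq are grouped by Stripe object type, one page is processed per type, every duplicate message for that type is acknowledged, and at most one follow-up message is re-enqueued when more pages remain. The following is a minimal sketch of that pattern, not an excerpt of the package; it assumes a postgres.js client (sql) and a processNext(object) pager shaped like the one in the embedded code.

import postgres from 'npm:postgres'

// Sketch of the dedup-and-acknowledge pattern embedded in stripe_worker_default.
// Assumes processNext(object) syncs one page for a Stripe object type and reports hasMore.
const QUEUE_NAME = 'stripe_sync_work'

export async function drainBatch(
  sql: ReturnType<typeof postgres>,
  messages: Array<{ msg_id: bigint; message: { object: string } }>,
  processNext: (object: string) => Promise<{ hasMore: boolean }>
): Promise<void> {
  // Group duplicate messages so each object type is processed at most once per batch.
  const byObject = new Map<string, bigint[]>()
  for (const msg of messages) {
    const ids = byObject.get(msg.message.object) ?? []
    ids.push(msg.msg_id)
    byObject.set(msg.message.object, ids)
  }

  await Promise.all(
    Array.from(byObject.entries()).map(async ([object, msgIds]) => {
      const result = await processNext(object)
      // Acknowledge every duplicate for this object, not just the one that was processed.
      await sql`SELECT pgmq.delete(${QUEUE_NAME}::text, ${sql.array(msgIds)}::bigint[])`
      // Re-enqueue a single follow-up message when more pages remain.
      if (result.hasMore) {
        await sql`SELECT pgmq.send(${QUEUE_NAME}::text, ${sql.json({ object })}::jsonb)`
      }
    })
  )
}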
package/dist/cli/lib.js
CHANGED
@@ -6,10 +6,10 @@ import {
migrateCommand,
syncCommand,
uninstallCommand
-} from "../chunk-
-import "../chunk-
-import "../chunk-
-import "../chunk-
+} from "../chunk-KN5PW4SC.js";
+import "../chunk-CPILJLAL.js";
+import "../chunk-JYLI5JEV.js";
+import "../chunk-CW2GRCQD.js";
export {
backfillCommand,
createTunnel,
package/dist/index.cjs
CHANGED
@@ -46,7 +46,7 @@ var importMetaUrl = /* @__PURE__ */ getImportMetaUrl();
// package.json
var package_default = {
name: "@paymentsdb/sync-engine",
-version: "0.0.
+version: "0.0.10",
private: false,
description: "Stripe Sync Engine to sync Stripe data to Postgres",
type: "module",
package/dist/index.js
CHANGED
package/dist/supabase/index.cjs
CHANGED
@@ -45,7 +45,7 @@ var stripe_setup_default = "import { StripeSync, runMigrations, VERSION } from '
var stripe_webhook_default = "import { StripeSync } from 'npm:@paymentsdb/sync-engine'\n\nDeno.serve(async (req) => {\n if (req.method !== 'POST') {\n return new Response('Method not allowed', { status: 405 })\n }\n\n const sig = req.headers.get('stripe-signature')\n if (!sig) {\n return new Response('Missing stripe-signature header', { status: 400 })\n }\n\n const rawDbUrl = Deno.env.get('SUPABASE_DB_URL')\n if (!rawDbUrl) {\n return new Response(JSON.stringify({ error: 'SUPABASE_DB_URL not set' }), { status: 500 })\n }\n const dbUrl = rawDbUrl.replace(/[?&]sslmode=[^&]*/g, '').replace(/[?&]$/, '')\n\n const stripeSync = new StripeSync({\n poolConfig: { connectionString: dbUrl, max: 1 },\n stripeSecretKey: Deno.env.get('STRIPE_SECRET_KEY')!,\n appName: Deno.env.get('STRIPE_APP_NAME') || 'PaymentsDB',\n })\n\n try {\n const rawBody = new Uint8Array(await req.arrayBuffer())\n await stripeSync.processWebhook(rawBody, sig)\n return new Response(JSON.stringify({ received: true }), {\n status: 200,\n headers: { 'Content-Type': 'application/json' },\n })\n } catch (error) {\n console.error('Webhook processing error:', error)\n const isSignatureError =\n error.message?.includes('signature') || error.type === 'StripeSignatureVerificationError'\n const status = isSignatureError ? 400 : 500\n return new Response(JSON.stringify({ error: error.message }), {\n status,\n headers: { 'Content-Type': 'application/json' },\n })\n } finally {\n await stripeSync.postgresClient.pool.end()\n }\n})\n";
// raw-ts:/Users/prasoon/work/paymentsdb-sync-engine/packages/sync-engine/src/supabase/edge-functions/stripe-worker.ts
-
var stripe_worker_default = "/**\n * Stripe Sync Worker\n *\n * Triggered by pg_cron at a configurable interval (default: 60 seconds). Uses pgmq for durable work queue.\n *\n * Flow:\n * 1. Read batch of messages from pgmq (qty=10, vt=60s)\n * 2. If queue empty: enqueue all objects (continuous sync), then self-trigger\n * 3. Process messages in parallel (Promise.all):\n * - processNext(object)\n * - Delete message on success\n * - Re-enqueue if hasMore\n * 4. Self-trigger if work was re-enqueued (with progress) and run isn't too old\n * 5. Return results summary\n *\n * Concurrency:\n * - Multiple workers can run concurrently via overlapping pg_cron triggers.\n * - Each worker processes its batch of messages in parallel (Promise.all).\n * - pgmq visibility timeout prevents duplicate message reads across workers.\n * - processNext() is idempotent (uses internal cursor tracking), so duplicate\n * processing on timeout/crash is safe.\n *\n * Self-triggering:\n * - Accelerates initial sync by not waiting for next cron tick.\n * - Only triggers when work was re-enqueued AND progress was made.\n * - Stops after MAX_RUN_AGE_MS to prevent runaway loops.\n * - Cron remains as fallback for errors/timeouts.\n */\n\nimport { StripeSync } from 'npm:@paymentsdb/sync-engine'\nimport postgres from 'npm:postgres'\n\nconst QUEUE_NAME = 'stripe_sync_work'\nconst VISIBILITY_TIMEOUT = 60 // seconds\nconst BATCH_SIZE = 10\nconst MAX_RUN_AGE_MS = 6 * 60 * 60 * 1000 // 6 hours - stop self-triggering after this\n\nDeno.serve(async (req) => {\n const authHeader = req.headers.get('Authorization')\n if (!authHeader?.startsWith('Bearer ')) {\n return new Response('Unauthorized', { status: 401 })\n }\n\n const token = authHeader.substring(7) // Remove 'Bearer '\n\n const rawDbUrl = Deno.env.get('SUPABASE_DB_URL')\n if (!rawDbUrl) {\n return new Response(JSON.stringify({ error: 'SUPABASE_DB_URL not set' }), { status: 500 })\n }\n const dbUrl = rawDbUrl.replace(/[?&]sslmode=[^&]*/g, '').replace(/[?&]$/, '')\n\n let sql\n let stripeSync\n\n try {\n sql = postgres(dbUrl, { max: 1, prepare: false })\n } catch (error) {\n return new Response(\n JSON.stringify({\n error: 'Failed to create postgres connection',\n details: error.message,\n stack: error.stack,\n }),\n { status: 500, headers: { 'Content-Type': 'application/json' } }\n )\n }\n\n try {\n // Validate that the token matches the unique worker secret stored in vault\n const vaultResult = await sql`\n SELECT decrypted_secret\n FROM vault.decrypted_secrets\n WHERE name = 'stripe_sync_worker_secret'\n `\n\n if (vaultResult.length === 0) {\n await sql.end()\n return new Response('Worker secret not configured in vault', { status: 500 })\n }\n\n const storedSecret = vaultResult[0].decrypted_secret\n if (token !== storedSecret) {\n await sql.end()\n return new Response('Forbidden: Invalid worker secret', { status: 403 })\n }\n\n stripeSync = new StripeSync({\n poolConfig: { connectionString: dbUrl, max: 1 },\n stripeSecretKey: Deno.env.get('STRIPE_SECRET_KEY')!,\n enableSigma: (Deno.env.get('ENABLE_SIGMA') ?? 
'false') === 'true',\n appName: Deno.env.get('STRIPE_APP_NAME') || 'PaymentsDB',\n })\n } catch (error) {\n await sql.end()\n return new Response(\n JSON.stringify({\n error: 'Failed to create StripeSync',\n details: error.message,\n stack: error.stack,\n }),\n { status: 500, headers: { 'Content-Type': 'application/json' } }\n )\n }\n\n try {\n // Read batch of messages from queue\n const messages = await sql`\n SELECT * FROM pgmq.read(${QUEUE_NAME}::text, ${VISIBILITY_TIMEOUT}::int, ${BATCH_SIZE}::int)\n `\n\n // If queue empty, enqueue all objects for continuous sync\n if (messages.length === 0) {\n // Create sync run to make enqueued work visible (status='pending')\n const { runKey, objects } = await stripeSync.joinOrCreateSyncRun('worker')\n const msgs = objects.map((object) => JSON.stringify({ object }))\n\n await sql`\n SELECT pgmq.send_batch(\n ${QUEUE_NAME}::text,\n ${sql.array(msgs)}::jsonb[]\n )\n `\n\n // Self-trigger to start processing immediately (don't wait for cron)\n // But only if run isn't too old (prevents runaway on stale runs)\n let selfTriggered = false\n if (objects.length > 0) {\n const runAgeMs = Date.now() - runKey.runStartedAt.getTime()\n if (runAgeMs <= MAX_RUN_AGE_MS) {\n try {\n await sql`SELECT stripe.trigger_worker()`\n selfTriggered = true\n } catch (err) {\n console.warn('Failed to self-trigger after enqueue:', err)\n }\n } else {\n console.warn(\n `Worker: run too old (${Math.round(runAgeMs / 1000 / 60)} min), not self-triggering after enqueue`\n )\n }\n }\n\n return new Response(JSON.stringify({ enqueued: objects.length, objects, selfTriggered }), {\n status: 200,\n headers: { 'Content-Type': 'application/json' },\n })\n }\n\n //
+
var stripe_worker_default = "/**\n * Stripe Sync Worker\n *\n * Triggered by pg_cron at a configurable interval (default: 60 seconds). Uses pgmq for durable work queue.\n *\n * Flow:\n * 1. Read batch of messages from pgmq (qty=10, vt=60s)\n * 2. If queue empty: enqueue all objects (continuous sync), then self-trigger\n * 3. Process messages in parallel (Promise.all):\n * - processNext(object)\n * - Delete message on success\n * - Re-enqueue if hasMore\n * 4. Self-trigger if work was re-enqueued (with progress) and run isn't too old\n * 5. Return results summary\n *\n * Concurrency:\n * - Multiple workers can run concurrently via overlapping pg_cron triggers.\n * - Each worker processes its batch of messages in parallel (Promise.all).\n * - pgmq visibility timeout prevents duplicate message reads across workers.\n * - processNext() is idempotent (uses internal cursor tracking), so duplicate\n * processing on timeout/crash is safe.\n *\n * Self-triggering:\n * - Accelerates initial sync by not waiting for next cron tick.\n * - Only triggers when work was re-enqueued AND progress was made.\n * - Stops after MAX_RUN_AGE_MS to prevent runaway loops.\n * - Cron remains as fallback for errors/timeouts.\n */\n\nimport { StripeSync } from 'npm:@paymentsdb/sync-engine'\nimport postgres from 'npm:postgres'\n\nconst QUEUE_NAME = 'stripe_sync_work'\nconst VISIBILITY_TIMEOUT = 60 // seconds\nconst BATCH_SIZE = 10\nconst MAX_RUN_AGE_MS = 6 * 60 * 60 * 1000 // 6 hours - stop self-triggering after this\n\nDeno.serve(async (req) => {\n const authHeader = req.headers.get('Authorization')\n if (!authHeader?.startsWith('Bearer ')) {\n return new Response('Unauthorized', { status: 401 })\n }\n\n const token = authHeader.substring(7) // Remove 'Bearer '\n\n const rawDbUrl = Deno.env.get('SUPABASE_DB_URL')\n if (!rawDbUrl) {\n return new Response(JSON.stringify({ error: 'SUPABASE_DB_URL not set' }), { status: 500 })\n }\n const dbUrl = rawDbUrl.replace(/[?&]sslmode=[^&]*/g, '').replace(/[?&]$/, '')\n\n let sql\n let stripeSync\n\n try {\n sql = postgres(dbUrl, { max: 1, prepare: false })\n } catch (error) {\n return new Response(\n JSON.stringify({\n error: 'Failed to create postgres connection',\n details: error.message,\n stack: error.stack,\n }),\n { status: 500, headers: { 'Content-Type': 'application/json' } }\n )\n }\n\n try {\n // Validate that the token matches the unique worker secret stored in vault\n const vaultResult = await sql`\n SELECT decrypted_secret\n FROM vault.decrypted_secrets\n WHERE name = 'stripe_sync_worker_secret'\n `\n\n if (vaultResult.length === 0) {\n await sql.end()\n return new Response('Worker secret not configured in vault', { status: 500 })\n }\n\n const storedSecret = vaultResult[0].decrypted_secret\n if (token !== storedSecret) {\n await sql.end()\n return new Response('Forbidden: Invalid worker secret', { status: 403 })\n }\n\n stripeSync = new StripeSync({\n poolConfig: { connectionString: dbUrl, max: 1 },\n stripeSecretKey: Deno.env.get('STRIPE_SECRET_KEY')!,\n enableSigma: (Deno.env.get('ENABLE_SIGMA') ?? 
'false') === 'true',\n appName: Deno.env.get('STRIPE_APP_NAME') || 'PaymentsDB',\n })\n } catch (error) {\n await sql.end()\n return new Response(\n JSON.stringify({\n error: 'Failed to create StripeSync',\n details: error.message,\n stack: error.stack,\n }),\n { status: 500, headers: { 'Content-Type': 'application/json' } }\n )\n }\n\n try {\n // Read batch of messages from queue\n const messages = await sql`\n SELECT * FROM pgmq.read(${QUEUE_NAME}::text, ${VISIBILITY_TIMEOUT}::int, ${BATCH_SIZE}::int)\n `\n\n // If queue empty, enqueue all objects for continuous sync\n if (messages.length === 0) {\n // Create sync run to make enqueued work visible (status='pending')\n const { runKey, objects } = await stripeSync.joinOrCreateSyncRun('worker')\n const msgs = objects.map((object) => JSON.stringify({ object }))\n\n await sql`\n SELECT pgmq.send_batch(\n ${QUEUE_NAME}::text,\n ${sql.array(msgs)}::jsonb[]\n )\n `\n\n // Self-trigger to start processing immediately (don't wait for cron)\n // But only if run isn't too old (prevents runaway on stale runs)\n let selfTriggered = false\n if (objects.length > 0) {\n const runAgeMs = Date.now() - runKey.runStartedAt.getTime()\n if (runAgeMs <= MAX_RUN_AGE_MS) {\n try {\n await sql`SELECT stripe.trigger_worker()`\n selfTriggered = true\n } catch (err) {\n console.warn('Failed to self-trigger after enqueue:', err)\n }\n } else {\n console.warn(\n `Worker: run too old (${Math.round(runAgeMs / 1000 / 60)} min), not self-triggering after enqueue`\n )\n }\n }\n\n return new Response(JSON.stringify({ enqueued: objects.length, objects, selfTriggered }), {\n status: 200,\n headers: { 'Content-Type': 'application/json' },\n })\n }\n\n // Deduplicate messages by object type to prevent parallel processing of the same object.\n // Multiple messages for the same object can accumulate if previous workers re-enqueued\n // before completing, leading to race conditions where the same page is fetched multiple times.\n const messagesByObject = new Map<string, typeof messages>()\n for (const msg of messages) {\n const { object } = msg.message as { object: string }\n if (!messagesByObject.has(object)) {\n messagesByObject.set(object, [])\n }\n messagesByObject.get(object)!.push(msg)\n }\n\n // Process one message per unique object type in parallel\n // Pass triggeredBy: 'worker' so all processing uses the same run as joinOrCreateSyncRun('worker')\n const results = await Promise.all(\n Array.from(messagesByObject.entries()).map(async ([object, msgs]) => {\n try {\n const result = await stripeSync.processNext(object, { triggeredBy: 'worker' })\n\n // Delete ALL messages for this object (not just one) to prevent duplicate processing\n const msgIds = msgs.map((m) => m.msg_id)\n await sql`SELECT pgmq.delete(${QUEUE_NAME}::text, ${sql.array(msgIds)}::bigint[])`\n\n // Re-enqueue only ONCE if more pages (not once per message)\n if (result.hasMore) {\n await sql`SELECT pgmq.send(${QUEUE_NAME}::text, ${sql.json({ object })}::jsonb)`\n }\n\n return { object, ...result, reenqueued: result.hasMore, deduplicatedCount: msgs.length }\n } catch (error) {\n // Log error but continue to next object\n // Messages will become visible again after visibility timeout\n console.error(`Error processing ${object}:`, error)\n return {\n object,\n processed: 0,\n hasMore: false,\n reenqueued: false,\n runStartedAt: null,\n deduplicatedCount: msgs.length,\n error: error.message,\n stack: error.stack,\n }\n }\n })\n )\n\n // Self-trigger if work was re-enqueued with progress, and run isn't too old\n 
let selfTriggered = false\n const successfulResults = results.filter((r) => r.reenqueued && r.processed > 0 && !r.error)\n\n if (successfulResults.length > 0) {\n // Use runStartedAt from the first successful result (all use same run)\n const runStartedAt = successfulResults[0].runStartedAt\n if (runStartedAt) {\n const runAgeMs = Date.now() - runStartedAt.getTime()\n if (runAgeMs <= MAX_RUN_AGE_MS) {\n try {\n await sql`SELECT stripe.trigger_worker()`\n selfTriggered = true\n } catch (err) {\n console.warn('Failed to self-trigger after processing:', err)\n }\n } else {\n console.warn(\n `Worker: run too old (${Math.round(runAgeMs / 1000 / 60)} min), not self-triggering`\n )\n }\n }\n }\n\n return new Response(JSON.stringify({ results, selfTriggered }), {\n status: 200,\n headers: { 'Content-Type': 'application/json' },\n })\n } catch (error) {\n console.error('Worker error:', error)\n return new Response(JSON.stringify({ error: error.message, stack: error.stack }), {\n status: 500,\n headers: { 'Content-Type': 'application/json' },\n })\n } finally {\n if (sql) await sql.end()\n if (stripeSync) await stripeSync.postgresClient.pool.end()\n }\n})\n";
// raw-ts:/Users/prasoon/work/paymentsdb-sync-engine/packages/sync-engine/src/supabase/edge-functions/sigma-data-worker.ts
var sigma_data_worker_default = "/**\n * Stripe Sigma Data Worker.\n *\n * Hourly cron starts a run; self-trigger continues until all objects finish.\n * Progress persists in _sync_runs and _sync_obj_runs across invocations.\n */\n\nimport { StripeSync } from 'npm:stripe-experiment-sync'\nimport postgres from 'npm:postgres'\n\nconst BATCH_SIZE = 1\nconst MAX_RUN_AGE_MS = 6 * 60 * 60 * 1000\nconst jsonResponse = (body: unknown, status = 200) =>\n new Response(JSON.stringify(body), {\n status,\n headers: { 'Content-Type': 'application/json' },\n })\n\nDeno.serve(async (req) => {\n const authHeader = req.headers.get('Authorization')\n if (!authHeader?.startsWith('Bearer ')) {\n return new Response('Unauthorized', { status: 401 })\n }\n\n const token = authHeader.substring(7)\n\n const rawDbUrl = Deno.env.get('SUPABASE_DB_URL')\n if (!rawDbUrl) {\n return jsonResponse({ error: 'SUPABASE_DB_URL not set' }, 500)\n }\n const dbUrl = rawDbUrl.replace(/[?&]sslmode=[^&]*/g, '').replace(/[?&]$/, '')\n\n let sql: ReturnType<typeof postgres> | undefined\n let stripeSync: StripeSync | undefined\n\n try {\n sql = postgres(dbUrl, { max: 1, prepare: false })\n } catch (error) {\n return jsonResponse(\n {\n error: 'Failed to create postgres connection',\n details: error.message,\n stack: error.stack,\n },\n 500\n )\n }\n\n try {\n // Validate the token against vault secret\n const vaultResult = await sql`\n SELECT decrypted_secret\n FROM vault.decrypted_secrets\n WHERE name = 'stripe_sigma_worker_secret'\n `\n\n if (vaultResult.length === 0) {\n await sql.end()\n return new Response('Sigma worker secret not configured in vault', { status: 500 })\n }\n\n const storedSecret = vaultResult[0].decrypted_secret\n if (token !== storedSecret) {\n await sql.end()\n return new Response('Forbidden: Invalid sigma worker secret', { status: 403 })\n }\n\n stripeSync = new StripeSync({\n poolConfig: { connectionString: dbUrl, max: 1 },\n stripeSecretKey: Deno.env.get('STRIPE_SECRET_KEY')!,\n enableSigma: true,\n sigmaPageSizeOverride: 1000,\n })\n } catch (error) {\n await sql.end()\n return jsonResponse(\n {\n error: 'Failed to create StripeSync',\n details: error.message,\n stack: error.stack,\n },\n 500\n )\n }\n\n try {\n const accountId = await stripeSync.getAccountId()\n const sigmaObjects = stripeSync.getSupportedSigmaObjects()\n\n if (sigmaObjects.length === 0) {\n return jsonResponse({ message: 'No Sigma objects configured for sync' })\n }\n\n // Get or create sync run for sigma-worker (isolated from stripe-worker)\n const runResult = await stripeSync.postgresClient.getOrCreateSyncRun(accountId, 'sigma-worker')\n const runStartedAt =\n runResult?.runStartedAt ??\n (await stripeSync.postgresClient.getActiveSyncRun(accountId, 'sigma-worker'))?.runStartedAt\n\n if (!runStartedAt) {\n throw new Error('Failed to get or create sync run for sigma worker')\n }\n\n // Legacy cleanup: remove any prefixed sigma object runs that can block concurrency.\n // Previous versions stored objects as \"sigma.<table>\" which no longer matches processNext.\n await stripeSync.postgresClient.query(\n `UPDATE \"stripe\".\"_sync_obj_runs\"\n SET status = 'error',\n error_message = 'Legacy sigma worker prefix run (sigma.*); superseded by unprefixed runs',\n completed_at = now()\n WHERE \"_account_id\" = $1\n AND run_started_at = $2\n AND object LIKE 'sigma.%'\n AND status IN ('pending', 'running')`,\n [accountId, runStartedAt]\n )\n\n // Stop self-triggering after MAX_RUN_AGE_MS.\n const runAgeMs = Date.now() - runStartedAt.getTime()\n if 
(runAgeMs > MAX_RUN_AGE_MS) {\n console.warn(\n `Sigma worker: run too old (${Math.round(runAgeMs / 1000 / 60)} min), closing without self-trigger`\n )\n await stripeSync.postgresClient.closeSyncRun(accountId, runStartedAt)\n return jsonResponse({\n message: 'Sigma run exceeded max age, closed without processing',\n runAgeMinutes: Math.round(runAgeMs / 1000 / 60),\n selfTriggered: false,\n })\n }\n\n // Create object runs for all sigma objects (idempotent).\n await stripeSync.postgresClient.createObjectRuns(accountId, runStartedAt, sigmaObjects)\n await stripeSync.postgresClient.ensureSyncRunMaxConcurrent(accountId, runStartedAt, BATCH_SIZE)\n\n // Prefer running objects; otherwise claim pending ones.\n const runningObjects = await stripeSync.postgresClient.listObjectsByStatus(\n accountId,\n runStartedAt,\n 'running',\n sigmaObjects\n )\n\n const objectsToProcess = runningObjects.slice(0, BATCH_SIZE)\n let pendingObjects: string[] = []\n\n if (objectsToProcess.length === 0) {\n pendingObjects = await stripeSync.postgresClient.listObjectsByStatus(\n accountId,\n runStartedAt,\n 'pending',\n sigmaObjects\n )\n\n for (const objectKey of pendingObjects) {\n if (objectsToProcess.length >= BATCH_SIZE) break\n const started = await stripeSync.postgresClient.tryStartObjectSync(\n accountId,\n runStartedAt,\n objectKey\n )\n if (started) {\n objectsToProcess.push(objectKey)\n }\n }\n }\n\n if (objectsToProcess.length === 0) {\n if (pendingObjects.length === 0) {\n console.info('Sigma worker: all objects complete or errored - run finished')\n return jsonResponse({ message: 'Sigma sync run complete', selfTriggered: false })\n }\n\n console.info('Sigma worker: at concurrency limit, will self-trigger', {\n pendingCount: pendingObjects.length,\n })\n let selfTriggered = false\n try {\n await sql`SELECT stripe.trigger_sigma_worker()`\n selfTriggered = true\n } catch (error) {\n console.warn('Failed to self-trigger sigma worker:', error.message)\n }\n\n return jsonResponse({\n message: 'At concurrency limit',\n pendingCount: pendingObjects.length,\n selfTriggered,\n })\n }\n\n // Process objects sequentially (one lifecycle per invocation).\n const results: Array<Record<string, unknown>> = []\n\n for (const object of objectsToProcess) {\n const objectKey = object\n try {\n console.info(`Sigma worker: processing ${object}`)\n\n // Process one sigma page and upsert results.\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const result = await stripeSync.processNext(object as any, {\n runStartedAt,\n triggeredBy: 'sigma-worker',\n })\n\n results.push({\n object,\n processed: result.processed,\n hasMore: result.hasMore,\n status: 'success',\n })\n\n if (result.hasMore) {\n console.info(\n `Sigma worker: ${object} has more pages, processed ${result.processed} rows so far`\n )\n } else {\n console.info(`Sigma worker: ${object} complete, processed ${result.processed} rows`)\n }\n } catch (error) {\n console.error(`Sigma worker: error processing ${object}:`, error)\n\n // Mark object as failed and move on (no retries)\n await stripeSync.postgresClient.failObjectSync(\n accountId,\n runStartedAt,\n objectKey,\n error.message ?? 
'Unknown error'\n )\n\n results.push({\n object,\n processed: 0,\n hasMore: false,\n status: 'error',\n error: error.message,\n })\n }\n }\n\n // Determine if self-trigger is needed\n const pendingAfter = await stripeSync.postgresClient.listObjectsByStatus(\n accountId,\n runStartedAt,\n 'pending',\n sigmaObjects\n )\n const runningAfter = await stripeSync.postgresClient.listObjectsByStatus(\n accountId,\n runStartedAt,\n 'running',\n sigmaObjects\n )\n\n // Calculate remaining run time for logging\n const remainingMs = MAX_RUN_AGE_MS - (Date.now() - runStartedAt.getTime())\n const remainingMinutes = Math.round(remainingMs / 1000 / 60)\n\n // Only self-trigger if there are pending or running objects AND run hasn't timed out\n const shouldSelfTrigger =\n (pendingAfter.length > 0 || runningAfter.length > 0) && remainingMs > 0\n\n let selfTriggered = false\n if (shouldSelfTrigger) {\n console.info('Sigma worker: more work remains, self-triggering', {\n pending: pendingAfter.length,\n running: runningAfter.length,\n remainingMinutes,\n })\n try {\n await sql`SELECT stripe.trigger_sigma_worker()`\n selfTriggered = true\n } catch (error) {\n console.warn('Failed to self-trigger sigma worker:', error.message)\n }\n } else if (pendingAfter.length > 0 || runningAfter.length > 0) {\n // Would self-trigger but run timed out\n console.warn('Sigma worker: work remains but run timed out, closing', {\n pending: pendingAfter.length,\n running: runningAfter.length,\n })\n await stripeSync.postgresClient.closeSyncRun(accountId, runStartedAt)\n } else {\n console.info('Sigma worker: no more work, run complete')\n }\n\n return jsonResponse({\n results,\n selfTriggered,\n remaining: { pending: pendingAfter.length, running: runningAfter.length },\n })\n } catch (error) {\n console.error('Sigma worker error:', error)\n return jsonResponse({ error: error.message, stack: error.stack }, 500)\n } finally {\n if (sql) await sql.end()\n if (stripeSync) await stripeSync.postgresClient.pool.end()\n }\n})\n";
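The sigma_data_worker_default string above claims at most BATCH_SIZE objects per invocation: it first re-selects objects already marked running, and only then claims pending ones through tryStartObjectSync, so overlapping invocations do not start the same object twice. The sketch below illustrates that claim step; the SyncRunStore interface is an assumption made here for illustration, though the method names mirror the embedded code.

// Sketch of the claim step used by the embedded sigma worker. SyncRunStore is an
// assumed interface; the method names follow the calls made in sigma_data_worker_default.
interface SyncRunStore {
  listObjectsByStatus(
    accountId: string,
    runStartedAt: Date,
    status: 'pending' | 'running',
    objects: string[]
  ): Promise<string[]>
  tryStartObjectSync(accountId: string, runStartedAt: Date, object: string): Promise<boolean>
}

export async function claimObjects(
  store: SyncRunStore,
  accountId: string,
  runStartedAt: Date,
  sigmaObjects: string[],
  limit: number
): Promise<string[]> {
  // Prefer objects already running: a previous invocation left them mid-sync.
  const running = await store.listObjectsByStatus(accountId, runStartedAt, 'running', sigmaObjects)
  const claimed = running.slice(0, limit)
  if (claimed.length > 0) return claimed

  // Otherwise claim pending objects one by one; tryStartObjectSync returns true only
  // for the caller that wins the claim, so concurrent workers cannot double-claim.
  const pending = await store.listObjectsByStatus(accountId, runStartedAt, 'pending', sigmaObjects)
  for (const object of pending) {
    if (claimed.length >= limit) break
    if (await store.tryStartObjectSync(accountId, runStartedAt, object)) {
      claimed.push(object)
    }
  }
  return claimed
}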
@@ -59,7 +59,7 @@ var sigmaWorkerFunctionCode = sigma_data_worker_default;
// package.json
var package_default = {
name: "@paymentsdb/sync-engine",
-version: "0.0.
+version: "0.0.10",
private: false,
description: "Stripe Sync Engine to sync Stripe data to Postgres",
type: "module",
package/dist/supabase/index.js
CHANGED
@@ -10,8 +10,8 @@ import {
uninstall,
webhookFunctionCode,
workerFunctionCode
-} from "../chunk-
-import "../chunk-
+} from "../chunk-JYLI5JEV.js";
+import "../chunk-CW2GRCQD.js";
export {
INSTALLATION_ERROR_SUFFIX,
INSTALLATION_INSTALLED_SUFFIX,