@hot-updater/supabase 0.28.0 → 0.29.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,779 @@
1
+ import { spawnSync } from "node:child_process";
2
+ import { createHmac } from "node:crypto";
3
+ import {
4
+ access,
5
+ mkdir,
6
+ mkdtemp,
7
+ readdir,
8
+ readFile,
9
+ rm,
10
+ symlink,
11
+ writeFile,
12
+ } from "node:fs/promises";
13
+ import path from "node:path";
14
+ import { fileURLToPath, pathToFileURL } from "node:url";
15
+ import { transformEnv } from "@hot-updater/cli-tools";
16
+ import { type Bundle, type GetBundlesArgs, NIL_UUID } from "@hot-updater/core";
17
+ import { createHotUpdater } from "@hot-updater/server/runtime";
18
+ import { setupGetUpdateInfoTestSuite } from "@hot-updater/test-utils";
19
+ import { createClient } from "@supabase/supabase-js";
20
+ import { afterAll, beforeAll, beforeEach, describe, expect, it } from "vitest";
21
+ import {
22
+ assertDockerComposeAvailable,
23
+ findOpenPort,
24
+ runCheckedCommand,
25
+ spawnRuntime,
26
+ stopRuntime,
27
+ waitForHttpOk,
28
+ } from "../../../../packages/test-utils/src/runtimeProcess";
29
+ import { supabaseDatabase } from "../../src/supabaseDatabase";
30
+ import { supabaseStorage } from "../../src/supabaseStorage";
31
+
32
// Location of this test file; used to derive workspace-relative paths below.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Monorepo root, four directories above this test file.
const WORKSPACE_ROOT = path.resolve(__dirname, "../../../..");
// Name of the edge function under test and the URL prefix it is served from.
const FUNCTION_NAME = "hot-updater-function";
const FUNCTION_BASE_PATH = `/${FUNCTION_NAME}`;
// Base path the hot-updater runtime is mounted at inside the function.
const HOT_UPDATER_BASE_PATH = "/";
// Old update-check path; the suite asserts this is no longer served.
const LEGACY_HOT_UPDATER_BASE_PATH = "/api/check-update";
// Supabase Storage bucket that receives the seeded bundle zips.
const BUCKET_NAME = "hot-updater-bundles";
// Pinned container images/volumes for the throwaway docker-compose stack.
const DENO_DOCKER_IMAGE = "denoland/deno:alpine";
const DENO_CACHE_VOLUME = "hot-updater-supabase-deno-cache";
const POSTGRES_IMAGE = "postgres:15-alpine";
const POSTGREST_IMAGE = "postgrest/postgrest:v14.6";
const STORAGE_IMAGE = "supabase/storage-api:v1.44.2";
const IMGPROXY_IMAGE = "darthsim/imgproxy:v3.30.1";
const NGINX_IMAGE = "nginx:1.27-alpine";
// Throwaway credentials for the local stack only (never real secrets).
const POSTGRES_PASSWORD = "postgres";
const POSTGRES_DB = "postgres";
const JWT_SECRET = "super-secret-jwt-token-with-at-least-32-chars";
const JWT_EXPIRY_SECONDS = 60 * 60 * 24 * 365; // one year
// `createLegacyJwt` is a hoisted function declaration defined later in this
// file, so calling it here at module-evaluation time is safe.
const ANON_KEY = createLegacyJwt("anon");
const SERVICE_ROLE_KEY = createLegacyJwt("service_role");
// Build outputs that must exist before the suite can run; `command` names
// the build step to suggest in the error message when `path` is missing.
const REQUIRED_BUILD_ARTIFACTS = [
  {
    command: "pnpm --filter @hot-updater/core build",
    path: path.join(WORKSPACE_ROOT, "packages/core/dist/index.mjs"),
  },
  {
    command: "pnpm --filter @hot-updater/server build",
    path: path.join(WORKSPACE_ROOT, "packages/server/dist/runtime.mjs"),
  },
  {
    command: "pnpm --filter @hot-updater/plugin-core build",
    path: path.join(WORKSPACE_ROOT, "plugins/plugin-core/dist/index.mjs"),
  },
  {
    command: "pnpm --filter @hot-updater/supabase build",
    path: path.join(WORKSPACE_ROOT, "plugins/supabase/dist/index.mjs"),
  },
] as const;

// Fail fast (at module load) when Docker Compose or the daemon is unavailable.
assertDockerComposeAvailable(
  "supabase edge runtime acceptance requires Docker Compose and a running Docker daemon.",
);
75
+
76
+ const ensureBuiltArtifacts = async (
77
+ artifacts: ReadonlyArray<{ command: string; path: string }>,
78
+ ) => {
79
+ for (const artifact of artifacts) {
80
+ try {
81
+ await access(artifact.path);
82
+ } catch {
83
+ throw new Error(
84
+ `Missing built artifact at ${artifact.path}. Run \`${artifact.command}\` before running this test.`,
85
+ );
86
+ }
87
+ }
88
+ };
89
+
90
+ const createCanonicalPath = (args: GetBundlesArgs) => {
91
+ const channel = args.channel ?? "production";
92
+ const minBundleId = args.minBundleId ?? NIL_UUID;
93
+ const cohortSegment = args.cohort
94
+ ? `/${encodeURIComponent(args.cohort)}`
95
+ : "";
96
+ const joinHotUpdaterPath = (routePath: string) =>
97
+ HOT_UPDATER_BASE_PATH === "/"
98
+ ? routePath
99
+ : `${HOT_UPDATER_BASE_PATH}${routePath}`;
100
+
101
+ if (args._updateStrategy === "appVersion") {
102
+ return joinHotUpdaterPath(
103
+ `/app-version/${encodeURIComponent(args.platform)}/${encodeURIComponent(args.appVersion)}/${encodeURIComponent(channel)}/${encodeURIComponent(minBundleId)}/${encodeURIComponent(args.bundleId)}${cohortSegment}`,
104
+ );
105
+ }
106
+
107
+ return joinHotUpdaterPath(
108
+ `/fingerprint/${encodeURIComponent(args.platform)}/${encodeURIComponent(args.fingerprintHash)}/${encodeURIComponent(channel)}/${encodeURIComponent(minBundleId)}/${encodeURIComponent(args.bundleId)}${cohortSegment}`,
109
+ );
110
+ };
111
+
112
+ const toRuntimeBundle = (bundle: Bundle): Bundle => {
113
+ return {
114
+ ...bundle,
115
+ storageUri: `supabase-storage://${BUCKET_NAME}/${bundle.id}/bundle.zip`,
116
+ };
117
+ };
118
+
119
// End-to-end acceptance suite: boots a local Supabase-like stack (Postgres,
// PostgREST, storage-api, imgproxy, nginx gateway) via docker-compose, runs
// the edge function under a Deno container, seeds bundles through the
// hot-updater server runtime, and hits the function over HTTP.
// `sequential` because all tests share the same stack and mutable state.
describe.sequential("supabase edge runtime acceptance", () => {
  // Shared mutable state populated in beforeAll and torn down in afterAll.
  let runtimeRoot: string | undefined; // temp dir holding generated runtime files
  let storageRepoPath = ""; // shallow clone of supabase/storage (for tenant migrations)
  let composeFilePath = "";
  let composeProjectName = "";
  let gatewayPort = 0; // host port of the nginx gateway
  let edgePort = 0; // host port of the Deno edge runtime
  let gatewayBaseUrl = "";
  let edgeRuntime: ReturnType<typeof spawnRuntime> | undefined;
  let seedHotUpdater: ReturnType<typeof createHotUpdater>;
  let supabaseAdmin: ReturnType<typeof createClient>;

  beforeAll(async () => {
    await ensureBuiltArtifacts(REQUIRED_BUILD_ARTIFACTS);

    // Temp workspace lives inside the repo so container bind mounts can see it.
    runtimeRoot = await mkdtemp(
      path.join(WORKSPACE_ROOT, "plugins/supabase/runtime-acceptance-"),
    );
    storageRepoPath = path.join(runtimeRoot, "storage-repo");
    gatewayPort = await findOpenPort();
    edgePort = await findOpenPort();
    gatewayBaseUrl = `http://127.0.0.1:${gatewayPort}`;
    // pid + timestamp keeps concurrent/stale compose projects from colliding.
    composeProjectName = `hot-updater-supabase-${process.pid}-${Date.now()}`;
    composeFilePath = path.join(runtimeRoot, "docker-compose.yml");

    // Shallow-clone supabase/storage to obtain its tenant SQL migrations.
    runCheckedCommand({
      command: "git",
      args: [
        "clone",
        "--depth",
        "1",
        "https://github.com/supabase/storage.git",
        storageRepoPath,
      ],
      cwd: WORKSPACE_ROOT,
    });

    await writeSupabaseRuntimeFiles({
      runtimeRoot,
      gatewayPort,
      storageRepoPath,
    });

    try {
      runCheckedCommand({
        command: "docker",
        args: [
          "compose",
          "-p",
          composeProjectName,
          "-f",
          composeFilePath,
          "up",
          "-d",
        ],
        cwd: WORKSPACE_ROOT,
      });
    } catch (error) {
      // Compose startup failed: best-effort collect db logs for diagnosis
      // before rethrowing (the db init SQL is the most likely culprit).
      let dbLogs = "";

      try {
        const result = spawnSync(
          "docker",
          [
            "compose",
            "-p",
            composeProjectName,
            "-f",
            composeFilePath,
            "logs",
            "--no-color",
            "db",
          ],
          {
            cwd: WORKSPACE_ROOT,
            encoding: "utf8",
          },
        );
        dbLogs = [result.stdout, result.stderr].filter(Boolean).join("\n");
      } catch {
        dbLogs = "failed to collect database logs";
      }

      throw new Error(
        [
          error instanceof Error ? error.message : String(error),
          "",
          "Database logs:",
          dbLogs,
        ].join("\n"),
      );
    }

    // Wait for the storage API (behind the gateway) to come up.
    await waitForUrlOk(`${gatewayBaseUrl}/storage/v1/status`);

    supabaseAdmin = createClient(gatewayBaseUrl, SERVICE_ROLE_KEY);
    await ensureBucketExists(supabaseAdmin);

    // Seeding client: uses the service-role key (as the "anon" key) so it can
    // write rows/objects without RLS getting in the way.
    seedHotUpdater = createHotUpdater({
      database: supabaseDatabase({
        supabaseUrl: gatewayBaseUrl,
        supabaseAnonKey: SERVICE_ROLE_KEY,
      }),
      storages: [
        supabaseStorage({
          supabaseUrl: gatewayBaseUrl,
          supabaseAnonKey: SERVICE_ROLE_KEY,
          bucketName: BUCKET_NAME,
        }),
      ],
      basePath: HOT_UPDATER_BASE_PATH,
      routes: {
        updateCheck: true,
        bundles: false,
      },
    });

    // Run the edge function entrypoint under Deno, attached to the compose
    // network so it can reach the nginx gateway at http://gateway:8000.
    edgeRuntime = spawnRuntime({
      command: "docker",
      args: [
        "run",
        "--rm",
        "--network",
        `${composeProjectName}_default`,
        "-p",
        `127.0.0.1:${edgePort}:8000`,
        "-e",
        `SUPABASE_URL=http://gateway:8000`,
        "-e",
        `SUPABASE_SERVICE_ROLE_KEY=${SERVICE_ROLE_KEY}`,
        "-e",
        "DENO_DIR=/deno-dir",
        "-v",
        `${WORKSPACE_ROOT}:${WORKSPACE_ROOT}:ro`,
        "-v",
        `${runtimeRoot}:${runtimeRoot}`,
        "-v",
        `${DENO_CACHE_VOLUME}:/deno-dir`,
        "-w",
        runtimeRoot,
        DENO_DOCKER_IMAGE,
        "run",
        "--no-lock",
        "--node-modules-dir=manual",
        "--allow-env",
        "--allow-net",
        "--allow-read",
        "--allow-sys",
        "--unstable-sloppy-imports",
        "--import-map",
        path.join(runtimeRoot, "import_map.json"),
        path.join(runtimeRoot, "supabase/edge-functions/index.ts"),
      ],
      cwd: WORKSPACE_ROOT,
    });

    // Readiness probe against the function's /ping route.
    await waitForHttpOk({
      url: `http://127.0.0.1:${edgePort}${FUNCTION_BASE_PATH}/ping`,
      child: edgeRuntime.child,
      logs: edgeRuntime.logs,
      timeoutMs: 90_000,
    });
  }, 180_000);

  // Each test starts from an empty bundles table (NIL_UUID row excluded —
  // `.neq` requires some filter for a bulk delete).
  beforeEach(async () => {
    if (!supabaseAdmin) {
      throw new Error("Supabase admin client was not initialized.");
    }

    const { error } = await supabaseAdmin
      .from("bundles")
      .delete()
      .neq("id", NIL_UUID);

    if (error) {
      throw error;
    }
  });

  // Tear down in reverse order: edge runtime, compose stack (+volumes), temp dir.
  afterAll(async () => {
    if (edgeRuntime) {
      await stopRuntime(edgeRuntime.child);
    }

    if (composeFilePath) {
      try {
        runCheckedCommand({
          command: "docker",
          args: [
            "compose",
            "-p",
            composeProjectName,
            "-f",
            composeFilePath,
            "down",
            "-v",
            "--remove-orphans",
          ],
          cwd: WORKSPACE_ROOT,
        });
      } catch {
        // ignore cleanup failures
      }
    }

    if (runtimeRoot) {
      await rm(runtimeRoot, { recursive: true, force: true });
    }
  }, 60_000);

  // Adapter for the shared test suite: seed the given bundles (object +
  // database row), then ask the live edge function for update info.
  const getUpdateInfo = async (bundles: Bundle[], args: GetBundlesArgs) => {
    if (!supabaseAdmin) {
      throw new Error("Supabase admin client was not initialized.");
    }

    for (const bundle of bundles.map(toRuntimeBundle)) {
      await uploadBundleObject(supabaseAdmin, bundle.id);
      await seedHotUpdater.insertBundle(bundle);
    }

    const response = await fetch(
      `http://127.0.0.1:${edgePort}${FUNCTION_BASE_PATH}${createCanonicalPath(args)}`,
    );

    // Response shape is asserted by the shared suite, hence the `any`.
    return (await response.json()) as any;
  };

  setupGetUpdateInfoTestSuite({ getUpdateInfo });

  it("serves canonical routes from the edge function entrypoint", async () => {
    const bundle = toRuntimeBundle({
      id: "00000000-0000-0000-0000-000000000001",
      platform: "ios",
      targetAppVersion: "1.0",
      shouldForceUpdate: false,
      enabled: true,
      fileHash: "hash",
      gitCommitHash: null,
      message: "hello",
      channel: "production",
      storageUri: "storage://unused",
      fingerprintHash: null,
    });

    await uploadBundleObject(supabaseAdmin, bundle.id);
    await seedHotUpdater.insertBundle(bundle);

    const response = await fetch(
      `http://127.0.0.1:${edgePort}${FUNCTION_BASE_PATH}${createCanonicalPath({
        appVersion: "1.0",
        bundleId: NIL_UUID,
        platform: "ios",
        _updateStrategy: "appVersion",
      })}`,
    );

    expect(response.ok).toBe(true);
    await expect(response.json()).resolves.toMatchObject({
      id: "00000000-0000-0000-0000-000000000001",
      status: "UPDATE",
    });
  });

  it("does not support the legacy exact path", async () => {
    const response = await fetch(
      `http://127.0.0.1:${edgePort}${FUNCTION_BASE_PATH}${LEGACY_HOT_UPDATER_BASE_PATH}`,
    );

    expect(response.status).toBe(404);
  });

  it("does not expose management routes from the edge function entrypoint", async () => {
    const response = await fetch(
      `http://127.0.0.1:${edgePort}${FUNCTION_BASE_PATH}/api/bundles`,
    );

    expect(response.status).toBe(404);
    await expect(response.json()).resolves.toEqual({
      error: "Not found",
    });
  });
});
401
+
402
+ function base64UrlEncode(value: string | Buffer) {
403
+ return Buffer.from(value)
404
+ .toString("base64")
405
+ .replace(/\+/g, "-")
406
+ .replace(/\//g, "_")
407
+ .replace(/=+$/g, "");
408
+ }
409
+
410
+ function createLegacyJwt(role: "anon" | "service_role") {
411
+ const header = base64UrlEncode(JSON.stringify({ alg: "HS256", typ: "JWT" }));
412
+ const payload = base64UrlEncode(
413
+ JSON.stringify({
414
+ role,
415
+ iss: "supabase-test",
416
+ iat: Math.floor(Date.now() / 1000),
417
+ exp: Math.floor(Date.now() / 1000) + JWT_EXPIRY_SECONDS,
418
+ }),
419
+ );
420
+ const signature = createHmac("sha256", JWT_SECRET)
421
+ .update(`${header}.${payload}`)
422
+ .digest("base64")
423
+ .replace(/\+/g, "-")
424
+ .replace(/\//g, "_")
425
+ .replace(/=+$/g, "");
426
+
427
+ return `${header}.${payload}.${signature}`;
428
+ }
429
+
430
+ const waitForUrlOk = async (url: string, timeoutMs = 90_000) => {
431
+ const deadline = Date.now() + timeoutMs;
432
+ let lastError = "no response";
433
+
434
+ while (Date.now() < deadline) {
435
+ try {
436
+ const response = await fetch(url);
437
+ if (response.ok) {
438
+ return;
439
+ }
440
+
441
+ lastError = `${response.status} ${response.statusText}`;
442
+ } catch (error) {
443
+ lastError = error instanceof Error ? error.message : String(error);
444
+ }
445
+
446
+ await sleep(500);
447
+ }
448
+
449
+ throw new Error(`Timed out waiting for ${url}: ${lastError}`);
450
+ };
451
+
452
+ const sleep = async (ms: number) => {
453
+ await new Promise((resolve) => setTimeout(resolve, ms));
454
+ };
455
+
456
+ const ensureBucketExists = async (
457
+ supabaseAdmin: ReturnType<typeof createClient>,
458
+ ) => {
459
+ const { data: buckets, error: listError } =
460
+ await supabaseAdmin.storage.listBuckets();
461
+
462
+ if (listError) {
463
+ throw listError;
464
+ }
465
+
466
+ if (buckets.some((bucket) => bucket.name === BUCKET_NAME)) {
467
+ return;
468
+ }
469
+
470
+ const { error } = await supabaseAdmin.storage.createBucket(BUCKET_NAME);
471
+
472
+ if (error) {
473
+ throw error;
474
+ }
475
+ };
476
+
477
+ const uploadBundleObject = async (
478
+ supabaseAdmin: ReturnType<typeof createClient>,
479
+ bundleId: string,
480
+ ) => {
481
+ const { error } = await supabaseAdmin.storage
482
+ .from(BUCKET_NAME)
483
+ .upload(`${bundleId}/bundle.zip`, Buffer.from("zip"), {
484
+ contentType: "application/zip",
485
+ cacheControl: "31536000",
486
+ upsert: true,
487
+ });
488
+
489
+ if (error) {
490
+ throw error;
491
+ }
492
+ };
493
+
494
/**
 * Assembles the single init SQL script for the Postgres container: role
 * bootstrap, this plugin's migrations, then the cloned supabase/storage
 * tenant migrations (run with the `storage` schema on the search path).
 */
const loadSupabaseInitSql = async (storageRepoPath: string) => {
  // Tenant migrations from the supabase/storage clone, in numeric filename
  // order; each statement block is terminated with ';' so they concatenate.
  const storageMigrationsDir = path.join(storageRepoPath, "migrations/tenant");
  const storageMigrationFiles = (await readdir(storageMigrationsDir))
    .filter((file) => file.endsWith(".sql"))
    .sort((a, b) => a.localeCompare(b, undefined, { numeric: true }));
  const storageMigrations = await Promise.all(
    storageMigrationFiles.map(async (file) => {
      const contents = await readFile(
        path.join(storageMigrationsDir, file),
        "utf8",
      );
      const trimmed = contents.trimEnd();
      return trimmed.endsWith(";") ? trimmed : `${trimmed};`;
    }),
  );

  // This plugin's own migrations, with the bucket-name placeholder substituted.
  const migrationsDir = path.join(
    WORKSPACE_ROOT,
    "plugins/supabase/supabase/migrations",
  );
  const migrationFiles = (await readdir(migrationsDir))
    .filter((file) => file.endsWith(".sql"))
    .sort();
  const migrations = await Promise.all(
    migrationFiles.map(async (file) => {
      const contents = await readFile(path.join(migrationsDir, file), "utf8");
      return contents.replaceAll("%%BUCKET_NAME%%", BUCKET_NAME);
    }),
  );

  // Roles mirror a Supabase project: anon/authenticated/service_role granted
  // to the PostgREST `authenticator` login; storage gets its own superuser.
  return `
CREATE EXTENSION IF NOT EXISTS pgcrypto;

DO $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'anon') THEN
CREATE ROLE anon NOLOGIN NOINHERIT;
END IF;

IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'authenticated') THEN
CREATE ROLE authenticated NOLOGIN NOINHERIT;
END IF;

IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'service_role') THEN
CREATE ROLE service_role NOLOGIN NOINHERIT BYPASSRLS;
END IF;

IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'authenticator') THEN
CREATE ROLE authenticator LOGIN PASSWORD '${POSTGRES_PASSWORD}' NOINHERIT;
END IF;

IF NOT EXISTS (
SELECT 1 FROM pg_roles WHERE rolname = 'supabase_storage_admin'
) THEN
CREATE ROLE supabase_storage_admin LOGIN PASSWORD '${POSTGRES_PASSWORD}' SUPERUSER;
END IF;
END $$;

GRANT anon TO authenticator;
GRANT authenticated TO authenticator;
GRANT service_role TO authenticator;

${migrations.join("\n\n")}

SET search_path TO storage, public, extensions;

${storageMigrations.join("\n\n")}

SET search_path TO public;

GRANT USAGE ON SCHEMA public TO anon, authenticated, service_role;
GRANT USAGE ON TYPE platforms TO anon, authenticated, service_role;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO anon, authenticated;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO service_role;
GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO anon, authenticated, service_role;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO anon, authenticated, service_role;

ALTER DEFAULT PRIVILEGES IN SCHEMA public
GRANT SELECT ON TABLES TO anon, authenticated;
ALTER DEFAULT PRIVILEGES IN SCHEMA public
GRANT ALL PRIVILEGES ON TABLES TO service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA public
GRANT USAGE, SELECT ON SEQUENCES TO anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA public
GRANT EXECUTE ON FUNCTIONS TO anon, authenticated, service_role;
`.trim();
};
581
+
582
/**
 * Renders the docker-compose file for the throwaway stack: Postgres (with
 * the generated init SQL), PostgREST, imgproxy, supabase storage-api (file
 * backend), and an nginx gateway exposing everything on one host port.
 * NOTE(review): interior YAML indentation reconstructed from a
 * whitespace-stripped source — verify against the published package.
 */
const createComposeFile = ({
  gatewayPort,
  runtimeRoot,
}: {
  gatewayPort: number;
  runtimeRoot: string;
}) => {
  return `
services:
  db:
    image: ${POSTGRES_IMAGE}
    environment:
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_DB: ${POSTGRES_DB}
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres -d ${POSTGRES_DB}"]
      interval: 5s
      timeout: 5s
      retries: 20
    volumes:
      - ${path.join(runtimeRoot, "db-init")}:/docker-entrypoint-initdb.d:ro

  rest:
    image: ${POSTGREST_IMAGE}
    depends_on:
      db:
        condition: service_healthy
    environment:
      PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}
      PGRST_DB_SCHEMAS: public,storage
      PGRST_DB_MAX_ROWS: 1000
      PGRST_DB_EXTRA_SEARCH_PATH: public
      PGRST_DB_ANON_ROLE: anon
      PGRST_JWT_SECRET: ${JWT_SECRET}
      PGRST_DB_USE_LEGACY_GUCS: "false"
      PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
      PGRST_APP_SETTINGS_JWT_EXP: "3600"

  imgproxy:
    image: ${IMGPROXY_IMAGE}
    environment:
      IMGPROXY_BIND: ":5001"
      IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
      IMGPROXY_USE_ETAG: "true"

  storage:
    image: ${STORAGE_IMAGE}
    depends_on:
      db:
        condition: service_healthy
      rest:
        condition: service_started
      imgproxy:
        condition: service_started
    environment:
      ANON_KEY: ${ANON_KEY}
      SERVICE_KEY: ${SERVICE_ROLE_KEY}
      POSTGREST_URL: http://rest:3000
      AUTH_JWT_SECRET: ${JWT_SECRET}
      DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}
      STORAGE_PUBLIC_URL: http://gateway:8000
      REQUEST_ALLOW_X_FORWARDED_PATH: "true"
      FILE_SIZE_LIMIT: 52428800
      STORAGE_BACKEND: file
      GLOBAL_S3_BUCKET: ${BUCKET_NAME}
      FILE_STORAGE_BACKEND_PATH: /var/lib/storage
      TENANT_ID: stub
      REGION: stub
      ENABLE_IMAGE_TRANSFORMATION: "false"
      IMGPROXY_URL: http://imgproxy:5001
      S3_PROTOCOL_ACCESS_KEY_ID: stub
      S3_PROTOCOL_ACCESS_KEY_SECRET: stub
    volumes:
      - storage-data:/var/lib/storage

  gateway:
    image: ${NGINX_IMAGE}
    depends_on:
      storage:
        condition: service_started
      rest:
        condition: service_started
    ports:
      - "127.0.0.1:${gatewayPort}:8000"
    volumes:
      - ${path.join(runtimeRoot, "nginx.conf")}:/etc/nginx/nginx.conf:ro

volumes:
  storage-data:
`.trim();
};
673
+
674
/**
 * Renders the nginx gateway config: a single server on :8000 that proxies
 * /rest/v1/ to PostgREST and /storage/v1/ to storage-api, forwarding the
 * auth/apikey headers each upstream expects (nginx ignores indentation).
 */
const createNginxConfig = () => {
  return `
events {}

http {
  client_max_body_size 100m;

  server {
    listen 8000;

    location /rest/v1/ {
      proxy_pass http://rest:3000/;
      proxy_http_version 1.1;
      proxy_set_header Host $host;
      proxy_set_header Authorization $http_authorization;
      proxy_set_header apikey $http_apikey;
      proxy_set_header Content-Profile $http_content_profile;
      proxy_set_header Accept-Profile $http_accept_profile;
      proxy_set_header Prefer $http_prefer;
      proxy_set_header Range $http_range;
      proxy_set_header Range-Unit $http_range_unit;
      proxy_set_header Content-Type $http_content_type;
    }

    location /storage/v1/ {
      proxy_pass http://storage:5000/;
      proxy_http_version 1.1;
      proxy_set_header Host $host;
      proxy_set_header Authorization $http_authorization;
      proxy_set_header apikey $http_apikey;
      proxy_set_header x-forwarded-path $request_uri;
      proxy_set_header Content-Type $http_content_type;
      proxy_set_header Content-Length $content_length;
    }
  }
}
`.trim();
};
712
+
713
/**
 * Materializes everything the stack needs inside `runtimeRoot`: source and
 * node_modules symlinks for the Deno runtime, the transformed edge-function
 * entrypoint, an import map, the db init SQL, the compose file, and the
 * nginx config.
 */
const writeSupabaseRuntimeFiles = async ({
  runtimeRoot,
  gatewayPort,
  storageRepoPath,
}: {
  runtimeRoot: string;
  gatewayPort: number;
  storageRepoPath: string;
}) => {
  await mkdir(path.join(runtimeRoot, "db-init"), { recursive: true });
  await mkdir(path.join(runtimeRoot, "supabase/edge-functions"), {
    recursive: true,
  });
  // Symlinks let the Deno container resolve plugin sources and dependencies
  // via the read-only workspace mount.
  await symlink(
    path.join(WORKSPACE_ROOT, "plugins/supabase/src"),
    path.join(runtimeRoot, "src"),
  );
  await symlink(
    path.join(WORKSPACE_ROOT, "plugins/supabase/node_modules"),
    path.join(runtimeRoot, "node_modules"),
  );

  // Inline env substitution (FUNCTION_NAME) into the edge-function entrypoint.
  const transformedEntry = transformEnv(
    path.join(
      WORKSPACE_ROOT,
      "plugins/supabase/supabase/edge-functions/index.ts",
    ),
    {
      FUNCTION_NAME,
    },
  );
  // Map package specifiers to built/local files so Deno resolves them
  // without a registry.
  const importMap = {
    imports: {
      "@hot-updater/server/runtime": pathToFileURL(
        path.join(WORKSPACE_ROOT, "packages/server/dist/runtime.mjs"),
      ).href,
      "@hot-updater/supabase": pathToFileURL(
        path.join(runtimeRoot, "hot-updater-supabase-edge.ts"),
      ).href,
    },
  };

  // Shim module re-exporting the edge-function database/storage factories
  // straight from the plugin sources (via absolute file: URLs).
  await writeFile(
    path.join(runtimeRoot, "hot-updater-supabase-edge.ts"),
    `
export { supabaseEdgeFunctionDatabase } from ${JSON.stringify(pathToFileURL(path.join(WORKSPACE_ROOT, "plugins/supabase/src/supabaseEdgeFunctionDatabase.ts")).href)};
export { supabaseEdgeFunctionStorage } from ${JSON.stringify(pathToFileURL(path.join(WORKSPACE_ROOT, "plugins/supabase/src/supabaseEdgeFunctionStorage.ts")).href)};
`.trim(),
  );
  await writeFile(
    path.join(runtimeRoot, "supabase/edge-functions/index.ts"),
    transformedEntry,
  );
  await writeFile(
    path.join(runtimeRoot, "import_map.json"),
    JSON.stringify(importMap),
  );
  await writeFile(
    path.join(runtimeRoot, "db-init/00-init.sql"),
    await loadSupabaseInitSql(storageRepoPath),
  );
  await writeFile(
    path.join(runtimeRoot, "docker-compose.yml"),
    createComposeFile({ runtimeRoot, gatewayPort }),
  );
  await writeFile(path.join(runtimeRoot, "nginx.conf"), createNginxConfig());
};