@wopr-network/platform-core 1.68.0 → 1.70.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. package/dist/backup/types.d.ts +1 -1
  2. package/dist/db/schema/pool-config.d.ts +41 -0
  3. package/dist/db/schema/pool-config.js +5 -0
  4. package/dist/db/schema/pool-instances.d.ts +126 -0
  5. package/dist/db/schema/pool-instances.js +10 -0
  6. package/dist/server/__tests__/build-container.test.d.ts +1 -0
  7. package/dist/server/__tests__/build-container.test.js +339 -0
  8. package/dist/server/__tests__/container.test.d.ts +1 -0
  9. package/dist/server/__tests__/container.test.js +173 -0
  10. package/dist/server/__tests__/lifecycle.test.d.ts +1 -0
  11. package/dist/server/__tests__/lifecycle.test.js +90 -0
  12. package/dist/server/__tests__/mount-routes.test.d.ts +1 -0
  13. package/dist/server/__tests__/mount-routes.test.js +151 -0
  14. package/dist/server/boot-config.d.ts +51 -0
  15. package/dist/server/boot-config.js +7 -0
  16. package/dist/server/container.d.ts +97 -0
  17. package/dist/server/container.js +148 -0
  18. package/dist/server/index.d.ts +33 -0
  19. package/dist/server/index.js +66 -0
  20. package/dist/server/lifecycle.d.ts +25 -0
  21. package/dist/server/lifecycle.js +56 -0
  22. package/dist/server/middleware/__tests__/admin-auth.test.d.ts +1 -0
  23. package/dist/server/middleware/__tests__/admin-auth.test.js +59 -0
  24. package/dist/server/middleware/__tests__/tenant-proxy.test.d.ts +1 -0
  25. package/dist/server/middleware/__tests__/tenant-proxy.test.js +268 -0
  26. package/dist/server/middleware/admin-auth.d.ts +18 -0
  27. package/dist/server/middleware/admin-auth.js +38 -0
  28. package/dist/server/middleware/tenant-proxy.d.ts +56 -0
  29. package/dist/server/middleware/tenant-proxy.js +162 -0
  30. package/dist/server/mount-routes.d.ts +30 -0
  31. package/dist/server/mount-routes.js +74 -0
  32. package/dist/server/routes/__tests__/admin.test.d.ts +1 -0
  33. package/dist/server/routes/__tests__/admin.test.js +267 -0
  34. package/dist/server/routes/__tests__/crypto-webhook.test.d.ts +1 -0
  35. package/dist/server/routes/__tests__/crypto-webhook.test.js +137 -0
  36. package/dist/server/routes/__tests__/provision-webhook.test.d.ts +1 -0
  37. package/dist/server/routes/__tests__/provision-webhook.test.js +212 -0
  38. package/dist/server/routes/__tests__/stripe-webhook.test.d.ts +1 -0
  39. package/dist/server/routes/__tests__/stripe-webhook.test.js +65 -0
  40. package/dist/server/routes/admin.d.ts +129 -0
  41. package/dist/server/routes/admin.js +294 -0
  42. package/dist/server/routes/crypto-webhook.d.ts +23 -0
  43. package/dist/server/routes/crypto-webhook.js +82 -0
  44. package/dist/server/routes/provision-webhook.d.ts +38 -0
  45. package/dist/server/routes/provision-webhook.js +160 -0
  46. package/dist/server/routes/stripe-webhook.d.ts +10 -0
  47. package/dist/server/routes/stripe-webhook.js +29 -0
  48. package/dist/server/services/hot-pool-claim.d.ts +30 -0
  49. package/dist/server/services/hot-pool-claim.js +92 -0
  50. package/dist/server/services/hot-pool.d.ts +25 -0
  51. package/dist/server/services/hot-pool.js +129 -0
  52. package/dist/server/services/pool-repository.d.ts +44 -0
  53. package/dist/server/services/pool-repository.js +72 -0
  54. package/dist/server/test-container.d.ts +15 -0
  55. package/dist/server/test-container.js +103 -0
  56. package/dist/trpc/auth-helpers.d.ts +17 -0
  57. package/dist/trpc/auth-helpers.js +26 -0
  58. package/dist/trpc/container-factories.d.ts +300 -0
  59. package/dist/trpc/container-factories.js +80 -0
  60. package/dist/trpc/index.d.ts +2 -0
  61. package/dist/trpc/index.js +2 -0
  62. package/drizzle/migrations/0025_hot_pool_tables.sql +29 -0
  63. package/package.json +5 -1
  64. package/src/db/schema/pool-config.ts +6 -0
  65. package/src/db/schema/pool-instances.ts +11 -0
  66. package/src/server/__tests__/build-container.test.ts +402 -0
  67. package/src/server/__tests__/container.test.ts +207 -0
  68. package/src/server/__tests__/lifecycle.test.ts +106 -0
  69. package/src/server/__tests__/mount-routes.test.ts +169 -0
  70. package/src/server/boot-config.ts +84 -0
  71. package/src/server/container.ts +264 -0
  72. package/src/server/index.ts +92 -0
  73. package/src/server/lifecycle.ts +72 -0
  74. package/src/server/middleware/__tests__/admin-auth.test.ts +67 -0
  75. package/src/server/middleware/__tests__/tenant-proxy.test.ts +308 -0
  76. package/src/server/middleware/admin-auth.ts +51 -0
  77. package/src/server/middleware/tenant-proxy.ts +192 -0
  78. package/src/server/mount-routes.ts +113 -0
  79. package/src/server/routes/__tests__/admin.test.ts +320 -0
  80. package/src/server/routes/__tests__/crypto-webhook.test.ts +167 -0
  81. package/src/server/routes/__tests__/provision-webhook.test.ts +323 -0
  82. package/src/server/routes/__tests__/stripe-webhook.test.ts +73 -0
  83. package/src/server/routes/admin.ts +360 -0
  84. package/src/server/routes/crypto-webhook.ts +110 -0
  85. package/src/server/routes/provision-webhook.ts +212 -0
  86. package/src/server/routes/stripe-webhook.ts +36 -0
  87. package/src/server/services/hot-pool-claim.ts +130 -0
  88. package/src/server/services/hot-pool.ts +174 -0
  89. package/src/server/services/pool-repository.ts +107 -0
  90. package/src/server/test-container.ts +120 -0
  91. package/src/trpc/auth-helpers.ts +28 -0
  92. package/src/trpc/container-factories.ts +114 -0
  93. package/src/trpc/index.ts +9 -0
@@ -0,0 +1,92 @@
1
+ /**
2
+ * Atomic hot pool claiming.
3
+ *
4
+ * Uses IPoolRepository for all DB operations. No raw pool.query().
5
+ */
6
+ import { randomBytes } from "node:crypto";
7
+ import { logger } from "../../config/logger.js";
8
+ import { replenishPool } from "./hot-pool.js";
9
/**
 * Claim a warm pool instance, rename it, create a fleet profile,
 * and register the proxy route.
 *
 * Returns the claim result on success, or null if the pool is empty
 * or the claimed container could not be renamed. If fleet profile
 * creation fails after a successful claim, the instance is marked
 * dead before the error is rethrown so it is not leaked in a
 * half-claimed state.
 */
export async function claimPoolInstance(container, repo, name, tenantId, _adminUser, config) {
    if (!container.fleet)
        throw new Error("Fleet services required for pool claim");
    const pc = container.productConfig;
    // Fleet/product settings with platform defaults when unset.
    const containerPort = pc.fleet?.containerPort ?? 3100;
    const containerImage = pc.fleet?.containerImage ?? "ghcr.io/wopr-network/platform:latest";
    const platformDomain = pc.product?.domain ?? "localhost";
    const prefix = config?.containerPrefix ?? "wopr";
    // ---- Step 1: Atomically claim a warm instance ----
    const claimed = await repo.claimWarm(tenantId, name);
    if (!claimed)
        return null;
    const { id: instanceId, containerId } = claimed;
    // ---- Step 2: Rename Docker container ----
    const docker = container.fleet.docker;
    const containerName = `${prefix}-${name}`;
    try {
        const c = docker.getContainer(containerId);
        await c.rename({ name: containerName });
        logger.info(`Pool claim: renamed container to ${containerName}`);
    }
    catch (err) {
        logger.error("Pool claim: rename failed", { error: err.message });
        // Container is unusable under its old name; retire the instance.
        await repo.markDead(instanceId);
        return null;
    }
    // ---- Step 3: Create fleet profile ----
    try {
        const serviceKeyRepo = container.fleet.serviceKeyRepo;
        // Prefer a service-key-derived gateway key; fall back to a random UUID.
        const gatewayKey = serviceKeyRepo ? await serviceKeyRepo.generate(tenantId, instanceId) : crypto.randomUUID();
        const store = container.fleet.profileStore;
        const profile = {
            id: instanceId,
            name,
            tenantId,
            image: containerImage,
            description: `Managed instance: ${name}`,
            env: {
                PORT: String(containerPort),
                HOST: "0.0.0.0",
                NODE_ENV: "production",
                PROVISION_SECRET: config?.provisionSecret ?? "",
                BETTER_AUTH_SECRET: randomBytes(32).toString("hex"),
                DATA_HOME: "/data",
                HOSTED_MODE: "true",
                DEPLOYMENT_MODE: "hosted_proxy",
                DEPLOYMENT_EXPOSURE: "private",
                MIGRATION_AUTO_APPLY: "true",
                GATEWAY_KEY: gatewayKey,
            },
            restartPolicy: "unless-stopped",
            releaseChannel: "stable",
            updatePolicy: "manual",
        };
        await store.save(profile);
        logger.info(`Pool claim: saved fleet profile for ${name} (${instanceId})`);
    }
    catch (err) {
        // Don't leak a claimed-but-unconfigured instance: retire it, then surface the error.
        logger.error("Pool claim: fleet profile creation failed", { error: err.message });
        await repo.markDead(instanceId);
        throw err;
    }
    // ---- Step 4: Register proxy route (best-effort; failure is non-fatal) ----
    try {
        if (container.fleet.proxy.addRoute) {
            await container.fleet.proxy.addRoute({
                instanceId,
                subdomain: name,
                upstreamHost: containerName,
                upstreamPort: containerPort,
                healthy: true,
            });
            logger.info(`Pool claim: registered proxy route ${name}.${platformDomain}`);
        }
    }
    catch (err) {
        logger.error("Pool claim: proxy route registration failed", { error: err.message });
    }
    // ---- Step 5: Replenish pool in background (fire-and-forget) ----
    replenishPool(container, repo, { provisionSecret: config?.provisionSecret ?? "" }).catch((err) => {
        logger.error("Pool replenish after claim failed", { error: err.message });
    });
    const subdomain = `${name}.${platformDomain}`;
    return { id: instanceId, name, subdomain };
}
@@ -0,0 +1,25 @@
1
/**
 * Hot pool manager — pre-provisions warm containers for instant claiming.
 *
 * Reads desired pool size from DB (`pool_config` table) via IPoolRepository.
 * Periodically replenishes the pool and cleans up dead containers.
 *
 * All config is DB-driven — no env vars for pool size, container image,
 * or port. Admin API updates pool_config, this reads it.
 */
import type { PlatformContainer } from "../container.js";
import type { IPoolRepository } from "./pool-repository.js";
/** Runtime configuration for the hot pool background loop. */
export interface HotPoolConfig {
    /** Shared secret for provision auth between platform and managed instances. */
    provisionSecret: string;
    /** Replenish interval in ms. Default: 60_000. */
    replenishIntervalMs?: number;
}
/** Handles returned by startHotPool; lets the caller stop the loop on shutdown. */
export interface HotPoolHandles {
    /** The interval timer driving periodic cleanup + replenish ticks. */
    replenishTimer: ReturnType<typeof setInterval>;
    /** Stops the background loop by clearing the timer. */
    stop: () => void;
}
/** Read the desired warm-pool size from pool_config via the repository. */
export declare function getPoolSize(repo: IPoolRepository): Promise<number>;
/** Persist a new desired warm-pool size to pool_config via the repository. */
export declare function setPoolSize(repo: IPoolRepository, size: number): Promise<void>;
/** Create warm containers until the pool reaches its configured size. */
export declare function replenishPool(container: PlatformContainer, repo: IPoolRepository, config: HotPoolConfig): Promise<void>;
/** Run an initial cleanup + replenish, then start the periodic background loop. */
export declare function startHotPool(container: PlatformContainer, repo: IPoolRepository, config: HotPoolConfig): Promise<HotPoolHandles>;
@@ -0,0 +1,129 @@
1
+ /**
2
+ * Hot pool manager — pre-provisions warm containers for instant claiming.
3
+ *
4
+ * Reads desired pool size from DB (`pool_config` table) via IPoolRepository.
5
+ * Periodically replenishes the pool and cleans up dead containers.
6
+ *
7
+ * All config is DB-driven — no env vars for pool size, container image,
8
+ * or port. Admin API updates pool_config, this reads it.
9
+ */
10
+ import { logger } from "../../config/logger.js";
11
+ // ---------------------------------------------------------------------------
12
+ // Pool size — delegates to repository
13
+ // ---------------------------------------------------------------------------
14
/** Read the desired warm-pool size from persistent config via the repository. */
export async function getPoolSize(repository) {
    const size = await repository.getPoolSize();
    return size;
}
17
/** Persist a new desired warm-pool size via the repository. */
export async function setPoolSize(repository, size) {
    await repository.setPoolSize(size);
}
20
+ // ---------------------------------------------------------------------------
21
+ // Warm container management
22
+ // ---------------------------------------------------------------------------
23
/**
 * Provision one warm (unclaimed) container: init the data volume's
 * permissions, start the container, attach it to the fleet network,
 * and record it in pool_instances.
 *
 * Errors are logged, not thrown — replenishment is best-effort. On
 * failure after the warm container was created, it is force-removed
 * so an untracked container is never left running (previously it was
 * orphaned because the DB insert never happened).
 */
async function createWarmContainer(container, repo, config) {
    if (!container.fleet)
        throw new Error("Fleet services required for hot pool");
    const pc = container.productConfig;
    const containerImage = pc.fleet?.containerImage ?? "ghcr.io/wopr-network/platform:latest";
    const containerPort = pc.fleet?.containerPort ?? 3100;
    const provisionSecret = config.provisionSecret;
    const dockerNetwork = pc.fleet?.dockerNetwork ?? "";
    const docker = container.fleet.docker;
    const id = crypto.randomUUID();
    const containerName = `pool-${id.slice(0, 8)}`;
    const volumeName = `pool-${id.slice(0, 8)}`;
    // Declared outside the try so the catch can clean it up.
    let warmContainer = null;
    try {
        // Init volume permissions: run a throwaway root container that
        // chowns /data to the app user before the real container starts.
        const init = await docker.createContainer({
            Image: containerImage,
            Entrypoint: ["/bin/sh", "-c"],
            Cmd: ["chown -R 999:999 /data"],
            User: "root",
            HostConfig: { Binds: [`${volumeName}:/data`] },
        });
        await init.start();
        await init.wait();
        await init.remove();
        warmContainer = await docker.createContainer({
            Image: containerImage,
            name: containerName,
            Env: [`PORT=${containerPort}`, `PROVISION_SECRET=${provisionSecret}`, "HOME=/data"],
            HostConfig: {
                Binds: [`${volumeName}:/data`],
                RestartPolicy: { Name: "unless-stopped" },
            },
        });
        await warmContainer.start();
        if (dockerNetwork) {
            const network = docker.getNetwork(dockerNetwork);
            await network.connect({ Container: warmContainer.id });
        }
        await repo.insertWarm(id, warmContainer.id);
        logger.info(`Hot pool: created warm container ${containerName} (${id})`);
    }
    catch (err) {
        logger.error("Hot pool: failed to create warm container", {
            error: err.message,
        });
        // Best-effort cleanup: never leave a running container that is
        // not tracked in pool_instances.
        if (warmContainer) {
            try {
                await warmContainer.remove({ force: true });
            }
            catch {
                /* already gone */
            }
        }
    }
}
70
/**
 * Top the pool up to its configured size, creating warm containers
 * one at a time. No-op when the pool is already at or above target.
 */
export async function replenishPool(container, repo, config) {
    const desired = await repo.getPoolSize();
    const current = await repo.warmCount();
    const deficit = desired - current;
    if (deficit <= 0)
        return;
    logger.info(`Hot pool: replenishing ${deficit} container(s) (have ${current}, want ${desired})`);
    // Sequential on purpose: avoid hammering the Docker daemon.
    let remaining = deficit;
    while (remaining > 0) {
        await createWarmContainer(container, repo, config);
        remaining -= 1;
    }
}
81
/**
 * Reconcile DB state with Docker: any "warm" instance whose container
 * is stopped or missing is marked dead (and removed when possible),
 * then all dead rows are purged.
 */
async function cleanupDead(container, repo) {
    if (!container.fleet)
        return;
    const docker = container.fleet.docker;
    for (const instance of await repo.listWarm()) {
        try {
            const handle = docker.getContainer(instance.containerId);
            const details = await handle.inspect();
            if (details.State.Running)
                continue;
            // Stopped container: mark dead first, then best-effort remove.
            await repo.markDead(instance.id);
            try {
                await handle.remove({ force: true });
            }
            catch {
                /* already gone */
            }
            logger.warn(`Hot pool: marked dead container ${instance.id}`);
        }
        catch {
            // inspect() failed — the container no longer exists at all.
            await repo.markDead(instance.id);
            logger.warn(`Hot pool: marked missing container ${instance.id} as dead`);
        }
    }
    await repo.deleteDead();
}
108
+ // ---------------------------------------------------------------------------
109
+ // Lifecycle
110
+ // ---------------------------------------------------------------------------
111
/**
 * Start the hot pool manager: do an immediate cleanup + replenish
 * (failures here propagate to the caller), then keep doing both on a
 * timer. Returns handles so the caller can stop the loop on shutdown.
 */
export async function startHotPool(container, repo, config) {
    const tick = async () => {
        await cleanupDead(container, repo);
        await replenishPool(container, repo, config);
    };
    await tick();
    const intervalMs = config.replenishIntervalMs ?? 60_000;
    // Periodic ticks must never throw out of the timer callback.
    const replenishTimer = setInterval(() => {
        tick().catch((err) => {
            logger.error("Hot pool tick failed", { error: err.message });
        });
    }, intervalMs);
    logger.info("Hot pool manager started");
    return {
        replenishTimer,
        stop: () => clearInterval(replenishTimer),
    };
}
@@ -0,0 +1,44 @@
1
/**
 * Repository for hot pool database operations.
 *
 * Encapsulates all pool_config and pool_instances queries behind
 * a testable interface. No raw pool.query() outside this file.
 */
import type { Pool } from "pg";
/** One row of the pool_instances table. */
export interface PoolInstance {
    id: string;
    /** Docker container id backing this instance. */
    containerId: string;
    /** Lifecycle state stored in the DB (e.g. 'warm', 'claimed', 'dead'). */
    status: string;
    /** Owning tenant once claimed; null while still warm. */
    tenantId: string | null;
    /** Instance name once claimed; null while still warm. */
    name: string | null;
}
/** DB operations required by the hot pool manager and the claim flow. */
export interface IPoolRepository {
    /** Desired warm-pool size from pool_config. */
    getPoolSize(): Promise<number>;
    /** Upsert the desired warm-pool size into pool_config. */
    setPoolSize(size: number): Promise<void>;
    /** Count of instances currently in 'warm' status. */
    warmCount(): Promise<number>;
    /** Record a freshly created warm container. */
    insertWarm(id: string, containerId: string): Promise<void>;
    /** All instances currently in 'warm' status. */
    listWarm(): Promise<PoolInstance[]>;
    /** Transition an instance to 'dead'. */
    markDead(id: string): Promise<void>;
    /** Purge all 'dead' rows. */
    deleteDead(): Promise<void>;
    /** Atomically claim the oldest warm instance; null when the pool is empty. */
    claimWarm(tenantId: string, name: string): Promise<{
        id: string;
        containerId: string;
    } | null>;
    /** Set an arbitrary status on an instance. */
    updateInstanceStatus(id: string, status: string): Promise<void>;
}
/** pg-backed implementation of IPoolRepository using parameterized SQL. */
export declare class DrizzlePoolRepository implements IPoolRepository {
    private pool;
    constructor(pool: Pool);
    getPoolSize(): Promise<number>;
    setPoolSize(size: number): Promise<void>;
    warmCount(): Promise<number>;
    insertWarm(id: string, containerId: string): Promise<void>;
    listWarm(): Promise<PoolInstance[]>;
    markDead(id: string): Promise<void>;
    deleteDead(): Promise<void>;
    claimWarm(tenantId: string, name: string): Promise<{
        id: string;
        containerId: string;
    } | null>;
    updateInstanceStatus(id: string, status: string): Promise<void>;
}
@@ -0,0 +1,72 @@
1
+ /**
2
+ * Repository for hot pool database operations.
3
+ *
4
+ * Encapsulates all pool_config and pool_instances queries behind
5
+ * a testable interface. No raw pool.query() outside this file.
6
+ */
7
/**
 * pg-backed repository for hot pool state. Owns every pool_config and
 * pool_instances query; callers depend only on the IPoolRepository contract.
 */
export class DrizzlePoolRepository {
    pool;
    constructor(pool) {
        this.pool = pool;
    }
    /** Desired warm-pool size; defaults to 2 when unset or unreadable. */
    async getPoolSize() {
        try {
            const result = await this.pool.query("SELECT pool_size FROM pool_config WHERE id = 1");
            const row = result.rows[0];
            return row?.pool_size ?? 2;
        }
        catch {
            // Missing table or transient DB error: fall back to the default.
            return 2;
        }
    }
    /** Upsert the singleton pool_config row. */
    async setPoolSize(size) {
        const sql = "INSERT INTO pool_config (id, pool_size) VALUES (1, $1) ON CONFLICT (id) DO UPDATE SET pool_size = $1";
        await this.pool.query(sql, [size]);
    }
    /** Number of instances currently in 'warm' status. */
    async warmCount() {
        const result = await this.pool.query("SELECT COUNT(*)::int AS count FROM pool_instances WHERE status = 'warm'");
        return result.rows[0].count;
    }
    /** Record a freshly created warm container. */
    async insertWarm(id, containerId) {
        const sql = "INSERT INTO pool_instances (id, container_id, status) VALUES ($1, $2, 'warm')";
        await this.pool.query(sql, [id, containerId]);
    }
    /** All instances currently in 'warm' status, mapped to camelCase. */
    async listWarm() {
        const result = await this.pool.query("SELECT id, container_id, status, tenant_id, name FROM pool_instances WHERE status = 'warm'");
        return result.rows.map((row) => ({
            id: row.id,
            containerId: row.container_id,
            status: row.status,
            tenantId: row.tenant_id ?? null,
            name: row.name ?? null,
        }));
    }
    /** Transition an instance to 'dead'. */
    async markDead(id) {
        await this.pool.query("UPDATE pool_instances SET status = 'dead' WHERE id = $1", [id]);
    }
    /** Purge all 'dead' rows. */
    async deleteDead() {
        await this.pool.query("DELETE FROM pool_instances WHERE status = 'dead'");
    }
    /**
     * Atomically claim the oldest warm instance for a tenant.
     * FOR UPDATE SKIP LOCKED lets concurrent claimers grab distinct rows
     * without blocking each other. Returns null when the pool is empty.
     */
    async claimWarm(tenantId, name) {
        const result = await this.pool.query(`UPDATE pool_instances
       SET status = 'claimed',
           claimed_at = NOW(),
           tenant_id = $1,
           name = $2
       WHERE id = (
         SELECT id FROM pool_instances
         WHERE status = 'warm'
         ORDER BY created_at ASC
         LIMIT 1
         FOR UPDATE SKIP LOCKED
       )
       RETURNING id, container_id`, [tenantId, name]);
        if (result.rowCount === 0)
            return null;
        const [claimed] = result.rows;
        return { id: claimed.id, containerId: claimed.container_id };
    }
    /** Set an arbitrary status on an instance. */
    async updateInstanceStatus(id, status) {
        await this.pool.query("UPDATE pool_instances SET status = $1 WHERE id = $2", [status, id]);
    }
}
@@ -0,0 +1,15 @@
1
/**
 * createTestContainer — builds a PlatformContainer with sensible mock
 * defaults for unit tests. All feature sub-containers default to null.
 * Core services get minimal stubs that satisfy their interfaces.
 *
 * Usage:
 *   const c = createTestContainer();
 *   const c2 = createTestContainer({ creditLedger: myCustomLedger });
 */
import type { PlatformContainer } from "./container.js";
/**
 * Create a PlatformContainer pre-filled with no-op stubs.
 * Pass overrides for any field you need to customize in your test.
 * Overrides replace the corresponding default field wholesale.
 */
export declare function createTestContainer(overrides?: Partial<PlatformContainer>): PlatformContainer;
@@ -0,0 +1,103 @@
1
+ /**
2
+ * createTestContainer — builds a PlatformContainer with sensible mock
3
+ * defaults for unit tests. All feature sub-containers default to null.
4
+ * Core services get minimal stubs that satisfy their interfaces.
5
+ *
6
+ * Usage:
7
+ * const c = createTestContainer();
8
+ * const c2 = createTestContainer({ creditLedger: myCustomLedger });
9
+ */
10
+ // ---------------------------------------------------------------------------
11
+ // Stub factories (satisfy interface contracts with no-op implementations)
12
+ // ---------------------------------------------------------------------------
13
/** Credit-ledger stub: every operation resolves to an empty or zero value. */
function stubLedger() {
    // Shared empty entry returned by all posting operations.
    const entry = {};
    return {
        post: async () => entry,
        credit: async () => entry,
        debit: async () => entry,
        balance: async () => 0,
        hasReferenceId: async () => false,
        history: async () => [],
        tenantsWithBalance: async () => [],
        memberUsage: async () => [],
        lifetimeSpend: async () => 0,
        lifetimeSpendBatch: async () => new Map(),
        expiredCredits: async () => [],
        trialBalance: async () => ({ balanced: true }),
        accountBalance: async () => 0,
        seedSystemAccounts: async () => { },
        existsByReferenceIdLike: async () => false,
        sumPurchasesForPeriod: async () => 0,
        getActiveTenantIdsInWindow: async () => [],
        debitCapped: async () => null,
    };
}
37
/** Org-member repository stub: no members, no invites, all writes no-op. */
function stubOrgMemberRepo() {
    const noop = async () => { };
    return {
        listMembers: async () => [],
        addMember: noop,
        updateMemberRole: noop,
        removeMember: noop,
        findMember: async () => null,
        countAdminsAndOwners: async () => 0,
        listInvites: async () => [],
        createInvite: noop,
        findInviteById: async () => null,
        findInviteByToken: async () => null,
        deleteInvite: noop,
        deleteAllMembers: noop,
        deleteAllInvites: noop,
        listOrgsByUser: async () => [],
        markInviteAccepted: noop,
    };
}
56
/** User-role repository stub: no roles exist, nobody is a platform admin. */
function stubUserRoleRepo() {
    return {
        getTenantIdByUserId: async () => null,
        grantRole: async () => { },
        revokeRole: async () => false,
        listRolesByUser: async () => [],
        listUsersByRole: async () => [],
        isPlatformAdmin: async () => false,
    };
}
66
/** Minimal product config: a "test" product with every feature area disabled. */
function stubProductConfig() {
    const product = {
        slug: "test",
        name: "Test Product",
    };
    return {
        product,
        navItems: [],
        domains: [],
        features: null,
        fleet: null,
        billing: null,
    };
}
79
+ // ---------------------------------------------------------------------------
80
+ // Public API
81
+ // ---------------------------------------------------------------------------
82
/**
 * Create a PlatformContainer pre-filled with no-op stubs.
 * Pass overrides for any field you need to customize in your test.
 */
export function createTestContainer(overrides) {
    // Overrides are spread last, so they win field-by-field over the stubs.
    return {
        db: {},
        pool: { end: async () => { } },
        productConfig: stubProductConfig(),
        creditLedger: stubLedger(),
        orgMemberRepo: stubOrgMemberRepo(),
        orgService: {},
        userRoleRepo: stubUserRoleRepo(),
        // Feature sub-containers default to null (not enabled)
        fleet: null,
        crypto: null,
        stripe: null,
        gateway: null,
        hotPool: null,
        ...overrides,
    };
}
@@ -0,0 +1,17 @@
1
/**
 * Shared auth helper factories for tRPC routers.
 *
 * These helpers can be constructed with explicit deps (for container-based DI)
 * instead of relying on module-level singletons.
 */
import type { IOrgMemberRepository } from "../tenancy/org-member-repository.js";
/**
 * Creates an assertOrgAdminOrOwner function closed over the given repository.
 *
 * The returned function resolves when the user may administer the org
 * and rejects (FORBIDDEN) otherwise.
 *
 * Usage:
 * ```ts
 * const assertOrgAdmin = createAssertOrgAdminOrOwner(container.orgMemberRepo);
 * await assertOrgAdmin(tenantId, userId);
 * ```
 */
export declare function createAssertOrgAdminOrOwner(orgMemberRepo: IOrgMemberRepository): (tenantId: string, userId: string) => Promise<void>;
@@ -0,0 +1,26 @@
1
+ /**
2
+ * Shared auth helper factories for tRPC routers.
3
+ *
4
+ * These helpers can be constructed with explicit deps (for container-based DI)
5
+ * instead of relying on module-level singletons.
6
+ */
7
+ import { TRPCError } from "@trpc/server";
8
/**
 * Creates an assertOrgAdminOrOwner function closed over the given repository.
 *
 * Resolves when the user is allowed to administer the org; throws a
 * FORBIDDEN TRPCError otherwise.
 *
 * Usage:
 * ```ts
 * const assertOrgAdmin = createAssertOrgAdminOrOwner(container.orgMemberRepo);
 * await assertOrgAdmin(tenantId, userId);
 * ```
 */
export function createAssertOrgAdminOrOwner(orgMemberRepo) {
    return async function assertOrgAdminOrOwner(tenantId, userId) {
        // Personal orgs: the tenant id IS the user id, so access is implicit.
        if (tenantId === userId)
            return;
        const membership = await orgMemberRepo.findMember(tenantId, userId);
        const role = membership?.role;
        if (role === "owner" || role === "admin")
            return;
        throw new TRPCError({ code: "FORBIDDEN", message: "Organization admin access required" });
    };
}