@powerhousedao/switchboard 6.0.0-dev.20 → 6.0.0-dev.200

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. package/Auth.md +45 -27
  2. package/CHANGELOG.md +1573 -5
  3. package/README.md +13 -12
  4. package/dist/index.d.mts +1 -0
  5. package/dist/index.mjs +129 -0
  6. package/dist/index.mjs.map +1 -0
  7. package/dist/install-packages.d.mts +1 -0
  8. package/dist/install-packages.mjs +31 -0
  9. package/dist/install-packages.mjs.map +1 -0
  10. package/dist/migrate.d.mts +1 -0
  11. package/dist/migrate.mjs +55 -0
  12. package/dist/migrate.mjs.map +1 -0
  13. package/dist/server-BMtyzhoR.mjs +291 -0
  14. package/dist/server-BMtyzhoR.mjs.map +1 -0
  15. package/dist/server.d.mts +93 -0
  16. package/dist/server.d.mts.map +1 -0
  17. package/dist/server.mjs +4 -0
  18. package/dist/utils-DFl0ezBT.mjs +44 -0
  19. package/dist/utils-DFl0ezBT.mjs.map +1 -0
  20. package/dist/utils.d.mts +9 -0
  21. package/dist/utils.d.mts.map +1 -0
  22. package/dist/utils.mjs +2 -0
  23. package/package.json +53 -39
  24. package/test/metrics.test.ts +202 -0
  25. package/tsconfig.json +11 -9
  26. package/tsdown.config.ts +16 -0
  27. package/vitest.config.ts +11 -0
  28. package/Dockerfile +0 -86
  29. package/dist/src/clients/redis.d.ts +0 -5
  30. package/dist/src/clients/redis.d.ts.map +0 -1
  31. package/dist/src/clients/redis.js +0 -48
  32. package/dist/src/clients/redis.js.map +0 -1
  33. package/dist/src/config.d.ts +0 -12
  34. package/dist/src/config.d.ts.map +0 -1
  35. package/dist/src/config.js +0 -33
  36. package/dist/src/config.js.map +0 -1
  37. package/dist/src/connect-crypto.d.ts +0 -41
  38. package/dist/src/connect-crypto.d.ts.map +0 -1
  39. package/dist/src/connect-crypto.js +0 -127
  40. package/dist/src/connect-crypto.js.map +0 -1
  41. package/dist/src/feature-flags.d.ts +0 -2
  42. package/dist/src/feature-flags.d.ts.map +0 -1
  43. package/dist/src/feature-flags.js +0 -9
  44. package/dist/src/feature-flags.js.map +0 -1
  45. package/dist/src/index.d.ts +0 -3
  46. package/dist/src/index.d.ts.map +0 -1
  47. package/dist/src/index.js +0 -21
  48. package/dist/src/index.js.map +0 -1
  49. package/dist/src/install-packages.d.ts +0 -2
  50. package/dist/src/install-packages.d.ts.map +0 -1
  51. package/dist/src/install-packages.js +0 -36
  52. package/dist/src/install-packages.js.map +0 -1
  53. package/dist/src/migrate.d.ts +0 -3
  54. package/dist/src/migrate.d.ts.map +0 -1
  55. package/dist/src/migrate.js +0 -65
  56. package/dist/src/migrate.js.map +0 -1
  57. package/dist/src/profiler.d.ts +0 -4
  58. package/dist/src/profiler.d.ts.map +0 -1
  59. package/dist/src/profiler.js +0 -17
  60. package/dist/src/profiler.js.map +0 -1
  61. package/dist/src/server.d.ts +0 -6
  62. package/dist/src/server.d.ts.map +0 -1
  63. package/dist/src/server.js +0 -304
  64. package/dist/src/server.js.map +0 -1
  65. package/dist/src/types.d.ts +0 -64
  66. package/dist/src/types.d.ts.map +0 -1
  67. package/dist/src/types.js +0 -2
  68. package/dist/src/types.js.map +0 -1
  69. package/dist/src/utils.d.ts +0 -6
  70. package/dist/src/utils.d.ts.map +0 -1
  71. package/dist/src/utils.js +0 -92
  72. package/dist/src/utils.js.map +0 -1
  73. package/dist/tsconfig.tsbuildinfo +0 -1
  74. package/entrypoint.sh +0 -17
package/README.md CHANGED
@@ -50,6 +50,7 @@ docker compose -f packages/reactor/docker-compose.yml up -d
  ```

  This starts:
+
  - PostgreSQL on port `5433` (mapped from container port 5432)
  - Adminer (database UI) on port `8080`

@@ -94,22 +95,21 @@ pnpm add -g @powerhousedao/switchboard

  ## 🏃‍♂️ Quick Start

-
  ## ⚙️ Configuration

  ### Environment Variables

- | Variable | Description | Default |
- | ---------------------------- | ---------------------------------- | --------------------- |
- | `PORT` | Server port | `4001` |
- | `DATABASE_URL` | Database connection string | `./.ph/drive-storage` |
- | `PH_REACTOR_DATABASE_URL` | PostgreSQL URL (takes precedence) | - |
- | `REDIS_URL` | Redis connection URL | - |
- | `REDIS_TLS_URL` | Redis TLS connection URL | - |
- | `SENTRY_DSN` | Sentry DSN for error tracking | - |
- | `SENTRY_ENV` | Sentry environment | - |
- | `PYROSCOPE_SERVER_ADDRESS` | Pyroscope server address | - |
- | `FEATURE_REACTORV2_ENABLED` | Enable Reactor v2 subgraph feature | `false` |
+ | Variable | Description | Default |
+ | --------------------------- | ---------------------------------- | --------------------- |
+ | `PORT` | Server port | `4001` |
+ | `DATABASE_URL` | Database connection string | `./.ph/drive-storage` |
+ | `PH_REACTOR_DATABASE_URL` | PostgreSQL URL (takes precedence) | - |
+ | `REDIS_URL` | Redis connection URL | - |
+ | `REDIS_TLS_URL` | Redis TLS connection URL | - |
+ | `SENTRY_DSN` | Sentry DSN for error tracking | - |
+ | `SENTRY_ENV` | Sentry environment | - |
+ | `PYROSCOPE_SERVER_ADDRESS` | Pyroscope server address | - |
+ | `FEATURE_REACTORV2_ENABLED` | Enable Reactor v2 subgraph feature | `false` |

  ### Authentication Configuration

@@ -245,6 +245,7 @@ ph switchboard --db-path postgresql://user:pass@localhost:5432/db --migrate-stat
  #### Environment Variables for Migrations

  The migration commands check for a PostgreSQL URL in this order:
+
  1. `PH_REACTOR_DATABASE_URL`
  2. `DATABASE_URL`
  3. Config file (`powerhouse.config.json` -> `switchboard.database.url`)
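For reference, the lookup order above can be expressed as a small TypeScript sketch. The helper name `resolveMigrationDatabaseUrl` and the inline config object are illustrative only and not part of the package API; the published `dist/migrate.mjs` shown further down implements the same chain.

```ts
// Sketch of the PostgreSQL URL resolution order documented above.
// `switchboardConfig` stands in for the parsed powerhouse.config.json.
function resolveMigrationDatabaseUrl(
  env: NodeJS.ProcessEnv,
  switchboardConfig?: { database?: { url?: string } },
): string | undefined {
  return (
    env.PH_REACTOR_DATABASE_URL ??      // 1. explicit reactor database URL
    env.DATABASE_URL ??                 // 2. generic database URL
    switchboardConfig?.database?.url    // 3. powerhouse.config.json -> switchboard.database.url
  );
}

// Migrations only run when the resolved value is a PostgreSQL URL; anything
// else is left to the automatic PGlite migrations on startup.
const url = resolveMigrationDatabaseUrl(process.env, { database: { url: "./.ph/drive-storage" } });
const isPostgres = !!url && (url.startsWith("postgresql://") || url.startsWith("postgres://"));
console.log(isPostgres ? `migrating ${url}` : "skipping migrations (no PostgreSQL URL)");
```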
package/dist/index.d.mts ADDED
@@ -0,0 +1 @@
+ export { };
package/dist/index.mjs ADDED
@@ -0,0 +1,129 @@
+ #!/usr/bin/env node
+ import { n as startSwitchboard } from "./server-BMtyzhoR.mjs";
+ import "./utils-DFl0ezBT.mjs";
+ import * as Sentry from "@sentry/node";
+ import { childLogger } from "document-model";
+ import dotenv from "dotenv";
+ import { getConfig } from "@powerhousedao/config/node";
+ import { OTLPMetricExporter } from "@opentelemetry/exporter-metrics-otlp-http";
+ import { Resource } from "@opentelemetry/resources";
+ import { MeterProvider, PeriodicExportingMetricReader } from "@opentelemetry/sdk-metrics";
+ //#region src/config.ts
+ dotenv.config();
+ const { switchboard } = getConfig();
+ const config = {
+ database: { url: process.env.PH_SWITCHBOARD_DATABASE_URL ?? switchboard?.database?.url ?? "dev.db" },
+ port: process.env.PH_SWITCHBOARD_PORT && !isNaN(Number(process.env.PH_SWITCHBOARD_PORT)) ? Number(process.env.PH_SWITCHBOARD_PORT) : switchboard?.port ?? 4001,
+ mcp: true,
+ drive: {
+ id: "powerhouse",
+ slug: "powerhouse",
+ global: {
+ name: "Powerhouse",
+ icon: "https://ipfs.io/ipfs/QmcaTDBYn8X2psGaXe7iQ6qd8q6oqHLgxvMX9yXf7f9uP7"
+ },
+ local: {
+ availableOffline: true,
+ listeners: [],
+ sharingType: "public",
+ triggers: []
+ }
+ }
+ };
+ //#endregion
+ //#region src/metrics.ts
+ const logger$1 = childLogger(["switchboard", "metrics"]);
+ function createMeterProviderFromEnv(env) {
+ const endpoint = env.OTEL_EXPORTER_OTLP_ENDPOINT;
+ if (!endpoint) return void 0;
+ const parsed = parseInt(env.OTEL_METRIC_EXPORT_INTERVAL ?? "", 10);
+ const exportIntervalMillis = Number.isFinite(parsed) && parsed > 0 ? parsed : 5e3;
+ const base = endpoint.replace(/\/$/, "");
+ const exporterUrl = base.endsWith("/v1/metrics") ? base : `${base}/v1/metrics`;
+ logger$1.info(`Initializing OpenTelemetry metrics exporter at: ${endpoint}`);
+ const meterProvider = new MeterProvider({
+ resource: new Resource({ "service.name": env.OTEL_SERVICE_NAME ?? "switchboard" }),
+ readers: [new PeriodicExportingMetricReader({
+ exporter: new OTLPMetricExporter({ url: exporterUrl }),
+ exportIntervalMillis,
+ exportTimeoutMillis: Math.max(exportIntervalMillis - 250, 1)
+ })]
+ });
+ logger$1.info(`Metrics export enabled (interval: ${exportIntervalMillis}ms)`);
+ return meterProvider;
+ }
+ //#endregion
+ //#region src/profiler.ts
+ async function initProfilerFromEnv(env) {
+ const { PYROSCOPE_SERVER_ADDRESS: serverAddress, PYROSCOPE_APPLICATION_NAME: appName, PYROSCOPE_USER: basicAuthUser, PYROSCOPE_PASSWORD: basicAuthPassword, PYROSCOPE_WALL_ENABLED: wallEnabled, PYROSCOPE_HEAP_ENABLED: heapEnabled } = env;
+ return initProfiler({
+ serverAddress,
+ appName,
+ basicAuthUser,
+ basicAuthPassword,
+ wall: {
+ samplingDurationMs: 1e4,
+ samplingIntervalMicros: 1e4,
+ collectCpuTime: true
+ },
+ heap: {
+ samplingIntervalBytes: 512 * 1024,
+ stackDepth: 64
+ }
+ }, {
+ wallEnabled: wallEnabled !== "false",
+ heapEnabled: heapEnabled === "true"
+ });
+ }
+ async function initProfiler(options, flags = {
+ wallEnabled: true,
+ heapEnabled: false
+ }) {
+ console.log("Initializing Pyroscope profiler at:", options?.serverAddress);
+ console.log(" Wall profiling:", flags.wallEnabled ? "enabled" : "disabled");
+ console.log(" Heap profiling:", flags.heapEnabled ? "enabled" : "disabled");
+ const { default: Pyroscope } = await import("@pyroscope/nodejs");
+ Pyroscope.init(options);
+ if (flags.wallEnabled) Pyroscope.startWallProfiling();
+ Pyroscope.startCpuProfiling();
+ if (flags.heapEnabled) Pyroscope.startHeapProfiling();
+ }
+ //#endregion
+ //#region src/index.mts
+ const logger = childLogger(["switchboard"]);
+ function ensureNodeVersion(minVersion = "24") {
+ const version = process.versions.node;
+ if (!version) return;
+ if (version < minVersion) {
+ console.error(`Node version ${minVersion} or higher is required. Current version: ${version}`);
+ process.exit(1);
+ }
+ }
+ ensureNodeVersion("24");
+ process.setMaxListeners(0);
+ const meterProvider = createMeterProviderFromEnv({
+ OTEL_EXPORTER_OTLP_ENDPOINT: process.env.OTEL_EXPORTER_OTLP_ENDPOINT,
+ OTEL_METRIC_EXPORT_INTERVAL: process.env.OTEL_METRIC_EXPORT_INTERVAL,
+ OTEL_SERVICE_NAME: process.env.OTEL_SERVICE_NAME
+ });
+ async function shutdown() {
+ console.log("\nShutting down...");
+ await Promise.race([meterProvider?.shutdown().catch(() => void 0), new Promise((resolve) => setTimeout(resolve, 5e3))]);
+ process.exit(0);
+ }
+ process.on("SIGINT", shutdown);
+ process.on("SIGTERM", shutdown);
+ if (process.env.PYROSCOPE_SERVER_ADDRESS) try {
+ await initProfilerFromEnv(process.env);
+ } catch (e) {
+ Sentry.captureException(e);
+ logger.error("Error starting profiler: @error", e);
+ }
+ startSwitchboard({
+ ...config,
+ meterProvider
+ }).catch(console.error);
+ //#endregion
+ export {};
+
+ //# sourceMappingURL=index.mjs.map
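The bundled `createMeterProviderFromEnv` above derives the OTLP exporter URL and export interval from environment variables. A standalone sketch of just that derivation; these helpers are not exported by the package, and the endpoint values are examples:

```ts
// Re-statement of the exporter URL / interval logic seen in createMeterProviderFromEnv.
function deriveOtlpMetricsUrl(endpoint: string): string {
  const base = endpoint.replace(/\/$/, "");            // strip a single trailing slash
  return base.endsWith("/v1/metrics") ? base : `${base}/v1/metrics`;
}

function deriveExportIntervalMillis(raw: string | undefined): number {
  const parsed = parseInt(raw ?? "", 10);
  return Number.isFinite(parsed) && parsed > 0 ? parsed : 5_000; // default: 5s
}

console.log(deriveOtlpMetricsUrl("http://otel-collector:4318/"));      // http://otel-collector:4318/v1/metrics
console.log(deriveOtlpMetricsUrl("http://collector:4318/v1/metrics")); // left unchanged
console.log(deriveExportIntervalMillis(process.env.OTEL_METRIC_EXPORT_INTERVAL)); // 5000 unless overridden
```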
package/dist/index.mjs.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.mjs","names":["logger"],"sources":["../src/config.ts","../src/metrics.ts","../src/profiler.ts","../src/index.mts"],"sourcesContent":["import dotenv from \"dotenv\";\ndotenv.config();\n\nimport { getConfig } from \"@powerhousedao/config/node\";\nimport type { DriveInput } from \"@powerhousedao/shared/document-drive\";\nconst phConfig = getConfig();\nconst { switchboard } = phConfig;\ninterface Config {\n database: {\n url: string;\n };\n port: number;\n mcp: boolean;\n drive: DriveInput;\n}\nexport const config: Config = {\n database: {\n // url: process.env.PH_SWITCHBOARD_DATABASE_URL ?? switchboard?.database?.url ?? \"dev.db\",\n url:\n process.env.PH_SWITCHBOARD_DATABASE_URL ??\n switchboard?.database?.url ??\n \"dev.db\",\n },\n port:\n process.env.PH_SWITCHBOARD_PORT &&\n !isNaN(Number(process.env.PH_SWITCHBOARD_PORT))\n ? Number(process.env.PH_SWITCHBOARD_PORT)\n : (switchboard?.port ?? 4001),\n mcp: true,\n drive: {\n id: \"powerhouse\",\n slug: \"powerhouse\",\n global: {\n name: \"Powerhouse\",\n icon: \"https://ipfs.io/ipfs/QmcaTDBYn8X2psGaXe7iQ6qd8q6oqHLgxvMX9yXf7f9uP7\",\n },\n local: {\n availableOffline: true,\n listeners: [],\n sharingType: \"public\",\n triggers: [],\n },\n },\n};\n","import { OTLPMetricExporter } from \"@opentelemetry/exporter-metrics-otlp-http\";\nimport { Resource } from \"@opentelemetry/resources\";\nimport {\n MeterProvider,\n PeriodicExportingMetricReader,\n} from \"@opentelemetry/sdk-metrics\";\nimport { childLogger } from \"document-model\";\n\nconst logger = childLogger([\"switchboard\", \"metrics\"]);\n\nexport function createMeterProviderFromEnv(env: {\n OTEL_EXPORTER_OTLP_ENDPOINT?: string;\n OTEL_METRIC_EXPORT_INTERVAL?: string;\n OTEL_SERVICE_NAME?: string;\n}): MeterProvider | undefined {\n const endpoint = env.OTEL_EXPORTER_OTLP_ENDPOINT;\n if (!endpoint) return undefined;\n\n const parsed = parseInt(env.OTEL_METRIC_EXPORT_INTERVAL ?? \"\", 10);\n const exportIntervalMillis =\n Number.isFinite(parsed) && parsed > 0 ? parsed : 5_000;\n\n const base = endpoint.replace(/\\/$/, \"\");\n const exporterUrl = base.endsWith(\"/v1/metrics\")\n ? base\n : `${base}/v1/metrics`;\n\n logger.info(`Initializing OpenTelemetry metrics exporter at: ${endpoint}`);\n const meterProvider = new MeterProvider({\n resource: new Resource({\n \"service.name\": env.OTEL_SERVICE_NAME ?? 
\"switchboard\",\n }),\n readers: [\n new PeriodicExportingMetricReader({\n exporter: new OTLPMetricExporter({\n url: exporterUrl,\n }),\n exportIntervalMillis,\n exportTimeoutMillis: Math.max(exportIntervalMillis - 250, 1),\n }),\n ],\n });\n logger.info(`Metrics export enabled (interval: ${exportIntervalMillis}ms)`);\n return meterProvider;\n}\n","import type { PyroscopeConfig } from \"@pyroscope/nodejs\";\n\nexport async function initProfilerFromEnv(env: typeof process.env) {\n const {\n PYROSCOPE_SERVER_ADDRESS: serverAddress,\n PYROSCOPE_APPLICATION_NAME: appName,\n PYROSCOPE_USER: basicAuthUser,\n PYROSCOPE_PASSWORD: basicAuthPassword,\n PYROSCOPE_WALL_ENABLED: wallEnabled,\n PYROSCOPE_HEAP_ENABLED: heapEnabled,\n } = env;\n\n const options: PyroscopeConfig = {\n serverAddress,\n appName,\n basicAuthUser,\n basicAuthPassword,\n // Wall profiling captures wall-clock time (includes async I/O waits)\n // This shows GraphQL resolvers even when waiting for database\n wall: {\n samplingDurationMs: 10000, // 10 second sampling windows\n samplingIntervalMicros: 10000, // 10ms sampling interval (100 samples/sec)\n collectCpuTime: true, // Also collect CPU time alongside wall time\n },\n // Heap profiling for memory allocation tracking\n heap: {\n samplingIntervalBytes: 512 * 1024, // Sample every 512KB allocated\n stackDepth: 64, // Capture deeper stacks for better context\n },\n };\n return initProfiler(options, {\n wallEnabled: wallEnabled !== \"false\",\n heapEnabled: heapEnabled === \"true\",\n });\n}\n\ninterface ProfilerFlags {\n wallEnabled?: boolean;\n heapEnabled?: boolean;\n}\n\nexport async function initProfiler(\n options?: PyroscopeConfig,\n flags: ProfilerFlags = { wallEnabled: true, heapEnabled: false },\n) {\n console.log(\"Initializing Pyroscope profiler at:\", options?.serverAddress);\n console.log(\" Wall profiling:\", flags.wallEnabled ? \"enabled\" : \"disabled\");\n console.log(\" Heap profiling:\", flags.heapEnabled ? \"enabled\" : \"disabled\");\n\n const { default: Pyroscope } = await import(\"@pyroscope/nodejs\");\n Pyroscope.init(options);\n\n // Start wall profiling (captures async I/O time - shows resolvers)\n if (flags.wallEnabled) {\n Pyroscope.startWallProfiling();\n }\n\n // Start CPU profiling (captures CPU-bound work)\n Pyroscope.startCpuProfiling();\n\n // Optionally start heap profiling (memory allocations)\n if (flags.heapEnabled) {\n Pyroscope.startHeapProfiling();\n }\n}\n","#!/usr/bin/env node\nimport * as Sentry from \"@sentry/node\";\nimport { childLogger } from \"document-model\";\nimport { config } from \"./config.js\";\nimport { createMeterProviderFromEnv } from \"./metrics.js\";\nimport { initProfilerFromEnv } from \"./profiler.js\";\nimport { startSwitchboard } from \"./server.mjs\";\n\nconst logger = childLogger([\"switchboard\"]);\n\nfunction ensureNodeVersion(minVersion = \"24\") {\n const version = process.versions.node;\n if (!version) {\n return;\n }\n\n if (version < minVersion) {\n console.error(\n `Node version ${minVersion} or higher is required. 
Current version: ${version}`,\n );\n process.exit(1);\n }\n}\n// Ensure minimum Node.js version\nensureNodeVersion(\"24\");\n\n// Each subgraph registers its own SIGINT/SIGTERM listeners, and the count\n// scales with dynamically-loaded document models beyond the default cap of 10.\nprocess.setMaxListeners(0);\n\nconst meterProvider = createMeterProviderFromEnv({\n OTEL_EXPORTER_OTLP_ENDPOINT: process.env.OTEL_EXPORTER_OTLP_ENDPOINT,\n OTEL_METRIC_EXPORT_INTERVAL: process.env.OTEL_METRIC_EXPORT_INTERVAL,\n OTEL_SERVICE_NAME: process.env.OTEL_SERVICE_NAME,\n});\n\nasync function shutdown() {\n console.log(\"\\nShutting down...\");\n // Flush final metrics before exit. Races against a 5s deadline so an\n // unresponsive OTLP endpoint cannot exhaust terminationGracePeriodSeconds.\n await Promise.race([\n meterProvider?.shutdown().catch(() => undefined),\n new Promise<void>((resolve) => setTimeout(resolve, 5_000)),\n ]);\n process.exit(0);\n}\n\n// SIGINT: Ctrl-C in development; SIGTERM: graceful shutdown in Docker/Kubernetes\nprocess.on(\"SIGINT\", shutdown);\nprocess.on(\"SIGTERM\", shutdown);\n\nif (process.env.PYROSCOPE_SERVER_ADDRESS) {\n try {\n await initProfilerFromEnv(process.env);\n } catch (e) {\n Sentry.captureException(e);\n logger.error(\"Error starting profiler: @error\", e);\n }\n}\n\nstartSwitchboard({ ...config, meterProvider }).catch(console.error);\n"],"mappings":";;;;;;;;;;;AACA,OAAO,QAAQ;AAKf,MAAM,EAAE,gBADS,WAAW;AAU5B,MAAa,SAAiB;CAC5B,UAAU,EAER,KACE,QAAQ,IAAI,+BACZ,aAAa,UAAU,OACvB,UACH;CACD,MACE,QAAQ,IAAI,uBACZ,CAAC,MAAM,OAAO,QAAQ,IAAI,oBAAoB,CAAC,GAC3C,OAAO,QAAQ,IAAI,oBAAoB,GACtC,aAAa,QAAQ;CAC5B,KAAK;CACL,OAAO;EACL,IAAI;EACJ,MAAM;EACN,QAAQ;GACN,MAAM;GACN,MAAM;GACP;EACD,OAAO;GACL,kBAAkB;GAClB,WAAW,EAAE;GACb,aAAa;GACb,UAAU,EAAE;GACb;EACF;CACF;;;ACnCD,MAAMA,WAAS,YAAY,CAAC,eAAe,UAAU,CAAC;AAEtD,SAAgB,2BAA2B,KAIb;CAC5B,MAAM,WAAW,IAAI;AACrB,KAAI,CAAC,SAAU,QAAO,KAAA;CAEtB,MAAM,SAAS,SAAS,IAAI,+BAA+B,IAAI,GAAG;CAClE,MAAM,uBACJ,OAAO,SAAS,OAAO,IAAI,SAAS,IAAI,SAAS;CAEnD,MAAM,OAAO,SAAS,QAAQ,OAAO,GAAG;CACxC,MAAM,cAAc,KAAK,SAAS,cAAc,GAC5C,OACA,GAAG,KAAK;AAEZ,UAAO,KAAK,mDAAmD,WAAW;CAC1E,MAAM,gBAAgB,IAAI,cAAc;EACtC,UAAU,IAAI,SAAS,EACrB,gBAAgB,IAAI,qBAAqB,eAC1C,CAAC;EACF,SAAS,CACP,IAAI,8BAA8B;GAChC,UAAU,IAAI,mBAAmB,EAC/B,KAAK,aACN,CAAC;GACF;GACA,qBAAqB,KAAK,IAAI,uBAAuB,KAAK,EAAE;GAC7D,CAAC,CACH;EACF,CAAC;AACF,UAAO,KAAK,qCAAqC,qBAAqB,KAAK;AAC3E,QAAO;;;;ACzCT,eAAsB,oBAAoB,KAAyB;CACjE,MAAM,EACJ,0BAA0B,eAC1B,4BAA4B,SAC5B,gBAAgB,eAChB,oBAAoB,mBACpB,wBAAwB,aACxB,wBAAwB,gBACtB;AAoBJ,QAAO,aAlB0B;EAC/B;EACA;EACA;EACA;EAGA,MAAM;GACJ,oBAAoB;GACpB,wBAAwB;GACxB,gBAAgB;GACjB;EAED,MAAM;GACJ,uBAAuB,MAAM;GAC7B,YAAY;GACb;EACF,EAC4B;EAC3B,aAAa,gBAAgB;EAC7B,aAAa,gBAAgB;EAC9B,CAAC;;AAQJ,eAAsB,aACpB,SACA,QAAuB;CAAE,aAAa;CAAM,aAAa;CAAO,EAChE;AACA,SAAQ,IAAI,uCAAuC,SAAS,cAAc;AAC1E,SAAQ,IAAI,qBAAqB,MAAM,cAAc,YAAY,WAAW;AAC5E,SAAQ,IAAI,qBAAqB,MAAM,cAAc,YAAY,WAAW;CAE5E,MAAM,EAAE,SAAS,cAAc,MAAM,OAAO;AAC5C,WAAU,KAAK,QAAQ;AAGvB,KAAI,MAAM,YACR,WAAU,oBAAoB;AAIhC,WAAU,mBAAmB;AAG7B,KAAI,MAAM,YACR,WAAU,oBAAoB;;;;ACtDlC,MAAM,SAAS,YAAY,CAAC,cAAc,CAAC;AAE3C,SAAS,kBAAkB,aAAa,MAAM;CAC5C,MAAM,UAAU,QAAQ,SAAS;AACjC,KAAI,CAAC,QACH;AAGF,KAAI,UAAU,YAAY;AACxB,UAAQ,MACN,gBAAgB,WAAW,2CAA2C,UACvE;AACD,UAAQ,KAAK,EAAE;;;AAInB,kBAAkB,KAAK;AAIvB,QAAQ,gBAAgB,EAAE;AAE1B,MAAM,gBAAgB,2BAA2B;CAC/C,6BAA6B,QAAQ,IAAI;CACzC,6BAA6B,QAAQ,IAAI;CACzC,mBAAmB,QAAQ,IAAI;CAChC,CAAC;AAEF,eAAe,WAAW;AACxB,SAAQ,IAAI,qBAAqB;AAGjC,OAAM,QAAQ,KAAK,CACjB,eAAe,UAAU,CAAC,YAAY,KAAA,EAAU,EAChD,IAAI,SAAe,YAAY,WAAW,SAAS,IAAM,CAAC,CAC3D,CAAC;AACF,SAAQ,KAAK,EAAE;;AAIjB,QAAQ,GAA
G,UAAU,SAAS;AAC9B,QAAQ,GAAG,WAAW,SAAS;AAE/B,IAAI,QAAQ,IAAI,yBACd,KAAI;AACF,OAAM,oBAAoB,QAAQ,IAAI;SAC/B,GAAG;AACV,QAAO,iBAAiB,EAAE;AAC1B,QAAO,MAAM,mCAAmC,EAAE;;AAItD,iBAAiB;CAAE,GAAG;CAAQ;CAAe,CAAC,CAAC,MAAM,QAAQ,MAAM"}
package/dist/install-packages.d.mts ADDED
@@ -0,0 +1 @@
+ export { };
package/dist/install-packages.mjs ADDED
@@ -0,0 +1,31 @@
+ import path from "path";
+ import { execSync } from "child_process";
+ import fs from "fs";
+ //#region src/install-packages.mts
+ const pkgs = process.env.PH_PACKAGES?.split(",") || [];
+ if (pkgs.length === 0 || pkgs.length === 1 && pkgs[0] === "") process.exit(0);
+ try {
+ const packageJsonPath = path.join(process.cwd(), "package.json");
+ const packageJsonContent = fs.readFileSync(packageJsonPath, "utf-8");
+ const packageJson = JSON.parse(packageJsonContent);
+ const installedDependencies = {
+ ...packageJson.dependencies || {},
+ ...packageJson.devDependencies || {}
+ };
+ for (const pkg of pkgs) {
+ if (pkg === "") continue;
+ if (installedDependencies[pkg]) {
+ console.log(`> Package ${pkg} is already installed, skipping`);
+ continue;
+ }
+ console.log(`> Installing ${pkg}`);
+ execSync(`pnpm add ${pkg}@latest`, { stdio: "inherit" });
+ }
+ } catch (error) {
+ console.error("Error in package installation:", error);
+ process.exit(1);
+ }
+ //#endregion
+ export {};
+
+ //# sourceMappingURL=install-packages.mjs.map
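`install-packages.mjs` is driven entirely by the `PH_PACKAGES` environment variable: a comma-separated list, with already-installed packages skipped and `pnpm add <pkg>@latest` run for the rest. A hedged example of invoking the bundled script from Node; the script path and package name are assumptions, and the call must run from a project directory that has a `package.json` and `pnpm` available:

```ts
// Invoke the bundled installer with PH_PACKAGES set, as the script expects.
import { execFileSync } from "node:child_process";

execFileSync(
  process.execPath,
  ["./node_modules/@powerhousedao/switchboard/dist/install-packages.mjs"], // assumed install path
  {
    stdio: "inherit",
    env: {
      ...process.env,
      // Comma-separated list; packages already in dependencies/devDependencies are skipped.
      PH_PACKAGES: "@powerhousedao/vetra",
    },
  },
);
```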
package/dist/install-packages.mjs.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"install-packages.mjs","names":[],"sources":["../src/install-packages.mts"],"sourcesContent":["import { execSync } from \"child_process\";\nimport fs from \"fs\";\nimport path from \"path\";\n\n// Define interface for package.json\ninterface PackageJson {\n dependencies?: Record<string, string>;\n devDependencies?: Record<string, string>;\n}\n\n// Get the list of packages to install from the environment variable\nconst pkgs = process.env.PH_PACKAGES?.split(\",\") || [];\n\n// Skip if no packages to install\nif (pkgs.length === 0 || (pkgs.length === 1 && pkgs[0] === \"\")) {\n process.exit(0);\n}\n\ntry {\n // Read the package.json file to check existing dependencies\n const packageJsonPath = path.join(process.cwd(), \"package.json\");\n const packageJsonContent = fs.readFileSync(packageJsonPath, \"utf-8\");\n const packageJson = JSON.parse(packageJsonContent) as PackageJson;\n\n // Get all installed dependencies\n const installedDependencies: Record<string, string> = {\n ...(packageJson.dependencies || {}),\n ...(packageJson.devDependencies || {}),\n };\n\n for (const pkg of pkgs) {\n if (pkg === \"\") continue;\n\n // Check if the package is already installed\n if (installedDependencies[pkg]) {\n console.log(`> Package ${pkg} is already installed, skipping`);\n continue;\n }\n\n console.log(`> Installing ${pkg}`);\n execSync(`pnpm add ${pkg}@latest`, { stdio: \"inherit\" });\n }\n} catch (error) {\n console.error(\"Error in package installation:\", error);\n process.exit(1);\n}\n"],"mappings":";;;;AAWA,MAAM,OAAO,QAAQ,IAAI,aAAa,MAAM,IAAI,IAAI,EAAE;AAGtD,IAAI,KAAK,WAAW,KAAM,KAAK,WAAW,KAAK,KAAK,OAAO,GACzD,SAAQ,KAAK,EAAE;AAGjB,IAAI;CAEF,MAAM,kBAAkB,KAAK,KAAK,QAAQ,KAAK,EAAE,eAAe;CAChE,MAAM,qBAAqB,GAAG,aAAa,iBAAiB,QAAQ;CACpE,MAAM,cAAc,KAAK,MAAM,mBAAmB;CAGlD,MAAM,wBAAgD;EACpD,GAAI,YAAY,gBAAgB,EAAE;EAClC,GAAI,YAAY,mBAAmB,EAAE;EACtC;AAED,MAAK,MAAM,OAAO,MAAM;AACtB,MAAI,QAAQ,GAAI;AAGhB,MAAI,sBAAsB,MAAM;AAC9B,WAAQ,IAAI,aAAa,IAAI,iCAAiC;AAC9D;;AAGF,UAAQ,IAAI,gBAAgB,MAAM;AAClC,WAAS,YAAY,IAAI,UAAU,EAAE,OAAO,WAAW,CAAC;;SAEnD,OAAO;AACd,SAAQ,MAAM,kCAAkC,MAAM;AACtD,SAAQ,KAAK,EAAE"}
package/dist/migrate.d.mts ADDED
@@ -0,0 +1 @@
+ export { };
package/dist/migrate.mjs ADDED
@@ -0,0 +1,55 @@
+ #!/usr/bin/env node
+ import { getConfig } from "@powerhousedao/config/node";
+ import { REACTOR_SCHEMA, getMigrationStatus, runMigrations } from "@powerhousedao/reactor";
+ import { Kysely, PostgresDialect } from "kysely";
+ import { Pool } from "pg";
+ //#region src/migrate.mts
+ function isPostgresUrl(url) {
+ return url.startsWith("postgresql://") || url.startsWith("postgres://");
+ }
+ async function main() {
+ const command = process.argv[2];
+ const config = getConfig();
+ const dbPath = process.env.PH_REACTOR_DATABASE_URL ?? process.env.DATABASE_URL ?? config.switchboard?.database?.url;
+ if (!dbPath || !isPostgresUrl(dbPath)) {
+ console.log("No PostgreSQL URL configured. Skipping migrations.");
+ console.log("(PGlite migrations are handled automatically on startup)");
+ return;
+ }
+ console.log(`Database: ${dbPath}`);
+ const db = new Kysely({ dialect: new PostgresDialect({ pool: new Pool({ connectionString: dbPath }) }) });
+ try {
+ if (command === "status") {
+ console.log("\nChecking migration status...");
+ const migrations = await getMigrationStatus(db, REACTOR_SCHEMA);
+ console.log("\nMigration Status:");
+ console.log("=================");
+ for (const migration of migrations) {
+ const status = migration.executedAt ? `[OK] Executed at ${migration.executedAt.toISOString()}` : "[--] Pending";
+ console.log(`${status} - ${migration.name}`);
+ }
+ } else {
+ console.log("\nRunning migrations...");
+ const result = await runMigrations(db, REACTOR_SCHEMA);
+ if (!result.success) {
+ console.error("Migration failed:", result.error?.message);
+ process.exit(1);
+ }
+ if (result.migrationsExecuted.length === 0) console.log("No migrations to run - database is up to date");
+ else {
+ console.log(`Successfully executed ${result.migrationsExecuted.length} migration(s):`);
+ for (const name of result.migrationsExecuted) console.log(` - ${name}`);
+ }
+ }
+ } catch (error) {
+ console.error("Error:", error instanceof Error ? error.message : String(error));
+ process.exit(1);
+ } finally {
+ await db.destroy();
+ }
+ }
+ main();
+ //#endregion
+ export {};
+
+ //# sourceMappingURL=migrate.mjs.map
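`migrate.mjs` reads its subcommand from `process.argv[2]` (`status` lists executed and pending migrations; anything else runs them) and only proceeds when the resolved URL is PostgreSQL. A sketch of driving it programmatically; the script path and connection string are placeholders:

```ts
// Run the bundled migration script twice: once to apply migrations, once to print status.
import { execFileSync } from "node:child_process";

const script = "./node_modules/@powerhousedao/switchboard/dist/migrate.mjs"; // assumed install path
const env = {
  ...process.env,
  PH_REACTOR_DATABASE_URL: "postgresql://user:pass@localhost:5432/switchboard", // example URL
};

execFileSync(process.execPath, [script], { stdio: "inherit", env });           // run pending migrations
execFileSync(process.execPath, [script, "status"], { stdio: "inherit", env }); // list executed/pending
```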
package/dist/migrate.mjs.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"migrate.mjs","names":[],"sources":["../src/migrate.mts"],"sourcesContent":["#!/usr/bin/env node\nimport { Kysely, PostgresDialect } from \"kysely\";\nimport { Pool } from \"pg\";\nimport {\n runMigrations,\n getMigrationStatus,\n REACTOR_SCHEMA,\n} from \"@powerhousedao/reactor\";\nimport { getConfig } from \"@powerhousedao/config/node\";\n\nfunction isPostgresUrl(url: string): boolean {\n return url.startsWith(\"postgresql://\") || url.startsWith(\"postgres://\");\n}\n\nasync function main() {\n const command = process.argv[2];\n const config = getConfig();\n\n const dbPath =\n process.env.PH_REACTOR_DATABASE_URL ??\n process.env.DATABASE_URL ??\n config.switchboard?.database?.url;\n\n if (!dbPath || !isPostgresUrl(dbPath)) {\n console.log(\"No PostgreSQL URL configured. Skipping migrations.\");\n console.log(\"(PGlite migrations are handled automatically on startup)\");\n return;\n }\n\n console.log(`Database: ${dbPath}`);\n\n const pool = new Pool({ connectionString: dbPath });\n\n const db = new Kysely<any>({\n dialect: new PostgresDialect({ pool }),\n });\n\n try {\n if (command === \"status\") {\n console.log(\"\\nChecking migration status...\");\n const migrations = await getMigrationStatus(db, REACTOR_SCHEMA);\n\n console.log(\"\\nMigration Status:\");\n console.log(\"=================\");\n\n for (const migration of migrations) {\n const status = migration.executedAt\n ? `[OK] Executed at ${migration.executedAt.toISOString()}`\n : \"[--] Pending\";\n console.log(`${status} - ${migration.name}`);\n }\n } else {\n console.log(\"\\nRunning migrations...\");\n const result = await runMigrations(db, REACTOR_SCHEMA);\n\n if (!result.success) {\n console.error(\"Migration failed:\", result.error?.message);\n process.exit(1);\n }\n\n if (result.migrationsExecuted.length === 0) {\n console.log(\"No migrations to run - database is up to date\");\n } else {\n console.log(\n `Successfully executed ${result.migrationsExecuted.length} migration(s):`,\n );\n for (const name of result.migrationsExecuted) {\n console.log(` - ${name}`);\n }\n }\n }\n } catch (error) {\n console.error(\n \"Error:\",\n error instanceof Error ? error.message : String(error),\n );\n process.exit(1);\n } finally {\n await db.destroy();\n }\n}\n\nvoid main();\n"],"mappings":";;;;;;AAUA,SAAS,cAAc,KAAsB;AAC3C,QAAO,IAAI,WAAW,gBAAgB,IAAI,IAAI,WAAW,cAAc;;AAGzE,eAAe,OAAO;CACpB,MAAM,UAAU,QAAQ,KAAK;CAC7B,MAAM,SAAS,WAAW;CAE1B,MAAM,SACJ,QAAQ,IAAI,2BACZ,QAAQ,IAAI,gBACZ,OAAO,aAAa,UAAU;AAEhC,KAAI,CAAC,UAAU,CAAC,cAAc,OAAO,EAAE;AACrC,UAAQ,IAAI,qDAAqD;AACjE,UAAQ,IAAI,2DAA2D;AACvE;;AAGF,SAAQ,IAAI,aAAa,SAAS;CAIlC,MAAM,KAAK,IAAI,OAAY,EACzB,SAAS,IAAI,gBAAgB,EAAE,MAHpB,IAAI,KAAK,EAAE,kBAAkB,QAAQ,CAAC,EAGZ,CAAC,EACvC,CAAC;AAEF,KAAI;AACF,MAAI,YAAY,UAAU;AACxB,WAAQ,IAAI,iCAAiC;GAC7C,MAAM,aAAa,MAAM,mBAAmB,IAAI,eAAe;AAE/D,WAAQ,IAAI,sBAAsB;AAClC,WAAQ,IAAI,oBAAoB;AAEhC,QAAK,MAAM,aAAa,YAAY;IAClC,MAAM,SAAS,UAAU,aACrB,oBAAoB,UAAU,WAAW,aAAa,KACtD;AACJ,YAAQ,IAAI,GAAG,OAAO,KAAK,UAAU,OAAO;;SAEzC;AACL,WAAQ,IAAI,0BAA0B;GACtC,MAAM,SAAS,MAAM,cAAc,IAAI,eAAe;AAEtD,OAAI,CAAC,OAAO,SAAS;AACnB,YAAQ,MAAM,qBAAqB,OAAO,OAAO,QAAQ;AACzD,YAAQ,KAAK,EAAE;;AAGjB,OAAI,OAAO,mBAAmB,WAAW,EACvC,SAAQ,IAAI,gDAAgD;QACvD;AACL,YAAQ,IACN,yBAAyB,OAAO,mBAAmB,OAAO,gBAC3D;AACD,SAAK,MAAM,QAAQ,OAAO,mBACxB,SAAQ,IAAI,OAAO,OAAO;;;UAIzB,OAAO;AACd,UAAQ,MACN,UACA,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,CACvD;AACD,UAAQ,KAAK,EAAE;WACP;AACR,QAAM,GAAG,SAAS;;;AAIjB,MAAM"}
package/dist/server-BMtyzhoR.mjs ADDED
@@ -0,0 +1,291 @@
+ import { n as isPostgresUrl, t as addDefaultDrive } from "./utils-DFl0ezBT.mjs";
+ import { register } from "node:module";
+ import * as Sentry from "@sentry/node";
+ import { childLogger, documentModelDocumentModelModule, setLogLevel } from "document-model";
+ import dotenv from "dotenv";
+ import { getConfig } from "@powerhousedao/config/node";
+ import { PGlite } from "@electric-sql/pglite";
+ import { metrics } from "@opentelemetry/api";
+ import { ReactorInstrumentation } from "@powerhousedao/opentelemetry-instrumentation-reactor";
+ import { ChannelScheme, EventBus, ReactorBuilder, ReactorClientBuilder, driveCollectionId, parseDriveUrl } from "@powerhousedao/reactor";
+ import { HttpPackageLoader, ImportPackageLoader, PackageManagementService, PackagesSubgraph, getUniqueDocumentModels, initializeAndStartAPI } from "@powerhousedao/reactor-api";
+ import { httpsHooksPath } from "@powerhousedao/reactor-api/https-hooks";
+ import { VitePackageLoader, createViteLogger, startViteServer } from "@powerhousedao/reactor-api/vite";
+ import { driveDocumentModelModule } from "@powerhousedao/shared/document-drive";
+ import { documentModels } from "@powerhousedao/vetra";
+ import { processorFactory } from "@powerhousedao/vetra/processors";
+ import { Kysely, PostgresDialect } from "kysely";
+ import { PGliteDialect } from "kysely-pglite-dialect";
+ import net from "node:net";
+ import path from "path";
+ import { Pool } from "pg";
+ import { EnvVarProvider } from "@openfeature/env-var-provider";
+ import { OpenFeature } from "@openfeature/server-sdk";
+ import { DEFAULT_RENOWN_URL, NodeKeyStorage, RenownBuilder, RenownCryptoBuilder, createSignatureVerifier } from "@renown/sdk/node";
+ //#region src/feature-flags.ts
+ async function initFeatureFlags() {
+ const provider = new EnvVarProvider();
+ await OpenFeature.setProviderAndWait(provider);
+ return OpenFeature.getClient();
+ }
+ //#endregion
+ //#region src/renown.ts
+ const logger = childLogger(["switchboard", "renown"]);
+ /**
+ * Initialize Renown for the Switchboard instance.
+ * This allows Switchboard to authenticate with remote services
+ * using the same identity established during `ph login`.
+ */
+ async function initRenown(options = {}) {
+ const { keypairPath, requireExisting = false, baseUrl = DEFAULT_RENOWN_URL } = options;
+ const keyStorage = new NodeKeyStorage(keypairPath, { logger });
+ const existingKeyPair = await keyStorage.loadKeyPair();
+ if (!existingKeyPair && requireExisting) throw new Error("No existing keypair found and requireExisting is true. Run \"ph login\" to create one.");
+ if (!existingKeyPair) logger.info("No existing keypair found. A new one will be generated.");
+ const renownCrypto = await new RenownCryptoBuilder().withKeyPairStorage(keyStorage).build();
+ const renown = await new RenownBuilder("switchboard", {}).withCrypto(renownCrypto).withBaseUrl(baseUrl).build();
+ logger.info("Switchboard identity initialized: @did", renownCrypto.did);
+ return renown;
+ }
+ /**
+ * Get the signer config for the given renown instance.
+ *
+ * @param renown - The renown instance
+ * @param requireSignature - If true, unsigned actions are rejected
+ */
+ function getRenownSignerConfig(renown, requireSignature) {
+ return {
+ signer: renown.signer,
+ verifier: createSignatureVerifier(requireSignature)
+ };
+ }
+ //#endregion
+ //#region src/server.mts
+ const defaultLogger = childLogger(["switchboard"]);
+ const LogLevel = process.env.LOG_LEVEL || "info";
+ setLogLevel(LogLevel);
+ dotenv.config();
+ const DOCUMENT_MODEL_SUBGRAPHS_ENABLED = "DOCUMENT_MODEL_SUBGRAPHS_ENABLED";
+ const DOCUMENT_MODEL_SUBGRAPHS_ENABLED_DEFAULT = true;
+ const REQUIRE_SIGNATURES = "REQUIRE_SIGNATURES";
+ const REQUIRE_SIGNATURES_DEFAULT = false;
+ if (process.env.SENTRY_DSN) {
+ defaultLogger.info("Initialized Sentry with env: @env", process.env.SENTRY_ENV);
+ Sentry.init({
+ dsn: process.env.SENTRY_DSN,
+ environment: process.env.SENTRY_ENV,
+ release: process.env.SENTRY_RELEASE || (process.env.npm_package_version ? `v${process.env.npm_package_version}` : void 0)
+ });
+ }
+ const DEFAULT_PORT = process.env.PORT ? Number(process.env.PORT) : 4001;
+ const PORT_FALLBACK_ATTEMPTS = 20;
+ /**
+ * Attempt to bind a throwaway TCP server to the given port. Resolves true if
+ * the port is free, false if the OS reports it in use. Any other error is
+ * surfaced so we don't silently mask real issues (permissions, bad host, …).
+ */
+ function isPortAvailable(port) {
+ return new Promise((resolve, reject) => {
+ const tester = net.createServer();
+ tester.once("error", (err) => {
+ if (err.code === "EADDRINUSE" || err.code === "EACCES") resolve(false);
+ else reject(err);
+ });
+ tester.once("listening", () => {
+ tester.close(() => resolve(true));
+ });
+ tester.listen({
+ port,
+ host: "::"
+ });
+ });
+ }
+ async function resolveServerPort(requested, strictPort, logger) {
+ if (strictPort) return requested;
+ for (let i = 0; i < PORT_FALLBACK_ATTEMPTS; i++) {
+ const candidate = requested + i;
+ if (await isPortAvailable(candidate)) {
+ if (candidate !== requested) logger.info(`Port ${requested} is in use. Falling back to port ${candidate}.`);
+ return candidate;
+ }
+ }
+ return requested;
+ }
+ async function initServer(serverPort, options, renown) {
+ if (options.meterProvider) metrics.setGlobalMeterProvider(options.meterProvider);
+ const { dev, packages = [], remoteDrives = [], logger = defaultLogger } = options;
+ logger.level = LogLevel;
+ const readModelPath = (options.dbPath ?? process.env.DATABASE_URL) || ".ph/read-storage";
+ const config = getConfig(options.configFile ?? path.join(process.cwd(), "powerhouse.config.json"));
+ const registryUrl = process.env.PH_REGISTRY_URL ?? config.packageRegistryUrl;
+ const registryPackages = process.env.PH_REGISTRY_PACKAGES;
+ const dynamicModelLoading = options.dynamicModelLoading ?? process.env.DYNAMIC_MODEL_LOADING === "true";
+ let httpLoader;
+ if (registryUrl) {
+ register(httpsHooksPath, import.meta.url);
+ httpLoader = new HttpPackageLoader({ registryUrl });
+ registryPackages?.split(",").forEach((p) => {
+ const name = p.trim();
+ if (!packages.includes(name)) packages.push(name);
+ });
+ }
+ const reactorLogger = logger.child(["reactor"]);
+ const initializeClient = async (documentModels$1) => {
+ const eventBus = new EventBus();
+ const builder = new ReactorBuilder().withEventBus(eventBus).withDocumentModels(getUniqueDocumentModels([
+ documentModelDocumentModelModule,
+ driveDocumentModelModule,
+ ...documentModels,
+ ...documentModels$1
+ ])).withChannelScheme(ChannelScheme.SWITCHBOARD).withSignalHandlers().withLogger(reactorLogger);
+ const maxSkipThreshold = parseInt(process.env.MAX_SKIP_THRESHOLD ?? "", 10);
+ if (!isNaN(maxSkipThreshold) && maxSkipThreshold > 0) {
+ builder.withExecutorConfig({ maxSkipThreshold });
+ logger.info(`Reactor maxSkipThreshold set to ${maxSkipThreshold}`);
+ }
+ const reactorDbUrl = process.env.PH_REACTOR_DATABASE_URL;
+ if (reactorDbUrl && isPostgresUrl(reactorDbUrl)) {
+ const kysely = new Kysely({ dialect: new PostgresDialect({ pool: new Pool({ connectionString: reactorDbUrl.includes("?") ? reactorDbUrl : `${reactorDbUrl}?sslmode=disable` }) }) });
+ builder.withKysely(kysely);
+ logger.info("Using PostgreSQL for reactor storage");
+ } else {
+ const kysely = new Kysely({ dialect: new PGliteDialect(new PGlite("./.ph/reactor-storage")) });
+ builder.withKysely(kysely);
+ logger.info("Using PGlite for reactor storage");
+ }
+ if (httpLoader && dynamicModelLoading) builder.withDocumentModelLoader(httpLoader.documentModelLoader);
+ const clientBuilder = new ReactorClientBuilder().withReactorBuilder(builder);
+ if (renown) {
+ const signerConfig = getRenownSignerConfig(renown, options.identity?.requireSignatures);
+ clientBuilder.withSigner(signerConfig);
+ }
+ const module = await clientBuilder.buildModule();
+ if (module.reactorModule) {
+ new ReactorInstrumentation(module.reactorModule).start();
+ reactorLogger.info("Reactor metrics instrumentation started");
+ }
+ return module;
+ };
+ let defaultDriveUrl = void 0;
+ const basePath = process.cwd();
+ const viteLogger = createViteLogger(logger);
+ const vite = dev ? await startViteServer(process.cwd(), viteLogger) : void 0;
+ if (!options.disableLocalPackages) packages.push(basePath);
+ const packageLoaders = [];
+ if (vite) packageLoaders.push(VitePackageLoader.build(vite));
+ else packageLoaders.push(new ImportPackageLoader());
+ if (httpLoader) {
+ packageLoaders.push(httpLoader);
+ registryPackages?.split(",").forEach((p) => {
+ const name = p.trim();
+ if (!packages.includes(name)) packages.push(name);
+ });
+ }
+ const apiLogger = logger.child(["reactor-api"]);
+ const api = await initializeAndStartAPI(initializeClient, {
+ port: serverPort,
+ dbPath: readModelPath,
+ https: options.https,
+ packageLoaders: packageLoaders.length > 0 ? packageLoaders : void 0,
+ packages,
+ processorConfig: options.processorConfig,
+ processors: { "@powerhousedao/vetra": [processorFactory] },
+ configFile: options.configFile ?? path.join(process.cwd(), "powerhouse.config.json"),
+ mcp: options.mcp ?? true,
+ logger: apiLogger,
+ enableDocumentModelSubgraphs: options.enableDocumentModelSubgraphs
+ }, "switchboard");
+ if (process.env.SENTRY_DSN) api.httpAdapter.setupSentryErrorHandler(Sentry);
+ const { client, graphqlManager, documentModelRegistry } = api;
+ if (httpLoader) {
+ const packageManagementService = new PackageManagementService({
+ defaultRegistryUrl: registryUrl,
+ httpLoader,
+ documentModelRegistry
+ });
+ packageManagementService.setOnModelsChanged(() => {
+ graphqlManager.regenerateDocumentModelSubgraphs().catch(logger.error);
+ });
+ const packagesSubgraph = new PackagesSubgraph({
+ relationalDb: void 0,
+ analyticsStore: void 0,
+ reactorClient: client,
+ graphqlManager,
+ syncManager: api.syncManager,
+ path: graphqlManager.getBasePath(),
+ packageManagementService
+ });
+ graphqlManager.registerSubgraphInstance(packagesSubgraph, "graphql", false).then(() => graphqlManager.updateRouter()).catch((error) => {
+ logger.error("Failed to register packages subgraph: @error", error);
+ });
+ }
+ if (options.drive) {
+ if (!renown) throw new Error("Cannot create default drive without Renown identity");
+ defaultDriveUrl = await addDefaultDrive(client, options.drive, serverPort);
+ }
+ if (vite) api.httpAdapter.mountRawMiddleware(vite.middlewares);
+ if (remoteDrives.length > 0) for (const remoteDriveUrl of remoteDrives) {
+ let driveId;
+ try {
+ const { syncManager } = api;
+ const parsed = parseDriveUrl(remoteDriveUrl);
+ driveId = parsed.driveId;
+ const remoteName = `remote-drive-${driveId}-${crypto.randomUUID()}`;
+ await syncManager.add(remoteName, driveCollectionId("main", driveId), {
+ type: "gql",
+ parameters: { url: parsed.graphqlEndpoint }
+ });
+ logger.debug("Remote drive @remoteDriveUrl synced", remoteDriveUrl);
+ } catch (error) {
+ if (error instanceof Error && error.message.includes("already exists")) {
+ logger.debug("Remote drive already added: @remoteDriveUrl", remoteDriveUrl);
+ driveId = remoteDriveUrl.split("/").pop();
+ } else logger.error("Failed to connect to remote drive @remoteDriveUrl: @error", remoteDriveUrl, error);
+ } finally {
+ if (!defaultDriveUrl && driveId) defaultDriveUrl = `${options.https ? "https" : "http"}://localhost:${serverPort}/d/${driveId}`;
+ }
+ }
+ return {
+ defaultDriveUrl,
+ api,
+ reactor: client,
+ renown,
+ port: serverPort
+ };
+ }
+ const startSwitchboard = async (options = {}) => {
+ const requestedPort = options.port ?? DEFAULT_PORT;
+ const logger = options.logger ?? defaultLogger;
+ const serverPort = await resolveServerPort(requestedPort, options.strictPort ?? false, logger);
+ const featureFlags = await initFeatureFlags();
+ const enableDocumentModelSubgraphs = await featureFlags.getBooleanValue(DOCUMENT_MODEL_SUBGRAPHS_ENABLED, options.enableDocumentModelSubgraphs ?? DOCUMENT_MODEL_SUBGRAPHS_ENABLED_DEFAULT);
+ options.enableDocumentModelSubgraphs = enableDocumentModelSubgraphs;
+ const requireSignatures = options.identity?.requireSignatures ?? await featureFlags.getBooleanValue(REQUIRE_SIGNATURES, REQUIRE_SIGNATURES_DEFAULT);
+ options.identity = {
+ ...options.identity,
+ requireSignatures
+ };
+ logger.info("Feature flags: @flags", JSON.stringify({
+ DOCUMENT_MODEL_SUBGRAPHS_ENABLED: enableDocumentModelSubgraphs,
+ REQUIRE_SIGNATURES: requireSignatures
+ }, null, 2));
+ let renown = null;
+ try {
+ renown = await initRenown(options.identity);
+ } catch (e) {
+ logger.warn("Failed to initialize ConnectCrypto: @error", e);
+ if (options.identity?.requireExisting) throw new Error("Identity required but failed to initialize. Run \"ph login\" first.");
+ }
+ try {
+ return await initServer(serverPort, options, renown);
+ } catch (e) {
+ Sentry.captureException(e);
+ logger.error("App crashed: @error", e);
+ throw e;
+ }
+ };
+ if (import.meta.main) await startSwitchboard();
+ //#endregion
+ export { startSwitchboard as n, isPortAvailable as t };
+
+ //# sourceMappingURL=server-BMtyzhoR.mjs.map
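The bundle above exports `startSwitchboard`, which resolves the port (probing up to 20 consecutive ports unless `strictPort` is set), reads feature flags, initializes the Renown identity, and boots the reactor API. A minimal programmatic start, based only on the options handled in this file; the `@powerhousedao/switchboard/server` import specifier is an assumption, since the package.json exports map is not shown in this section:

```ts
// Sketch: start Switchboard from application code rather than the CLI entry point.
import { startSwitchboard } from "@powerhousedao/switchboard/server"; // assumed export path for dist/server.mjs

const { port, defaultDriveUrl } = await startSwitchboard({
  port: 4001,        // when strictPort is false, the server probes up to 20 consecutive ports starting here
  strictPort: false,
  mcp: true,
  remoteDrives: [],  // e.g. ["https://my-switchboard.example/d/<driveId>"] to sync an existing drive
  identity: { requireSignatures: false },
});

console.log(`Switchboard listening on port ${port}`, defaultDriveUrl ?? "(no default drive)");
```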