arcway 0.1.13 → 0.1.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -120,7 +120,7 @@ export default {
   server: {
     port: 3000,
     shutdownTimeoutMs: 10_000,
-    maxBodySize: 1_048_576, // 1 MB
+    maxBodySize: 26_214_400, // 25 MB
   },
   api: {
     pathPrefix: '', // Prefix all API routes (e.g., '/api')
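Note: the README example and the web-server default further down in this diff change together, and the numbers agree: 25 * 1024 * 1024 = 26,214,400 bytes (25 MiB), up from 1024 * 1024 = 1,048,576 bytes (1 MiB).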
package/client/head.js CHANGED
@@ -137,4 +137,4 @@ function renderHeadToString(headData) {
   return parts.join('\n');
 }
 
-export { Head, clearSSRHeadData, extractHeadChildren, renderHeadToString, setSSRHeadData };
+export { Head, clearSSRHeadData, renderHeadToString, setSSRHeadData };
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "arcway",
-  "version": "0.1.13",
+  "version": "0.1.15",
   "description": "A convention-based framework for building modular monoliths with strict domain boundaries.",
   "license": "MIT",
   "type": "module",
package/server/boot/index.js CHANGED
@@ -1,16 +1,10 @@
 import { makeConfig } from '../config/loader.js';
-import { createDB } from '../db/index.js';
-import Redis from '../redis/index.js';
-import Events from '../events/index.js';
+import { createInfrastructure, destroyInfrastructure } from './infrastructure.js';
 import { EventHandler } from '../events/handler.js';
 import { JobRunner } from '../jobs/runner.js';
-import Logger from '../logger/index.js';
+import { WorkerPool } from '../jobs/worker-pool.js';
 import { loadEnvFiles } from '../env.js';
 import { McpRouter } from '../mcp/index.js';
-import Queue from '../queue/index.js';
-import Cache from '../cache/index.js';
-import Files from '../files/index.js';
-import { Mail } from '../mail/index.js';
 import WebServer from '../web-server.js';
 import { FileWatcher } from '../filewatcher/index.js';
 import { ApiRouter } from '../router/api-router.js';
@@ -27,30 +21,13 @@ async function boot(options) {
   const envFiles = loadEnvFiles(rootDir, mode);
   const config = await makeConfig(rootDir, { overrides: options.configOverrides, mode });
 
-  const log = new Logger(config.logger);
+  const infrastructure = await createInfrastructure(config, { runMigrations: true });
+  const { db, redis, queue, cache, files, mail, events, log } = infrastructure;
+
   const mcpRouter = new McpRouter(config.mcp, { log });
 
   if (envFiles.length > 0) log.info('Env files loaded', { envFiles });
 
-  const db = await createDB(config.database, { log });
-  await db.runMigrations();
-
-  const redis = new Redis(config.redis, { log });
-  await redis.connect();
-
-  const queue = new Queue(config.queue, { db, redis, log });
-  await queue.init();
-
-  const cache = new Cache(config.cache, { redis, log });
-  await cache.init();
-
-  const files = new Files(config.files, { log });
-  await files.init();
-
-  const mail = new Mail(config.mail, { db, log, queue });
-
-  const events = new Events(config.events, { redis, log });
-
   const eventHandler = new EventHandler(config.events, {
     events,
     log,
@@ -58,11 +35,32 @@ async function boot(options) {
   });
   await eventHandler.init();
 
-  const jobRunner = new JobRunner(config.jobs, { db, queue, cache, files, mail, events, log });
+  // Workers rebuild their own infrastructure from a JSON-safe config snapshot,
+  // so anything non-serializable on the main-thread config (streams, functions)
+  // must not ride through workerData. makeConfig already yields plain objects.
+  const workerPool =
+    config.jobs?.workerPoolSize > 0
+      ? new WorkerPool({
+          size: config.jobs.workerPoolSize,
+          workerData: { config },
+          defaultTimeoutMs: config.jobs?.staleTimeoutMs ?? null,
+        })
+      : null;
+
+  const jobRunner = new JobRunner(config.jobs, {
+    db,
+    queue,
+    cache,
+    files,
+    mail,
+    events,
+    log,
+    workerPool,
+  });
   await jobRunner.init();
 
-  const fileWatcher = new FileWatcher(rootDir, { log });
-  await fileWatcher.start();
+  const fileWatcher = mode === 'development' ? new FileWatcher(rootDir, { log }) : null;
+  if (fileWatcher) await fileWatcher.start();
 
   const appContext = { db, redis, events, queue, cache, files, mail, log, fileWatcher };
 
@@ -70,13 +68,14 @@ async function boot(options) {
 
   const apiRouter = new ApiRouter(config.api, {
     log,
+    mode,
     fileWatcher,
     appContext,
     sessionConfig: config.session,
   });
   await apiRouter.init();
 
-  const pagesRouter = new PagesRouter(config, { rootDir, log, fileWatcher, appContext });
+  const pagesRouter = new PagesRouter(config, { rootDir, log, mode, fileWatcher, appContext });
   await pagesRouter.init();
 
   const healthDeps = {
@@ -115,13 +114,12 @@ async function boot(options) {
 
   const shutdown = async () => {
     await jobRunner.shutdown();
+    if (workerPool) await workerPool.shutdown();
     await pagesRouter.close();
     await apiRouter.close();
     await webServer.close();
-    await fileWatcher.close();
-    await events.disconnect();
-    await redis.disconnect();
-    await db.destroy();
+    if (fileWatcher) await fileWatcher.close();
+    await destroyInfrastructure(infrastructure);
     await mcpRouter.cleanup(rootDir);
   };
 
package/server/boot/infrastructure.js ADDED
@@ -0,0 +1,47 @@
+import { createDB } from '../db/index.js';
+import Redis from '../redis/index.js';
+import Queue from '../queue/index.js';
+import Cache from '../cache/index.js';
+import Files from '../files/index.js';
+import { Mail } from '../mail/index.js';
+import Events from '../events/index.js';
+import Logger from '../logger/index.js';
+
+// Shared service construction for both the main-thread boot path and per-job
+// worker threads (see server/jobs/worker-entry.js). Workers call this with
+// `runMigrations: false` — only the main process should touch schema.
+async function createInfrastructure(config, { runMigrations = false } = {}) {
+  const log = new Logger(config.logger);
+
+  const db = await createDB(config.database, { log });
+  if (runMigrations) await db.runMigrations();
+
+  const redis = new Redis(config.redis, { log });
+  await redis.connect();
+
+  const queue = new Queue(config.queue, { db, redis, log });
+  await queue.init();
+
+  const cache = new Cache(config.cache, { redis, log });
+  await cache.init();
+
+  const files = new Files(config.files, { log });
+  await files.init();
+
+  const mail = new Mail(config.mail, { db, log, queue });
+
+  const events = new Events(config.events, { redis, log });
+
+  return { db, redis, queue, cache, files, mail, events, log };
+}
+
+async function destroyInfrastructure(services) {
+  if (!services) return;
+  // Order matches boot/index.js teardown: events → redis → db. Mail/queue/
+  // cache/files share redis/db and do not own independent connections.
+  await services.events?.disconnect?.();
+  await services.redis?.disconnect?.();
+  await services.db?.destroy?.();
+}
+
+export { createInfrastructure, destroyInfrastructure };
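For orientation, a hypothetical script-style use of the two new helpers outside boot (a one-off task or an integration test). The `makeConfig` call shape comes from boot/index.js above; the mode value, the use of `process.cwd()` as rootDir, and what the script does with the services are illustrative:

```js
import { makeConfig } from '../config/loader.js';
import { createInfrastructure, destroyInfrastructure } from './infrastructure.js';

// Build real services without booting the HTTP server. Leave migrations to
// the main server process, exactly as the worker path does.
const config = await makeConfig(process.cwd(), { mode: 'development' });
const services = await createInfrastructure(config, { runMigrations: false });
try {
  services.log.info('infrastructure ready', { migrations: false });
  // ... use services.db / services.queue / services.cache here ...
} finally {
  await destroyInfrastructure(services); // events, then redis, then db
}
```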
@@ -1,5 +1,14 @@
+import os from 'node:os';
 import path from 'node:path';
 
+// Leave one core free for the main thread (dispatch/polling/HTTP). 0 disables
+// worker threads entirely and runs handlers inline on the main thread.
+function defaultWorkerPoolSize() {
+  const cores =
+    typeof os.availableParallelism === 'function' ? os.availableParallelism() : os.cpus().length;
+  return Math.max(1, cores - 1);
+}
+
 const DEFAULTS = {
   enabled: true,
   backoffMs: 1000,
@@ -14,6 +23,7 @@ function resolve(config, { rootDir } = {}) {
   if (jobs.dir && !path.isAbsolute(jobs.dir)) {
     jobs.dir = path.resolve(rootDir, jobs.dir);
   }
+  if (jobs.workerPoolSize === undefined) jobs.workerPoolSize = defaultWorkerPoolSize();
   return { ...config, jobs };
 }
 
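The resolver only fills `workerPoolSize` when the app hasn't set one, so the pool is opt-out per app. A minimal config sketch, assuming the `export default` config-file style from the README excerpt above; the `jobs` keys are the ones this resolver and the boot path actually read, the values are illustrative:

```js
// Hypothetical app config: pin the jobs worker pool explicitly.
export default {
  jobs: {
    dir: 'jobs',                   // resolved against rootDir by resolve()
    workerPoolSize: 4,             // 0 disables the pool; handlers run inline
    staleTimeoutMs: 5 * 60 * 1000, // boot also uses this as defaultTimeoutMs
  },
};
```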
@@ -1,7 +1,7 @@
 const DEFAULTS = {
   host: '0.0.0.0',
   port: 3000,
-  maxBodySize: 1024 * 1024,
+  maxBodySize: 25 * 1024 * 1024,
   shutdownTimeoutMs: 10000,
   trustProxy: false,
 };
package/server/context.js CHANGED
@@ -64,4 +64,4 @@ function buildContext(appContext, extras) {
   return ctx;
 }
 
-export { buildContext, trackDb };
+export { buildContext };
@@ -2,9 +2,7 @@ import { discoverGraphQL } from './discovery.js';
 import { mergeGraphQLSchemas, mergeGraphQLResolvers } from './merge.js';
 import { createLoaderFactory } from './loaders.js';
 import { createGraphQLHandler } from './handler.js';
-import { attachGraphQLSubscriptions } from './subscriptions.js';
 export {
-  attachGraphQLSubscriptions,
   createGraphQLHandler,
   createLoaderFactory,
   discoverGraphQL,
@@ -1,7 +1,7 @@
-import { buildContext } from '../../context.js';
 import { validateEnqueue, toError, calculateBackoff } from '../queue.js';
 import { checkDbThroughput } from '../throughput.js';
 import LeaseManager from './lease.js';
+import { runHandler } from './run-handler.js';
 const DEFAULT_STALE_TIMEOUT_MS = 5 * 60 * 1e3;
 class KnexJobQueue {
   db;
@@ -11,12 +11,14 @@ class KnexJobQueue {
   registered = new Map();
   _size = 0;
   leaseManager;
+  workerPool;
   constructor(db, options) {
     this.db = db;
     this.tableName = options?.tableName ?? 'arcway_jobs';
     this.backoffMs = options?.backoffMs ?? 1000;
     this.staleTimeoutMs = options?.staleTimeoutMs ?? DEFAULT_STALE_TIMEOUT_MS;
     this.leaseManager = new LeaseManager(db, { tableName: options?.leaseTableName });
+    this.workerPool = options?.workerPool ?? null;
   }
   /** Create the jobs table if it doesn't exist. Must be called before use. */
   async init() {
@@ -39,9 +41,9 @@ class KnexJobQueue {
     await this.leaseManager.init();
     await this.syncSize();
   }
-  register(domain, definition, store) {
+  register(domain, definition, store, filePath) {
     const qualifiedName = `${domain}/${definition.name}`;
-    this.registered.set(qualifiedName, { domain, definition, store });
+    this.registered.set(qualifiedName, { domain, definition, store, filePath });
   }
   async enqueue(qualifiedName, payload, options) {
     const { reg, validatedPayload, maxRetries, delay } = validateEnqueue(
@@ -163,8 +165,7 @@ class KnexJobQueue {
 
     try {
       console.log(`[job] ${qualifiedName} attempt ${attempt}/${maxAttempts}`);
-      const ctx = buildContext(reg.store, { payload });
-      await reg.definition.handler(ctx);
+      await runHandler(reg, payload, this.workerPool);
       console.log(`[job] ${qualifiedName} completed`);
       await this._updateJob(jobId, { status: 'completed', attempt });
       this._size--;
@@ -1,7 +1,7 @@
-import { buildContext } from '../../context.js';
 import { validateEnqueue, toError, calculateBackoff } from '../queue.js';
 import { MemoryThroughputTracker } from '../throughput.js';
 import LeaseManager from './lease.js';
+import { runHandler } from './run-handler.js';
 
 class JobDispatcher {
   queue = [];
@@ -9,15 +9,17 @@ class JobDispatcher {
   backoffMs;
   throughputTracker = new MemoryThroughputTracker();
   leaseManager;
+  workerPool;
 
   constructor(options) {
     this.backoffMs = options?.backoffMs ?? 1000;
     this.leaseManager = new LeaseManager(null);
+    this.workerPool = options?.workerPool ?? null;
   }
 
-  register(domain, definition, store) {
+  register(domain, definition, store, filePath) {
     const qualifiedName = `${domain}/${definition.name}`;
-    this.registered.set(qualifiedName, { domain, definition, store });
+    this.registered.set(qualifiedName, { domain, definition, store, filePath });
   }
 
   async enqueue(qualifiedName, payload, options) {
@@ -89,8 +91,7 @@ class JobDispatcher {
 
     try {
       console.log(`[job] ${qualifiedName} attempt ${job.attempt}/${maxAttempts}`);
-      const ctx = buildContext(reg.store, { payload: job.payload });
-      await reg.definition.handler(ctx);
+      await runHandler(reg, job.payload, this.workerPool);
       job.status = 'completed';
       console.log(`[job] ${qualifiedName} completed`);
       this.throughputTracker.record(qualifiedName);
@@ -0,0 +1,22 @@
+import { buildContext } from '../../context.js';
+
+// Dispatch a job handler either inline (main thread, legacy / tests / continuous
+// jobs) or through the WorkerPool. The worker builds its own ctx from the
+// workerData config snapshot and calls handler(ctx) — see worker-entry.js.
+// We only route through the pool when both a pool *and* a filePath are
+// available; system jobs register without a filePath and stay inline on
+// purpose (they use namespaced services that don't round-trip through
+// workerData).
+async function runHandler(reg, payload, workerPool) {
+  if (workerPool && reg.filePath) {
+    await workerPool.run(
+      { handlerPath: reg.filePath, payload, withContext: true },
+      { timeoutMs: reg.definition.staleTimeout },
+    );
+    return;
+  }
+  const ctx = buildContext(reg.store, { payload });
+  await reg.definition.handler(ctx);
+}
+
+export { runHandler };
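For reference, a job file that round-trips through this path might look like the sketch below. The default-export shape (`name`, `handler`, plus optional `cooldownMs` / `staleTimeout`) matches what discoverJobs() reads in runner.js below; the file name, the handler body, and the exact `ctx` surface are assumptions:

```js
// Hypothetical jobs/reports/daily.js. Because it is registered with its
// filePath, a worker thread can dynamic-import this same default export
// (worker-entry.js accepts a plain function or an object with `handler`).
export default {
  name: 'reports/daily',       // falls back to the file's relative path
  cooldownMs: 60_000,
  staleTimeout: 5 * 60 * 1000, // runHandler also passes this as timeoutMs
  async handler(ctx) {
    // ctx comes from buildContext(); whether the payload is exposed as
    // ctx.payload, and the logger shape, are assumptions here.
    ctx.log?.info?.('daily report ran', { payload: ctx.payload });
  },
};
```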
package/server/jobs/runner.js CHANGED
@@ -21,7 +21,13 @@ async function discoverJobs(jobsDir) {
     if (!job.name) {
       job.name = relativePath.replace(/\\/g, '/').replace(/\.js$/, '');
     }
-    jobs.push({ definition: job, fileName: name, cooldownMs: job.cooldownMs, staleTimeout: job.staleTimeout });
+    jobs.push({
+      definition: job,
+      fileName: name,
+      filePath,
+      cooldownMs: job.cooldownMs,
+      staleTimeout: job.staleTimeout,
+    });
   }
   return jobs;
 }
@@ -39,10 +45,14 @@ class JobRunner {
   _stopped = false;
   _lastEnqueuedMinute = new Map();
 
-  constructor(config, { db, queue, cache, files, mail, events, log } = {}) {
+  constructor(config, { db, queue, cache, files, mail, events, log, workerPool } = {}) {
     this._config = config;
     this._log = log;
-    this._dispatcher = new JobDispatcher({ backoffMs: config?.backoffMs, staleTimeoutMs: config?.staleTimeoutMs });
+    this._dispatcher = new JobDispatcher({
+      backoffMs: config?.backoffMs,
+      staleTimeoutMs: config?.staleTimeoutMs,
+      workerPool,
+    });
     this._appContext = { db, queue, cache, files, mail, events, log };
   }
 
@@ -62,10 +72,11 @@ class JobRunner {
     const jobsDir = this._config?.dir;
     if (!jobsDir) return;
 
-    // Discover and register user jobs
+    // Discover and register user jobs. `filePath` is captured so the
+    // dispatcher can dynamic-import the handler inside a worker thread.
     const discovered = await discoverJobs(jobsDir);
-    for (const { definition, fileName } of discovered) {
-      this._dispatcher.register('app', definition, this._appContext);
+    for (const { definition, fileName, filePath } of discovered) {
+      this._dispatcher.register('app', definition, this._appContext, filePath);
       this._jobs.push({
         jobName: definition.name,
         fileName,
package/server/jobs/worker-entry.js ADDED
@@ -0,0 +1,122 @@
+import { parentPort, workerData } from 'node:worker_threads';
+import { pathToFileURL } from 'node:url';
+import { serializeError } from './worker-pool.js';
+import { buildContext } from '../context.js';
+import { createInfrastructure, destroyInfrastructure } from '../boot/infrastructure.js';
+
+// Worker-side entry for the jobs WorkerPool. Each worker owns its own DB /
+// redis / queue / cache / files / mail / events / log — built lazily on the
+// first task and reused for every subsequent task. The pool tells the worker
+// to tear those down with a `{ __shutdown: true }` control message (see
+// WorkerPool.shutdown) before terminate().
+if (!parentPort) {
+  throw new Error('worker-entry.js must be run as a worker thread');
+}
+
+const moduleCache = new Map();
+let infrastructure = null;
+let infrastructurePromise = null;
+let shuttingDown = false;
+
+async function ensureInfrastructure() {
+  if (infrastructure) return infrastructure;
+  if (!infrastructurePromise) {
+    const config = workerData?.config;
+    if (!config) {
+      throw new Error('Worker started without workerData.config — cannot build job context');
+    }
+    infrastructurePromise = createInfrastructure(config, { runMigrations: false }).then(
+      (services) => {
+        infrastructure = services;
+        return services;
+      },
+    );
+  }
+  return infrastructurePromise;
+}
+
+async function loadHandler(handlerPath, handlerExport) {
+  let mod = moduleCache.get(handlerPath);
+  if (!mod) {
+    const specifier = handlerPath.startsWith('file:')
+      ? handlerPath
+      : pathToFileURL(handlerPath).href;
+    mod = await import(specifier);
+    moduleCache.set(handlerPath, mod);
+  }
+  const exportName = handlerExport ?? 'default';
+  const exported = mod[exportName];
+  // Job files export `{ handler, ... }` as default; generic callers (and the
+  // Phase 1 unit tests) can also export a plain function directly.
+  const fn =
+    typeof exported === 'function'
+      ? exported
+      : typeof exported?.handler === 'function'
+        ? exported.handler
+        : null;
+  if (!fn) {
+    throw new Error(
+      `Worker handler export "${exportName}" in ${handlerPath} is not a function`,
+    );
+  }
+  return fn;
+}
+
+async function runTask(task) {
+  const { handlerPath, handlerExport, payload, withContext } = task ?? {};
+  if (!handlerPath) throw new Error('Worker task is missing handlerPath');
+  const fn = await loadHandler(handlerPath, handlerExport);
+  // `withContext: true` (the jobs path) gives the handler a real ctx built
+  // from the worker's own services. `withContext: false` (the Phase 1 tests
+  // and any future cpu-only caller) gets the raw payload — no DB spin-up.
+  if (withContext) {
+    const services = await ensureInfrastructure();
+    const ctx = buildContext(services, { payload });
+    return fn(ctx);
+  }
+  return fn(payload);
+}
+
+async function handleShutdown(id) {
+  shuttingDown = true;
+  try {
+    if (infrastructure) {
+      await destroyInfrastructure(infrastructure);
+      infrastructure = null;
+    }
+    parentPort.postMessage({ id, status: 'ok', result: null, __shutdown: true });
+  } catch (err) {
+    parentPort.postMessage({
+      id,
+      status: 'error',
+      error: serializeError(err),
+      __shutdown: true,
+    });
+  } finally {
+    // Exit cleanly so the pool's graceful-shutdown wait resolves on the
+    // 'exit' event rather than having to fall back to terminate().
+    process.exit(0);
+  }
+}
+
+parentPort.on('message', async (msg) => {
+  const { id, task } = msg ?? {};
+  if (task?.__shutdown) {
+    await handleShutdown(id);
+    return;
+  }
+  if (shuttingDown) {
+    parentPort.postMessage({
+      id,
+      status: 'error',
+      error: serializeError(new Error('Worker is shutting down')),
+    });
+    return;
+  }
+  try {
+    const result = await runTask(task);
+    parentPort.postMessage({ id, status: 'ok', result });
+  } catch (err) {
+    parentPort.postMessage({ id, status: 'error', error: serializeError(err) });
+  }
+});
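worker-pool.js itself is not in this diff, but worker-entry.js pins down the message protocol: the pool posts `{ id, task }`, the worker replies `{ id, status: 'ok' | 'error', result | error }`, and a task of `{ __shutdown: true }` triggers teardown followed by `process.exit(0)`. Below is a minimal single-worker sketch of the calling side; everything beyond that protocol (the function names, the timeout handling, the error re-hydration) is invented for illustration:

```js
import { Worker } from 'node:worker_threads';
import { randomUUID } from 'node:crypto';

// Spawn one worker with the JSON-safe config snapshot, mirroring the
// `workerData: { config }` that boot passes to WorkerPool.
function spawnJobWorker(config) {
  return new Worker(new URL('./worker-entry.js', import.meta.url), {
    workerData: { config },
  });
}

// Post a task and wait for the reply whose id matches ours.
function runOnWorker(worker, task, { timeoutMs } = {}) {
  return new Promise((resolve, reject) => {
    const id = randomUUID();
    const timer = timeoutMs
      ? setTimeout(() => {
          worker.off('message', onMessage);
          reject(new Error(`worker task ${id} timed out after ${timeoutMs}ms`));
        }, timeoutMs)
      : null;
    function onMessage(msg) {
      if (msg?.id !== id) return; // a reply to some other task
      worker.off('message', onMessage);
      if (timer) clearTimeout(timer);
      if (msg.status === 'ok') resolve(msg.result);
      // serializeError's exact output shape isn't shown in this diff; assume
      // at least a message field survives the round trip.
      else reject(new Error(msg.error?.message ?? 'worker task failed'));
    }
    worker.on('message', onMessage);
    worker.postMessage({ id, task });
  });
}

// Graceful shutdown: send the control task, then wait for 'exit', since
// worker-entry.js calls process.exit(0) once its teardown finishes.
async function shutdownWorker(worker) {
  const exited = new Promise((resolve) => worker.once('exit', resolve));
  worker.postMessage({ id: randomUUID(), task: { __shutdown: true } });
  await exited;
}
```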