@aexol/spectral 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,504 @@
+ /**
+  * Relay envelope dispatcher (CLI side).
+  *
+  * Translates inbound `RelayFrame`s from the backend into calls against the
+  * pure REST handlers (`handlers/projects.ts`, `handlers/sessions.ts`) and
+  * the live `SessionStreamManager`. Pure module — no socket, no timers,
+  * no logging side-channels except what the caller passes in. The caller
+  * (`commands/serve.ts`) owns lifecycle and the `RelayClient`.
+  *
+  * Two entry points, one per inbound frame kind we route:
+  *
+  * - `handleRestRequest(frame, deps)` — synchronous-style request/response.
+  *   Resolves to a `RestResponseFrame` the caller sends back via
+  *   `relay.send(...)`. Errors thrown by handlers are mapped to status
+  *   codes (400/404/500) and returned in the body, never re-thrown — the
+  *   backend has a 30 s pending-request timeout and we want every request
+  *   to terminate with a wire response so the browser doesn't sit on a
+  *   stale spinner.
+  *
+  * - `handleClientMessage(frame, deps)` — fire-and-forget. Attaches a
+  *   `Subscriber` (the relay socket, wrapped) to the session if not
+  *   already attached, then calls `manager.prompt()`. Outbound
+  *   `ServerEvent`s flow back through the subscriber as `WsEventFrame`s
+  *   via `deps.relay.send()`.
+  *
+  * Subscriber bookkeeping lives in `serve.ts`, NOT here. The dispatcher
+  * is given the subscriber by the caller so it stays pure-ish (depends
+  * only on `manager` + `relay.send`). This also keeps the lifecycle for
+  * "detach all on relay disconnect" in one place.
+  *
+  * Path matching is a 30-line inline switch; we deliberately do not pull
+  * in `path-to-regexp` for five literal patterns. The matcher returns
+  * the parsed `id` param when applicable.
+  *
+  * Close-code policy (mirroring backend `router.ts`):
+  * - The dispatcher itself never closes the relay; that's serve.ts.
+  * - Backend cancels pending REST requests with `code:"timeout"` (30 s)
+  *   or `code:"machine_offline"` (CLI socket dropped). Both are
+  *   transparent to this layer — we just respond when we can.
+  */
+ import { BadRequestError, NotFoundError } from "../server/handlers/errors.js";
+ import { handleCreateProject, handleDeleteProject, handleListProjects, handleListSessionsByProject, handleUpdateProject, } from "../server/handlers/projects.js";
+ import { handleCreateSession, handleDeleteSession, handleGetSessionDetail, handleUpdateSession, } from "../server/handlers/sessions.js";
+ import { shutdownState } from "../server/shutdown.js";
+ /**
+  * Inline path matcher. Returns `null` for any path/method combination we
+  * don't recognise; the caller turns that into a `404 Unknown route`.
+  *
+  * Intentionally literal — a regex table would be marginally fancier but
+  * also marginally slower and harder to read for ~9 routes.
+  */
+ export function matchRoute(method, path) {
+     // Strip query string if any (we don't use any, but be defensive).
+     const qIdx = path.indexOf("?");
+     const cleanPath = qIdx === -1 ? path : path.slice(0, qIdx);
+     // /api/projects
+     if (cleanPath === "/api/projects") {
+         if (method === "GET")
+             return { route: "list_projects" };
+         if (method === "POST")
+             return { route: "create_project" };
+         return null;
+     }
+     // /api/sessions
+     if (cleanPath === "/api/sessions") {
+         if (method === "POST")
+             return { route: "create_session" };
+         return null;
+     }
+     // /api/projects/:id and /api/projects/:id/sessions
+     const projectMatch = /^\/api\/projects\/([^/]+)(\/sessions)?$/.exec(cleanPath);
+     if (projectMatch) {
+         const id = decodeURIComponent(projectMatch[1]);
+         const isSessions = projectMatch[2] === "/sessions";
+         if (isSessions) {
+             if (method === "GET")
+                 return { route: "list_project_sessions", id };
+             return null;
+         }
+         if (method === "PATCH")
+             return { route: "update_project", id };
+         if (method === "DELETE")
+             return { route: "delete_project", id };
+         return null;
+     }
+     // /api/sessions/:id
+     const sessionMatch = /^\/api\/sessions\/([^/]+)$/.exec(cleanPath);
+     if (sessionMatch) {
+         const id = decodeURIComponent(sessionMatch[1]);
+         if (method === "GET")
+             return { route: "get_session", id };
+         if (method === "PATCH")
+             return { route: "update_session", id };
+         if (method === "DELETE")
+             return { route: "delete_session", id };
+         return null;
+     }
+     return null;
+ }
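// Usage sketch for `matchRoute` (illustrative only, not part of the package;
// the project id below is made up). Query strings are stripped before matching,
// and an unsupported method on a known path falls through to `null`.
const hit = matchRoute("GET", "/api/projects/p_123/sessions?limit=5");
// hit → { route: "list_project_sessions", id: "p_123" }
const miss = matchRoute("PUT", "/api/projects/p_123");
// miss → null; the caller turns this into `404 Unknown route`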
+ /**
+  * Dispatch a `rest_request` frame. Always resolves with a
+  * `RestResponseFrame` — handler exceptions are caught and translated.
+  *
+  * Status mapping:
+  * - 200: handler returned successfully (body is the handler's return value
+  *   or `{ ok: true }` for void returns)
+  * - 400: `BadRequestError` thrown by handler, OR malformed body for a
+  *   route that requires one, OR unknown route
+  * - 404: `NotFoundError` thrown by handler
+  * - 405: route matches but method doesn't (returned as 404 since we don't
+  *   distinguish — the matcher treats it as "no route", consistent
+  *   with the original Hono router which also 404'd)
+  * - 500: anything else; `error` field carries a sanitized message
+  */
+ export function handleRestRequest(frame, deps) {
+     const { reqId, method, path, body } = frame;
+     const logger = deps.logger ?? console;
+     const match = matchRoute(method, path);
+     if (!match) {
+         return {
+             kind: "rest_response",
+             reqId,
+             status: 404,
+             body: { error: `Unknown route: ${method} ${path}` },
+         };
+     }
+     try {
+         const result = dispatchRoute(match, body, deps);
+         return {
+             kind: "rest_response",
+             reqId,
+             status: 200,
+             body: result,
+         };
+     }
+     catch (err) {
+         if (err instanceof BadRequestError) {
+             return {
+                 kind: "rest_response",
+                 reqId,
+                 status: 400,
+                 body: { error: err.message, code: err.code },
+             };
+         }
+         if (err instanceof NotFoundError) {
+             return {
+                 kind: "rest_response",
+                 reqId,
+                 status: 404,
+                 body: { error: err.message, code: err.code },
+             };
+         }
+         // Unexpected — log locally so operators can correlate, but only
+         // return a sanitized message to the caller.
+         logger.error?.(`[dispatcher] unexpected error in ${method} ${path}:`, err);
+         return {
+             kind: "rest_response",
+             reqId,
+             status: 500,
+             body: { error: "Internal error" },
+         };
+     }
+ }
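// Illustrative call (not part of the package): an unrecognised path never
// reaches the handlers, so `deps` is trimmed to just a logger here. The
// `reqId` and path are made up; real deps wiring is owned by serve.ts.
const res = handleRestRequest(
    { reqId: "req-1", method: "GET", path: "/api/unknown", body: null },
    { logger: console },
);
// res → { kind: "rest_response", reqId: "req-1", status: 404,
//         body: { error: "Unknown route: GET /api/unknown" } }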
+ /**
+  * Inner switch: route → handler call. Separated so `handleRestRequest`
+  * owns ONE try/catch boundary for all error mapping.
+  *
+  * Meta publish is fired AFTER the handler returns successfully (i.e.
+  * after SQLite has committed) so subscribers never see a "create" event
+  * for a row the next read won't find. Publish failures are swallowed
+  * inside `safePublish` — the wire response goes back regardless.
+  *
+  * The body is `unknown` from the wire; each handler validates its own
+  * shape via the `BadRequestError`-throwing checks they already do.
+  */
+ function dispatchRoute(match, body, deps) {
+     const { store, manager, publishMetaEvent, logger } = deps;
+     const id = match.id ?? "";
+     switch (match.route) {
+         case "list_projects":
+             return handleListProjects(store);
+         case "create_project": {
+             const project = handleCreateProject(store, asObject(body));
+             safePublish(publishMetaEvent, logger, {
+                 type: "project_created",
+                 projectId: project.id,
+             });
+             return project;
+         }
+         case "update_project": {
+             const project = handleUpdateProject(store, id, asObject(body));
+             safePublish(publishMetaEvent, logger, {
+                 type: "project_renamed",
+                 projectId: project.id,
+             });
+             return project;
+         }
+         case "delete_project": {
+             // Order matters: tear down live streams BEFORE the row is gone so
+             // the bridges don't try to write to a deleted FK target. Mirrors
+             // the ordering the deleted Hono route used.
+             const project = store.getProject(id);
+             if (project) {
+                 const sessions = store.listSessionsByProject(id);
+                 manager.disposeProjectStreams(sessions.map((s) => s.id));
+             }
+             const result = handleDeleteProject(store, id);
+             // Publish AFTER the cascade has committed. We use `id` (the URL
+             // param) as the projectId — `handleDeleteProject` would have
+             // thrown 404 above if it didn't exist.
+             safePublish(publishMetaEvent, logger, {
+                 type: "project_deleted",
+                 projectId: id,
+             });
+             return result;
+         }
+         case "list_project_sessions":
+             return handleListSessionsByProject(store, id);
+         case "create_session": {
+             const session = handleCreateSession(store, asObject(body));
+             safePublish(publishMetaEvent, logger, {
+                 type: "session_created",
+                 projectId: session.projectId,
+                 sessionId: session.id,
+             });
+             return session;
+         }
+         case "get_session":
+             return handleGetSessionDetail(store, id);
+         case "update_session": {
+             const session = handleUpdateSession(store, id, asObject(body));
+             safePublish(publishMetaEvent, logger, {
+                 type: "session_renamed",
+                 projectId: session.projectId,
+                 sessionId: session.id,
+             });
+             return session;
+         }
+         case "delete_session": {
+             // Capture projectId BEFORE the row is gone — otherwise the
+             // post-delete publish has no project to attribute to. Treat a
+             // missing session here the same way `handleDeleteSession` does
+             // (throws NotFoundError → 404), but capture first.
+             const detail = store.getSession(id);
+             // Same ordering rationale as delete_project.
+             manager.disposeSessionStream(id);
+             handleDeleteSession(store, id);
+             if (detail) {
+                 safePublish(publishMetaEvent, logger, {
+                     type: "session_deleted",
+                     projectId: detail.projectId,
+                     sessionId: id,
+                 });
+             }
+             return { ok: true };
+         }
+     }
+ }
+ /**
+  * Invoke `publishMetaEvent` if provided, swallowing any throw. The
+  * caller has already returned a 200 to the browser; a publish failure
+  * (e.g. relay socket just dropped) must NEVER turn a successful
+  * mutation into a wire-level error.
+  */
+ function safePublish(publish, logger, event) {
+     if (!publish)
+         return;
+     try {
+         publish(event);
+     }
+     catch (err) {
+         (logger ?? console).error?.(`[dispatcher] publishMetaEvent failed for ${event.type}:`, err);
+     }
+ }
+ /**
+  * Coerce an `unknown` body to a plain object for the handler. Returns an
+  * empty object when the body is missing/non-object so the handler's own
+  * field checks produce the canonical error message ("name (string) is
+  * required") instead of a confusing "body must be object" from us.
+  */
+ function asObject(body) {
+     if (body !== null && typeof body === "object" && !Array.isArray(body)) {
+         return body;
+     }
+     return {};
+ }
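// Behaviour sketch for `asObject` (illustrative, values made up): non-object
// bodies collapse to `{}` so the handler's own field checks report the error.
const a = asObject({ name: "New project" }); // → { name: "New project" }
const b = asObject("not an object");         // → {}
const c = asObject(["not", "either"]);       // → {}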
+ /**
+  * Build a `Subscriber` that wraps each `ServerEvent` in a `WsEventFrame`
+  * and pushes it through the relay. `isOpen()` defers to the relay's
+  * connection state — we treat queue-while-disconnected as "still open"
+  * since `RelayClient.send()` will buffer and flush on reconnect.
+  */
+ function makeRelaySubscriber(sessionId, relay) {
+     return {
+         send(event) {
+             relay.send({
+                 kind: "ws_event",
+                 sessionId,
+                 event,
+             });
+         },
+         isOpen() {
+             // The relay client buffers while reconnecting; from the manager's
+             // POV the subscriber is always reachable until serve.ts detaches
+             // it. Returning `true` here means the manager won't garbage-collect
+             // the subscriber on transient socket blips.
+             return true;
+         },
+     };
+ }
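// Wrapping sketch for `makeRelaySubscriber` (illustrative; the stub relay and
// the event payload are made up, only `relay.send` is exercised).
const sent = [];
const stubRelay = { send: (frame) => sent.push(frame) };
const sub = makeRelaySubscriber("sess_1", stubRelay);
sub.send({ type: "agent_end" });
// sent[0] → { kind: "ws_event", sessionId: "sess_1", event: { type: "agent_end" } }
sub.isOpen(); // → true, always; detaching is serve.ts's responsibility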
+ /**
+  * Dispatch a `client_message` frame. Idempotent w.r.t. attach: the same
+  * `Subscriber` is reused across messages for a given session.
+  *
+  * Errors:
+  * - Unknown sessionId → emits a `ws_event` carrying an `error`-typed
+  *   `ServerEvent` so the browser sees it on the established stream
+  *   (mirrors how the old WS route surfaced `manager.attach` failures).
+  * - Unknown message shape → silently ignored with a logger warning;
+  *   the wire contract is `{type:"user_message", content:string}` and
+  *   anything else is a protocol violation we don't escalate.
+  * - `manager.prompt()` rejection → logged; the manager itself broadcasts
+  *   an `error` event to subscribers, so we don't double-report.
+  */
+ export function handleClientMessage(frame, deps) {
+     const { sessionId, message, modelId } = frame;
+     const { manager, relay, subscribers } = deps;
+     const logger = deps.logger ?? console;
+     // 0. Shutdown gate. Once `gracefulShutdown` flips the flag we refuse
+     //    new turns immediately — even if the relay socket is still open
+     //    and even if the session already has a subscriber attached. The
+     //    error surfaces on the same `ws_event` channel the browser is
+     //    already listening on, so the UI sees it on the established
+     //    stream and can show a "server shutting down" message instead of
+     //    sitting on a perpetual spinner.
+     //
+     //    We do NOT detach the subscriber here — pre-existing in-flight
+     //    turns still need to flush their final events before the grace
+     //    window closes.
+     if (shutdownState.isShuttingDown) {
+         relay.send({
+             kind: "ws_event",
+             sessionId,
+             event: {
+                 type: "error",
+                 message: "Server is shutting down; please retry shortly.",
+             },
+         });
+         return;
+     }
+     // 1. Validate inner message shape. Only `user_message` is supported
+     //    today; reject anything else loudly-but-locally.
+     if (message === null ||
+         typeof message !== "object" ||
+         message.type !== "user_message" ||
+         typeof message.content !== "string") {
+         logger.error?.(`[dispatcher] ignoring malformed client_message for ${sessionId}`);
+         return;
+     }
+     const content = message.content;
+     const isAexol = message.aexol === true;
+     // Set autonomous refactor loop state before firing the prompt.
+     // aexol:true → start/renew loop; aexol:false → stop loop.
+     manager.setAexolActive(sessionId, isAexol);
+     // 2. Attach (idempotent). On first attach we capture the replay payload
+     //    and synthesize a `session_ready` ws_event so the browser sees the
+     //    same first frame it would have on a direct WS connection.
+     let subscriber = subscribers.get(sessionId);
+     if (!subscriber) {
+         subscriber = makeRelaySubscriber(sessionId, relay);
+         let attachResult;
+         try {
+             attachResult = manager.attach(sessionId, subscriber);
+         }
+         catch (err) {
+             // Unknown session — surface as a ws_event so the browser's stream
+             // handler sees it. Don't keep the subscriber around.
+             const msg = err instanceof Error ? err.message : String(err);
+             relay.send({
+                 kind: "ws_event",
+                 sessionId,
+                 event: { type: "error", message: msg },
+             });
+             return;
+         }
+         subscribers.set(sessionId, subscriber);
+         // Synthesize the initial frame the WS route used to send. The
+         // browser's protocol layer expects `session_ready` as the first
+         // event on a new stream.
+         relay.send({
+             kind: "ws_event",
+             sessionId,
+             event: {
+                 type: "session_ready",
+                 sessionId,
+                 history: attachResult.history,
+                 currentTurn: attachResult.currentTurn,
+             },
+         });
+         // Surface bridge-start failures as `error` events; otherwise the
+         // browser would sit on a `session_ready` with no further frames.
+         attachResult.ready.catch((err) => {
+             const msg = err instanceof Error ? err.message : String(err);
+             relay.send({
+                 kind: "ws_event",
+                 sessionId,
+                 event: { type: "error", message: msg },
+             });
+         });
+     }
+     // 3. Fire the prompt. `prompt()` resolves on `agent_end`; errors are
+     //    converted to `error` events by the manager itself, so we just log.
+     //    `modelId` (Phase 3 — Available Models whitelist) is forwarded as
+     //    sticky next-prompt selection. The backend has already validated it
+     //    against the team-scoped whitelist; the CLI resolves it via pi's
+     //    own model registry inside the manager → bridge.
+     //
+     //    When `aexol: true` is set on the message, route to the refactor-loop
+     //    user model instead of the default session model.
+     const effectiveModelId = isAexol ? "__aexol_refactor_loop__" : modelId;
+     manager.prompt(sessionId, content, effectiveModelId).catch((err) => {
+         logger.error?.(`[dispatcher] manager.prompt failed for ${sessionId}:`, err);
+     });
+ }
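// End-to-end sketch for `handleClientMessage` (illustrative only, not part of
// the package). The stubs stand in for the real SessionStreamManager and
// RelayClient owned by serve.ts; only the members the dispatcher touches are
// stubbed, and the session id and content are made up.
const frames = [];
const relayStub = { send: (frame) => frames.push(frame) };
const managerStub = {
    setAexolActive: () => {},
    attach: () => ({ history: [], currentTurn: null, ready: Promise.resolve() }),
    prompt: async () => {},
};
const subscriberMap = new Map();
handleClientMessage(
    { sessionId: "sess_1", message: { type: "user_message", content: "Hello" } },
    { manager: managerStub, relay: relayStub, subscribers: subscriberMap },
);
// frames[0] → the synthesized `session_ready` ws_event for "sess_1";
// subscriberMap now holds one relay-backed Subscriber, reused on later messages.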
+ /**
+  * Dispatch a `subscribe` frame from the backend. Handles the case where a
+  * browser enters an old session — we load history from SQLite immediately
+  * via `manager.attach()` and synthesize `session_ready` so the browser sees
+  * the full chat history without needing to send a first `client_message`.
+  *
+  * Idempotent: if a subscriber already exists for this session, we re-send
+  * `session_ready` (history may have changed, and the newly-joined browser
+  * tab needs it). The `ready.catch` handler is only registered once — on the
+  * first subscriber creation — to avoid duplicate error events.
+  */
+ export function handleSubscribe(frame, deps) {
+     const { sessionId } = frame;
+     const { manager, relay, subscribers } = deps;
+     const logger = deps.logger ?? console;
+     let subscriber = subscribers.get(sessionId);
+     let isNewSubscriber = false;
+     if (!subscriber) {
+         subscriber = makeRelaySubscriber(sessionId, relay);
+         subscribers.set(sessionId, subscriber);
+         isNewSubscriber = true;
+     }
+     let attachResult;
+     try {
+         attachResult = manager.attach(sessionId, subscriber);
+     }
+     catch (err) {
+         const msg = err instanceof Error ? err.message : String(err);
+         relay.send({
+             kind: "ws_event",
+             sessionId,
+             event: { type: "error", message: msg },
+         });
+         return;
+     }
+     // Send history to all browser subscribers. The backend's fan-out
+     // delivers this to the newly-subscribed tab (and any others).
+     relay.send({
+         kind: "ws_event",
+         sessionId,
+         event: {
+             type: "session_ready",
+             sessionId,
+             history: attachResult.history,
+             currentTurn: attachResult.currentTurn,
+         },
+     });
+     if (isNewSubscriber) {
+         // Surface bridge-start failures as `error` events; otherwise the
+         // browser would sit on a `session_ready` with no further frames.
+         attachResult.ready.catch((err) => {
+             const msg = err instanceof Error ? err.message : String(err);
+             relay.send({
+                 kind: "ws_event",
+                 sessionId,
+                 event: { type: "error", message: msg },
+             });
+         });
+     }
+ }
+ /**
+  * Detach every subscriber the dispatcher has attached. Called by
+  * `serve.ts` on relay disconnect / shutdown so the underlying pi
+  * processes don't keep an unreachable subscriber pinned.
+  *
+  * NOTE: this does NOT dispose the streams themselves — pi keeps running
+  * so a future browser reconnect can resume mid-turn. Use
+  * `manager.dispose()` at full shutdown.
+  */
+ export function detachAllSubscribers(manager, subscribers) {
+     for (const [sessionId, sub] of subscribers) {
+         try {
+             manager.detach(sessionId, sub);
+         }
+         catch {
+             // ignore — best-effort cleanup
+         }
+     }
+     subscribers.clear();
+ }
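// Disconnect-path sketch (illustrative; the real caller is serve.ts on relay
// drop). Detach errors are swallowed per entry, then the map is cleared.
const live = new Map([["sess_1", { send: () => {}, isOpen: () => true }]]);
detachAllSubscribers({ detach: () => {} }, live);
// live.size → 0; the session streams themselves keep running.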
@@ -0,0 +1,116 @@
+ /**
+  * Persistent record of a registered machine.
+  *
+  * Lives at `<configDir>/machine.json` (mode 0600). Created the first time
+  * `spectral serve` runs after login; reused across restarts so a single
+  * developer machine appears as one logical agent in the backend's machine
+  * list, regardless of how many times the CLI process restarts.
+  *
+  * Why a separate file from `config.json`?
+  * - `config.json` carries the team API key (rotated on logout). The
+  *   machine identity outlives logout/login: if you `logout` then
+  *   `login` with the same team, your machine should keep its `machineId`
+  *   so the backend's audit log stays continuous.
+  * - The machine JWT is short-lived (decoded `exp` checked by
+  *   `registration.ts`). Splitting it out keeps the auth file minimal and
+  *   avoids any risk of accidentally serializing the JWT into a
+  *   `spectral login` log line.
+  *
+  * Schema is intentionally minimal — Batch 3 will add capability flags;
+  * unknown fields are preserved on round-trip via `extra` so older CLIs
+  * don't strip data the backend later cares about.
+  */
+ import { mkdirSync } from "node:fs";
+ import { readFile, writeFile } from "node:fs/promises";
+ import { dirname, join } from "node:path";
+ import { getConfigDir } from "../config.js";
+ const KNOWN_KEYS = new Set([
+     "machineId",
+     "machineName",
+     "machineJwt",
+     "teamId",
+     "registeredAt",
+     "hostname",
+     "version",
+ ]);
+ export function getMachineFile() {
+     return join(getConfigDir(), "machine.json");
+ }
+ /**
+  * Read the persisted machine record. Returns `null` when the file is
+  * missing or malformed — callers treat both as "needs registration".
+  *
+  * We intentionally do NOT throw on parse errors: a corrupted machine.json
+  * should self-heal on the next `spectral serve` (re-register, overwrite),
+  * not block startup forever.
+  */
+ export async function loadMachine() {
+     let raw;
+     try {
+         raw = await readFile(getMachineFile(), "utf8");
+     }
+     catch {
+         return null;
+     }
+     let parsed;
+     try {
+         parsed = JSON.parse(raw);
+     }
+     catch {
+         return null;
+     }
+     if (typeof parsed.machineId !== "string" ||
+         typeof parsed.machineName !== "string" ||
+         typeof parsed.machineJwt !== "string" ||
+         typeof parsed.registeredAt !== "number" ||
+         typeof parsed.hostname !== "string" ||
+         typeof parsed.version !== "string") {
+         return null;
+     }
+     const extra = {};
+     for (const [k, v] of Object.entries(parsed)) {
+         if (!KNOWN_KEYS.has(k))
+             extra[k] = v;
+     }
+     return {
+         machineId: parsed.machineId,
+         machineName: parsed.machineName,
+         machineJwt: parsed.machineJwt,
+         teamId: typeof parsed.teamId === "string" ? parsed.teamId : undefined,
+         registeredAt: parsed.registeredAt,
+         hostname: parsed.hostname,
+         version: parsed.version,
+         extra: Object.keys(extra).length > 0 ? extra : undefined,
+     };
+ }
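// Startup sketch (illustrative): `loadMachine()` folds "missing", "unparsable"
// and "fails validation" into a single `null`, which the caller treats as
// "needs registration" (the registration call itself lives in registration.ts
// and is not shown in this diff).
const machine = await loadMachine();
if (machine === null) {
    // No usable machine.json; re-register and overwrite it via saveMachine().
} else {
    console.log(`resuming as ${machine.machineName} (${machine.machineId})`);
}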
+ /**
+  * Write the machine record with restrictive permissions. Creates the
+  * config directory if missing (matches `writeConfig` in `config.ts`).
+  *
+  * Atomicity: we do NOT write-then-rename. The record is small, the file
+  * is single-writer (one `spectral serve` at a time per `$SPECTRAL_CONFIG_DIR`),
+  * and a torn write self-heals via `loadMachine` returning null + re-register.
+  */
+ export async function saveMachine(rec) {
+     const file = getMachineFile();
+     mkdirSync(dirname(file), { recursive: true, mode: 0o700 });
+     // Reassemble: known fields in stable order, then any forward-compat keys.
+     const toWrite = {
+         machineId: rec.machineId,
+         machineName: rec.machineName,
+         machineJwt: rec.machineJwt,
+         registeredAt: rec.registeredAt,
+         hostname: rec.hostname,
+         version: rec.version,
+     };
+     if (rec.teamId !== undefined)
+         toWrite.teamId = rec.teamId;
+     if (rec.extra) {
+         for (const [k, v] of Object.entries(rec.extra))
+             toWrite[k] = v;
+     }
+     await writeFile(file, JSON.stringify(toWrite, null, 2) + "\n", {
+         mode: 0o600,
+         encoding: "utf8",
+     });
+ }
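// Round-trip sketch with made-up values (illustrative, not part of the
// package). The unknown `capabilities` key demonstrates the forward-compat
// passthrough: it is not in KNOWN_KEYS, so it survives save → load via `extra`.
await saveMachine({
    machineId: "m_01",
    machineName: "dev-laptop",
    machineJwt: "<short-lived machine JWT>",
    registeredAt: Date.now(),
    hostname: "dev-laptop.local",
    version: "0.0.1",
    extra: { capabilities: ["example"] },
});
const roundTripped = await loadMachine();
// roundTripped.extra → { capabilities: ["example"] }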