@rubytech/create-realagent-code 0.1.21 → 0.1.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90) hide show
  1. package/dist/__tests__/samba-provision.test.js +202 -0
  2. package/dist/index.js +127 -73
  3. package/dist/samba-provision.js +215 -0
  4. package/dist/uninstall.js +160 -3
  5. package/package.json +1 -1
  6. package/payload/platform/plugins/docs/references/deployment.md +20 -0
  7. package/payload/platform/plugins/email/references/email-reference.md +4 -4
  8. package/payload/platform/plugins/scheduling/PLUGIN.md +1 -1
  9. package/payload/platform/plugins/workflows/PLUGIN.md +2 -2
  10. package/payload/platform/plugins/workflows/skills/workflow-manager/SKILL.md +1 -1
  11. package/payload/platform/templates/agents/admin/IDENTITY.md +12 -18
  12. package/payload/platform/templates/specialists/agents/personal-assistant.md +1 -1
  13. package/payload/platform/templates/specialists/agents/project-manager.md +1 -1
  14. package/payload/server/public/assets/{Checkbox-B79fVxpA.js → Checkbox-D1OQD43b.js} +1 -1
  15. package/payload/server/public/assets/admin-czNBxWor.js +216 -0
  16. package/payload/server/public/assets/{architectureDiagram-Q4EWVU46-D8e59YJ0.js → architectureDiagram-Q4EWVU46-BcwgT80u.js} +1 -1
  17. package/payload/server/public/assets/{blockDiagram-DXYQGD6D-CxaDkc0A.js → blockDiagram-DXYQGD6D-BMSyZUQA.js} +1 -1
  18. package/payload/server/public/assets/{brand-Cg9t5U6J.css → brand-2cku8WFs.css} +1 -1
  19. package/payload/server/public/assets/{brand-jT16ErmC.js → brand-CSQuxS9w.js} +1 -1
  20. package/payload/server/public/assets/{c4Diagram-AHTNJAMY-D0PAvq-q.js → c4Diagram-AHTNJAMY-DPRGY1jJ.js} +1 -1
  21. package/payload/server/public/assets/channel-fxEghWew.js +1 -0
  22. package/payload/server/public/assets/{chunk-336JU56O-B-CXn-Et.js → chunk-336JU56O-B7oQ3g1c.js} +2 -2
  23. package/payload/server/public/assets/{chunk-426QAEUC-BLzCQHKA.js → chunk-426QAEUC-C1P0yFXw.js} +1 -1
  24. package/payload/server/public/assets/{chunk-4TB4RGXK-Bql1UwLT.js → chunk-4TB4RGXK-LI7kOJd0.js} +1 -1
  25. package/payload/server/public/assets/{chunk-5FUZZQ4R-CQK7jBtX.js → chunk-5FUZZQ4R-CXQRGTQE.js} +1 -1
  26. package/payload/server/public/assets/{chunk-5PVQY5BW-AJc1-lvX.js → chunk-5PVQY5BW-NSyzpXRy.js} +1 -1
  27. package/payload/server/public/assets/{chunk-EDXVE4YY-Cf3E3THL.js → chunk-EDXVE4YY-voNwxbDs.js} +1 -1
  28. package/payload/server/public/assets/{chunk-ENJZ2VHE-BNx6z6hJ.js → chunk-ENJZ2VHE-CMEMPzYY.js} +1 -1
  29. package/payload/server/public/assets/{chunk-ICPOFSXX-DBUEFs2-.js → chunk-ICPOFSXX-hEbwu-pe.js} +1 -1
  30. package/payload/server/public/assets/{chunk-OYMX7WX6-Csx2p315.js → chunk-OYMX7WX6-DxskDrLs.js} +1 -1
  31. package/payload/server/public/assets/{chunk-U2HBQHQK-x17h7UYW.js → chunk-U2HBQHQK-D7TKgUo0.js} +1 -1
  32. package/payload/server/public/assets/{chunk-X2U36JSP--Lkl5yjV.js → chunk-X2U36JSP-BvPUQEPm.js} +1 -1
  33. package/payload/server/public/assets/{chunk-YZCP3GAM-C4GsNX8A.js → chunk-YZCP3GAM-BY-RWQUW.js} +1 -1
  34. package/payload/server/public/assets/{chunk-ZZ45TVLE-YrhUPmZc.js → chunk-ZZ45TVLE-DZvOYDY6.js} +1 -1
  35. package/payload/server/public/assets/classDiagram-6PBFFD2Q-BsWzGW0N.js +1 -0
  36. package/payload/server/public/assets/classDiagram-v2-HSJHXN6E-BGVa3h90.js +1 -0
  37. package/payload/server/public/assets/clone-Khvocke2.js +1 -0
  38. package/payload/server/public/assets/{dagre-YVALPG-M.js → dagre-Bt-fpckL.js} +1 -1
  39. package/payload/server/public/assets/{dagre-KV5264BT-D6JU6DW_.js → dagre-KV5264BT-Cnj0mUZl.js} +1 -1
  40. package/payload/server/public/assets/data-DBd-Buhp.js +1 -0
  41. package/payload/server/public/assets/device-url-actions-Bjz3Xzbm.js +33 -0
  42. package/payload/server/public/assets/{diagram-5BDNPKRD-yeO06N5Q.js → diagram-5BDNPKRD-DjLzvOlx.js} +1 -1
  43. package/payload/server/public/assets/{diagram-G4DWMVQ6-DzbVT_BC.js → diagram-G4DWMVQ6-DTfuRd-T.js} +1 -1
  44. package/payload/server/public/assets/{diagram-MMDJMWI5-DwYO5VZF.js → diagram-MMDJMWI5-BaL2mCnx.js} +1 -1
  45. package/payload/server/public/assets/{diagram-TYMM5635-BLUcdkDS.js → diagram-TYMM5635-C5InWY5R.js} +1 -1
  46. package/payload/server/public/assets/{erDiagram-SMLLAGMA-BiEUB19e.js → erDiagram-SMLLAGMA-DO7BXTpn.js} +1 -1
  47. package/payload/server/public/assets/{flowDiagram-DWJPFMVM-TILIKxOp.js → flowDiagram-DWJPFMVM-DDdAKfLf.js} +1 -1
  48. package/payload/server/public/assets/{ganttDiagram-T4ZO3ILL-B7cGzYqT.js → ganttDiagram-T4ZO3ILL-arJD8Utm.js} +1 -1
  49. package/payload/server/public/assets/{gitGraphDiagram-UUTBAWPF-DFOxN5bc.js → gitGraphDiagram-UUTBAWPF-C55GH-OS.js} +1 -1
  50. package/payload/server/public/assets/graph-DUtVdnZ6.js +1 -0
  51. package/payload/server/public/assets/graph-labels-Dxfue-fP.js +1 -0
  52. package/payload/server/public/assets/{graphlib-BBibixaA.js → graphlib-DL9PM7Ex.js} +1 -1
  53. package/payload/server/public/assets/{infoDiagram-42DDH7IO-nH2azhY8.js → infoDiagram-42DDH7IO-BMSGqUbG.js} +1 -1
  54. package/payload/server/public/assets/{ishikawaDiagram-UXIWVN3A-WD3tfqFi.js → ishikawaDiagram-UXIWVN3A-Dw6BZ6BG.js} +1 -1
  55. package/payload/server/public/assets/{journeyDiagram-VCZTEJTY-LUkaVSqw.js → journeyDiagram-VCZTEJTY-DrywUGXw.js} +1 -1
  56. package/payload/server/public/assets/{kanban-definition-6JOO6SKY-Dk-lYgpJ.js → kanban-definition-6JOO6SKY-DuwtVBBc.js} +1 -1
  57. package/payload/server/public/assets/{line-BDv6CEnp.js → line-JAksyKHj.js} +1 -1
  58. package/payload/server/public/assets/{mermaid-parser.core-D2XsSGgp.js → mermaid-parser.core-BMq-ApBW.js} +1 -1
  59. package/payload/server/public/assets/{mermaid.core-FyN-UmQV.js → mermaid.core-tH4oX0Kh.js} +3 -3
  60. package/payload/server/public/assets/{mindmap-definition-QFDTVHPH-BRAHEUIS.js → mindmap-definition-QFDTVHPH-D1OiiJga.js} +1 -1
  61. package/payload/server/public/assets/page-BZpoS7iR.js +1 -0
  62. package/payload/server/public/assets/{page-CZQd-W3C.js → page-CkvBvezS.js} +2 -2
  63. package/payload/server/public/assets/{pieDiagram-DEJITSTG-BqibVC2X.js → pieDiagram-DEJITSTG-Ckwm69PW.js} +1 -1
  64. package/payload/server/public/assets/{public-BDUZIabs.js → public-C-dTMgXu.js} +5 -5
  65. package/payload/server/public/assets/{quadrantDiagram-34T5L4WZ-DNuExGnr.js → quadrantDiagram-34T5L4WZ-COw3yZ1j.js} +1 -1
  66. package/payload/server/public/assets/{requirementDiagram-MS252O5E-5JXTdydh.js → requirementDiagram-MS252O5E-DqGzM4K-.js} +1 -1
  67. package/payload/server/public/assets/{sankeyDiagram-XADWPNL6-B_8rhvcR.js → sankeyDiagram-XADWPNL6-D-l1c_Pl.js} +1 -1
  68. package/payload/server/public/assets/{sequenceDiagram-FGHM5R23-BznkBgjf.js → sequenceDiagram-FGHM5R23-BeIi0DtJ.js} +1 -1
  69. package/payload/server/public/assets/{stateDiagram-FHFEXIEX-BeAZOQfs.js → stateDiagram-FHFEXIEX-C-jgegLk.js} +1 -1
  70. package/payload/server/public/assets/stateDiagram-v2-QKLJ7IA2-BaMs8Znv.js +1 -0
  71. package/payload/server/public/assets/{timeline-definition-GMOUNBTQ-CpJAs-Vw.js → timeline-definition-GMOUNBTQ-BGFKkYmi.js} +1 -1
  72. package/payload/server/public/assets/{vennDiagram-DHZGUBPP-BzH3ItkG.js → vennDiagram-DHZGUBPP-5NuIhJLS.js} +1 -1
  73. package/payload/server/public/assets/{wardleyDiagram-NUSXRM2D-ax9AgwA1.js → wardleyDiagram-NUSXRM2D-Be9ytVut.js} +1 -1
  74. package/payload/server/public/assets/{xychartDiagram-5P7HB3ND-CV6vt_tW.js → xychartDiagram-5P7HB3ND-DCyHg41R.js} +1 -1
  75. package/payload/server/public/data.html +5 -5
  76. package/payload/server/public/graph.html +6 -6
  77. package/payload/server/public/index.html +8 -8
  78. package/payload/server/public/public.html +5 -5
  79. package/payload/server/server.js +135 -85
  80. package/payload/server/public/assets/admin-DgB_IeWB.js +0 -216
  81. package/payload/server/public/assets/channel-BU_eIdRB.js +0 -1
  82. package/payload/server/public/assets/classDiagram-6PBFFD2Q-DMpM1d2b.js +0 -1
  83. package/payload/server/public/assets/classDiagram-v2-HSJHXN6E-D_XbuPVj.js +0 -1
  84. package/payload/server/public/assets/clone-BBT00JUO.js +0 -1
  85. package/payload/server/public/assets/data-BdwO_kv-.js +0 -1
  86. package/payload/server/public/assets/device-url-actions-C8dD0ydz.js +0 -33
  87. package/payload/server/public/assets/graph-CfZJrc9u.js +0 -1
  88. package/payload/server/public/assets/graph-labels-DJ717p00.js +0 -1
  89. package/payload/server/public/assets/page-BWHYktEF.js +0 -1
  90. package/payload/server/public/assets/stateDiagram-v2-QKLJ7IA2-iVlXKz7S.js +0 -1
@@ -0,0 +1,215 @@
1
+ // Task 034 — Samba provisioning for the brand Pi filesystem.
2
+ //
3
+ // Same shape as apt-resolve.ts: pure decision functions in this file, no I/O;
4
+ // the installer wraps them with the side-effecting spawnSync + log lines. The
5
+ // pure layer is fully unit-tested; the wrapper is exercised end-to-end on a
6
+ // real Pi during install/uninstall verification.
7
+ //
8
+ // What this module owns:
9
+ // 1. Pick the LAN interface to bind smbd to (loopback excluded).
10
+ // 2. Render the brand-scoped Samba stanza per the spec in the task brief.
11
+ // 3. Merge that stanza into an existing /etc/samba/smb.conf idempotently —
12
+ // replace if a stanza for this brand already exists, otherwise append.
13
+ // 4. Remove this brand's stanza on uninstall while leaving every peer brand's
14
+ // stanza intact (peer-brand isolation is the structural invariant).
15
+ // 5. Report whether any brand stanza remains so the uninstall step can
16
+ // decide whether to apt-purge samba.
17
+ // ---------------------------------------------------------------------------
18
+ // Brand-scoped Samba stanza
19
+ // ---------------------------------------------------------------------------
20
+ /**
21
+ * Render the `[<brand>]` share stanza. Exact directives are the task's spec
22
+ * verbatim: share rooted at `sharePath`, writable by `admin` only, force-uid
23
+ * to admin so files created via SMB are owned by the same uid as files
24
+ * created over SSH, and standard create/dir masks for an ext4 home directory.
25
+ */
26
export function renderBrandStanza(input) {
  const { brand, sharePath } = input;
  // Directive order and values are the task spec verbatim; only the header
  // and path vary per brand.
  const directives = [
    `path = ${sharePath}`,
    `read only = no`,
    `valid users = admin`,
    `force user = admin`,
    `browseable = yes`,
    `create mask = 0664`,
    `directory mask = 0775`,
  ];
  // One trailing newline so stanzas concatenate cleanly in smb.conf.
  return `[${brand}]\n${directives.map((d) => `   ${d}`).join("\n")}\n`;
}
39
+ /**
40
+ * Render the `[global]` section. `interfaces = lo <lan>` plus `bind interfaces
41
+ * only = yes` is the LAN-only posture: smbd accepts connections on the LAN
42
+ * interface and loopback, nothing else. Cloudflare tunnels carry HTTPS only,
43
+ * so this is the structural guarantee that SMB never leaves the LAN even if
44
+ * the firewall is misconfigured upstream.
45
+ */
46
export function renderGlobalSection(input) {
  // LAN-only posture: bind to loopback plus the single detected LAN
  // interface, nothing else. Everything other than `interfaces` is a fixed
  // standalone-server baseline.
  const directives = [
    `workgroup = WORKGROUP`,
    `server string = %h Samba`,
    `server role = standalone server`,
    `interfaces = lo ${input.lanInterface}`,
    `bind interfaces only = yes`,
    `log file = /var/log/samba/log.%m`,
    `max log size = 1000`,
    `panic action = /usr/share/samba/panic-action %d`,
    `passdb backend = tdbsam`,
    `unix password sync = no`,
    `map to guest = bad user`,
    `usershare allow guests = no`,
  ];
  return `[global]\n${directives.map((d) => `   ${d}`).join("\n")}\n`;
}
64
+ /**
65
+ * Build a complete smb.conf from scratch — globals + one brand stanza. Used
66
+ * only when no existing config is present (fresh apt install always writes
67
+ * one, so this branch fires on weird edge cases like operator-deleted conf
68
+ * files). Normal path is `mergeSmbConf` against the apt-shipped default.
69
+ */
70
export function renderFullSmbConf(input) {
  // Globals first, then the single brand stanza — the from-scratch layout
  // used only when no smb.conf exists at all.
  const globals = renderGlobalSection({ lanInterface: input.lanInterface });
  const stanza = renderBrandStanza({ brand: input.brand, sharePath: input.sharePath });
  return `${globals}${stanza}`;
}
74
+ // ---------------------------------------------------------------------------
75
+ // LAN interface detection
76
+ // ---------------------------------------------------------------------------
77
+ /**
78
+ * Pick the LAN interface to bind smbd to. Preference order: wlan0, eth0, then
79
+ * the first non-loopback interface with a non-internal IPv4 address. Returns
80
+ * null when no such interface exists — the caller treats that as a hard
81
+ * failure (the Pi has no LAN connectivity; SMB has nothing to bind to).
82
+ *
83
+ * Input is the shape returned by `os.networkInterfaces()` so the test can pass
84
+ * realistic fixtures without spinning up real interfaces.
85
+ */
86
export function pickLanInterface(ifaces) {
  // An interface qualifies when it carries at least one non-internal IPv4
  // address (same truthiness guard as os.networkInterfaces() entries).
  const lanIPv4 = (addrs) => !!addrs && addrs.some((a) => a.family === "IPv4" && !a.internal);
  // Explicit preference order first: wireless, then wired.
  for (const preferred of ["wlan0", "eth0"]) {
    if (lanIPv4(ifaces[preferred])) return preferred;
  }
  // Fall back to the first qualifying non-loopback interface, in object order.
  for (const [name, addrs] of Object.entries(ifaces)) {
    if (name === "lo") continue;
    if (lanIPv4(addrs)) return name;
  }
  // No LAN connectivity at all — caller treats this as a hard failure.
  return null;
}
105
+ // ---------------------------------------------------------------------------
106
+ // smb.conf merge / remove
107
+ // ---------------------------------------------------------------------------
108
+ /**
109
+ * Find the start and end byte offsets of a `[<section>]` block in an smb.conf.
110
+ * End is the index just before the next `[…]` header or EOF, whichever comes
111
+ * first. Returns null when no such section exists.
112
+ *
113
+ * Section names are matched case-sensitively because Samba itself is
114
+ * case-sensitive for share names on Linux filesystems.
115
+ */
116
function findSectionRange(conf, section) {
  const lines = conf.split("\n");
  const wanted = `[${section}]`;
  // Header match is on the trimmed line and case-sensitive (Samba share
  // names are case-sensitive on Linux filesystems).
  const startLine = lines.findIndex((l) => l.trim() === wanted);
  if (startLine === -1) return null;
  // The section runs up to (not including) the next `[…]` header, or EOF.
  let endLine = startLine + 1;
  while (endLine < lines.length && !/^\[[^\]]+\]$/.test(lines[endLine].trim())) {
    endLine += 1;
  }
  // Byte offset of the header line: each preceding line plus its newline.
  let start = 0;
  for (let i = 0; i < startLine; i++) start += lines[i].length + 1;
  // Section byte length: each line plus its newline, except that a section
  // running to EOF has no newline after the final split fragment.
  let length = 0;
  for (let i = startLine; i < endLine; i++) length += lines[i].length + 1;
  if (endLine === lines.length) length -= 1;
  return { start, end: start + length };
}
139
+ /**
140
+ * Merge globals + brand stanza into an existing smb.conf. Idempotent: a
141
+ * second call with the same inputs produces byte-identical output.
142
+ *
143
+ * - `[global]`: replace verbatim with our rendered globals. The apt-shipped
144
+ * `[global]` is a good starting point but doesn't carry our LAN-only
145
+ * directives; replacing it is the only way to guarantee `bind interfaces
146
+ * only = yes` is in effect.
147
+ * - `[<brand>]`: replace if present, append at end of file otherwise. Peer
148
+ * brand stanzas (other `[…]` blocks) are preserved verbatim.
149
+ */
150
/**
 * Merge our rendered `[global]` section and `[<brand>]` stanza into an
 * existing smb.conf. Idempotent: a second call with identical inputs yields
 * byte-identical output. Peer-brand `[…]` blocks are preserved verbatim.
 *
 * @param {{existing: string, brand: string, sharePath: string, lanInterface: string}} input
 * @returns {string} the merged smb.conf text
 */
export function mergeSmbConf(input) {
  const { existing, brand, sharePath, lanInterface } = input;
  const newGlobals = renderGlobalSection({ lanInterface });
  const newBrand = renderBrandStanza({ brand, sharePath });
  let conf = existing;
  // Replace the [global] section in place, or prepend one when absent.
  const globalRange = findSectionRange(conf, "global");
  if (globalRange) {
    conf = conf.slice(0, globalRange.start) + newGlobals + conf.slice(globalRange.end);
  } else {
    // Fix: the previous `conf.startsWith("\n") ? conf : conf` ternary was a
    // no-op (both arms identical) — the expression always evaluated to
    // `newGlobals + conf`, which is also the intended result: the rendered
    // globals end with "\n", so no extra separator is needed.
    conf = newGlobals + conf;
  }
  // Replace the [<brand>] stanza in place, or append it at end of file.
  const brandRange = findSectionRange(conf, brand);
  if (brandRange) {
    conf = conf.slice(0, brandRange.start) + newBrand + conf.slice(brandRange.end);
  } else {
    // Guarantee exactly one newline before the appended stanza.
    if (!conf.endsWith("\n")) conf += "\n";
    conf += newBrand;
  }
  return conf;
}
176
+ /**
177
+ * Remove `[<brand>]` from an smb.conf. Returns the input unchanged when no
178
+ * such stanza exists. Other sections (global, other brands) are preserved
179
+ * byte-for-byte. Used by the uninstall path.
180
+ */
181
export function removeBrandStanza(input) {
  const { existing, brand } = input;
  // No stanza — hand the input back untouched so callers can cheaply detect
  // "nothing to do" via identity comparison.
  const range = findSectionRange(existing, brand);
  return range === null
    ? existing
    : existing.slice(0, range.start) + existing.slice(range.end);
}
188
+ /**
189
+ * True when at least one non-global stanza remains in the smb.conf. Used by
190
+ * uninstall to decide whether to apt-purge samba: only purge when no brand
191
+ * stanza is left.
192
+ */
193
export function hasAnyBrandStanza(conf) {
  // Any `[…]` header whose name is not "global" (case-insensitive) counts as
  // a brand stanza; uninstall only purges samba when none remain.
  return conf.split("\n").some((line) => {
    const m = line.trim().match(/^\[([^\]]+)\]$/);
    return m !== null && m[1].toLowerCase() !== "global";
  });
}
202
// ---------------------------------------------------------------------------
// Step-marker contract
// ---------------------------------------------------------------------------
/**
 * The four install-invariant steps (install, uninstall, set-pin all emit
 * them). Locked here so a call-site typo cannot silently drift from the spec.
 * States: `ok`, `fail: <stderr>` (installer aborts), `deferred reason=<…>`.
 */
export const SAMBA_STEPS = ["apt", "conf", "user", "units"];
export function formatSambaMarker(step, state) {
  return ["[install-invariant]", `samba-provision-${step}`, state].join(" ");
}
package/dist/uninstall.js CHANGED
@@ -3,6 +3,7 @@ import { existsSync, mkdirSync, readFileSync, readdirSync, rmSync, appendFileSyn
3
3
  import { resolve, join, dirname } from "node:path";
4
4
  import { homedir } from "node:os";
5
5
  import { createInterface } from "node:readline";
6
+ import { removeBrandStanza, hasAnyBrandStanza } from "./samba-provision.js";
6
7
  const HOME = homedir();
7
8
  const PAYLOAD_DIR = resolve(import.meta.dirname, "../payload");
8
9
  // Brand manifest — read from payload to derive brand-specific installation paths.
@@ -23,7 +24,7 @@ catch (err) {
23
24
  const INSTALL_DIR = resolve(HOME, BRAND.installDir);
24
25
  const CONFIG_DIR = resolve(HOME, BRAND.configDir);
25
26
  const LOG_FILE = join("/tmp", `${BRAND.productName.toLowerCase().replace(/\s+/g, "-")}-uninstall-${new Date().toISOString().replace(/[:.]/g, "-")}.log`);
26
- const TOTAL = "10";
27
+ const TOTAL = "11";
27
28
  // ---------------------------------------------------------------------------
28
29
  // Logging — timestamped to console AND persistent log file in /tmp
29
30
  // (Log lives in /tmp because the uninstall deletes the config directory)
@@ -233,6 +234,80 @@ function stopServices() {
233
234
  console.log(" Stopped Ollama");
234
235
  }
235
236
  // ---------------------------------------------------------------------------
237
+ // Step 1b: Strip the legacy installer-registered cron block
238
+ //
239
+ // Task 039 retired `installCrons()` in src/index.ts. Older installs (every
240
+ // brand prior to the Task 039 release) wrote three minute-cadence entries
241
+ // between `# BEGIN <BRAND> CRONS` and `# END <BRAND> CRONS`. This step
242
+ // removes that block on uninstall so an upgraded device is fully torn down
243
+ // and a subsequent reinstall does not get re-seeded by cron recreating
244
+ // `data/accounts/<accountId>/logs/` every 60s (which tripped seed-neo4j.sh's
245
+ // stub-account-dirs guard — see Task 039 problem statement). Idempotent:
246
+ // no-op when the block is absent. Gated on Linux to mirror the installer's
247
+ // `installCrons` which early-returned on every other platform.
248
+ // ---------------------------------------------------------------------------
249
+ function removeLegacyCronBlock() {
250
+ log("1b", "Removing legacy cron block...");
251
+ if (!isLinux()) {
252
+ console.log(" Skipped — non-Linux platform.");
253
+ return;
254
+ }
255
+ if (!commandExists("crontab")) {
256
+ console.log(" crontab not available — nothing to strip.");
257
+ return;
258
+ }
259
+ const current = spawnSync("crontab", ["-l"], { encoding: "utf-8", stdio: ["pipe", "pipe", "pipe"] });
260
+ // Non-zero status (or empty stdout) on `crontab -l` means the user has no
261
+ // crontab at all — nothing to strip, and writing back would create one.
262
+ if (current.status !== 0 || !current.stdout) {
263
+ console.log(" Cron block: none present");
264
+ return;
265
+ }
266
+ const beginMarker = `# BEGIN ${BRAND.productName.toUpperCase()} CRONS`;
267
+ const endMarker = `# END ${BRAND.productName.toUpperCase()} CRONS`;
268
+ const escapeRegex = (s) => s.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
269
+ const blockPattern = new RegExp(`${escapeRegex(beginMarker)}[\\s\\S]*?${escapeRegex(endMarker)}\\n?`, "g");
270
+ // Count entry lines (non-comment, non-blank) between markers for the
271
+ // operator-visible log. matchAll returns all block bodies in one pass.
272
+ const extractPattern = new RegExp(`${escapeRegex(beginMarker)}([\\s\\S]*?)${escapeRegex(endMarker)}`, "g");
273
+ let entryCount = 0;
274
+ for (const match of current.stdout.matchAll(extractPattern)) {
275
+ entryCount += match[1]
276
+ .split("\n")
277
+ .filter((l) => l.trim().length > 0 && !l.trim().startsWith("#"))
278
+ .length;
279
+ }
280
+ if (entryCount === 0 && !blockPattern.test(current.stdout)) {
281
+ console.log(" Cron block: none present");
282
+ return;
283
+ }
284
+ const stripped = current.stdout.replace(blockPattern, "").trimEnd();
285
+ if (stripped.length === 0) {
286
+ // Block was the only content — remove the crontab outright so `crontab -l`
287
+ // reports "no crontab for admin" (Task 039 success criterion). Writing an
288
+ // empty buffer via `crontab -` leaves a zero-byte crontab on Debian.
289
+ const removed = spawnSync("crontab", ["-r"], { stdio: "pipe" });
290
+ if (removed.status !== 0) {
291
+ console.log(` Cron block: stripped ${entryCount} entries but crontab -r failed — ${(removed.stderr ?? "").toString().trim()}`);
292
+ logFile(` crontab -r failed: ${removed.stderr}`);
293
+ return;
294
+ }
295
+ }
296
+ else {
297
+ const write = spawnSync("crontab", ["-"], {
298
+ input: stripped + "\n",
299
+ encoding: "utf-8",
300
+ stdio: ["pipe", "pipe", "pipe"],
301
+ });
302
+ if (write.status !== 0) {
303
+ console.log(` Cron block: write failed — ${(write.stderr ?? "").trim()}`);
304
+ logFile(` crontab write failed: ${write.stderr}`);
305
+ return;
306
+ }
307
+ }
308
+ console.log(` Cron block: removed ${entryCount} entries`);
309
+ }
310
+ // ---------------------------------------------------------------------------
236
311
  // Step 2: Delete Cloudflare tunnel
237
312
  // ---------------------------------------------------------------------------
238
313
  function deleteCloudflareTunnel() {
@@ -709,10 +784,90 @@ function removeOllama() {
709
784
  // Models directory (~/.ollama/) is removed in step 4 (removeAppDirs)
710
785
  }
711
786
  // ---------------------------------------------------------------------------
712
- // Step 10: Restore hostname
787
+ // Task 034 Samba teardown. Symmetric to provisionSamba in index.ts.
788
+ //
789
+ // Peer-brand discipline: smb.conf, the smbpasswd entry, and the samba apt
790
+ // package are device-wide singletons shared by every brand that ships an
791
+ // SMB share. Drop only this brand's stanza on every uninstall; stop+disable
792
+ // units / smbpasswd -x admin / apt-purge samba run only when no brand stanza
793
+ // remains in smb.conf AND no peer brand is detected. This mirrors the
794
+ // Neo4j / cloudflared / Ollama treatment in steps 5–9 above.
795
+ // ---------------------------------------------------------------------------
796
function removeSamba() {
  log("10", "Removing Samba share...");
  if (!isLinux()) {
    console.log("   Not Linux — skipping.");
    return;
  }
  const SMB_CONF = "/etc/samba/smb.conf";
  if (!existsSync(SMB_CONF)) {
    console.log("   /etc/samba/smb.conf not present — nothing to remove.");
    return;
  }
  // Read via sudo: smb.conf may not be world-readable. A read failure skips
  // stanza surgery rather than risking a blind overwrite.
  let existing = "";
  try {
    const cat = spawnSync("sudo", ["cat", SMB_CONF], { encoding: "utf-8", stdio: "pipe", timeout: 5_000 });
    if (cat.status === 0) existing = cat.stdout ?? "";
  } catch {
    console.log("   Could not read smb.conf — skipping stanza removal.");
    return;
  }
  const stripped = removeBrandStanza({ existing, brand: BRAND.hostname });
  if (stripped === existing) {
    console.log(`   No [${BRAND.hostname}] stanza found in ${SMB_CONF}`);
  } else {
    const tee = spawnSync("sudo", ["tee", SMB_CONF], { input: stripped, encoding: "utf-8", stdio: ["pipe", "pipe", "pipe"], timeout: 10_000 });
    if (tee.status === 0) {
      console.log(`   Removed [${BRAND.hostname}] stanza from ${SMB_CONF}`);
    } else {
      console.log(`   Failed to write stripped smb.conf: ${(tee.stderr ?? "").trim()}`);
    }
  }
  // Best-effort reload so the brand share vanishes from the running config
  // without dropping live connections to peer-brand shares. A failed reload
  // only leaves a dangling share name pointing at a now-deleted sharePath.
  spawnSync("sudo", ["systemctl", "reload", "smbd"], { stdio: "pipe", timeout: 10_000 });
  // Stop here while any other brand still depends on samba — disabling smbd
  // or purging the package would take the peer's share down with ours.
  const peer = peerBrandPresent();
  if (hasAnyBrandStanza(stripped) || peer) {
    const reason = peer ? `peer brand present (${peer})` : "other brand stanza remains";
    console.log(`   Leaving smbd/nmbd + samba package in place — ${reason}`);
    return;
  }
  // No brand stanza, no peer brand — full device-wide teardown.
  try {
    spawnSync("sudo", ["systemctl", "disable", "--now", "smbd", "nmbd"], { stdio: "pipe", timeout: 30_000 });
    console.log("   Stopped + disabled smbd, nmbd");
  } catch (err) {
    console.log(`   Failed to disable smbd/nmbd: ${err instanceof Error ? err.message : String(err)}`);
  }
  // `smbpasswd -x` exits non-zero when admin is already absent from the
  // passdb — both outcomes are acceptable, so the status is ignored.
  spawnSync("sudo", ["smbpasswd", "-x", "admin"], { stdio: "pipe", timeout: 10_000 });
  try {
    shell("apt-get", ["remove", "--purge", "-y", "samba", "samba-common-bin"], { sudo: true, timeout: 120_000 });
    console.log("   Purged samba package.");
  } catch (err) {
    console.log(`   apt-get purge samba failed: ${err instanceof Error ? err.message : String(err)}`);
    console.log("   Run manually: sudo apt-get remove --purge -y samba samba-common-bin");
  }
  // The sudoers grant existed only for the PIN→smbpasswd sync; with no brand
  // stanza left on the device it has no remaining consumer.
  spawnSync("sudo", ["rm", "-f", "/etc/sudoers.d/maxy-samba"], { stdio: "pipe", timeout: 5_000 });
  console.log("   Removed /etc/sudoers.d/maxy-samba");
}
866
+ // ---------------------------------------------------------------------------
867
+ // Step 11: Restore hostname
713
868
  // ---------------------------------------------------------------------------
714
869
  function restoreHostname() {
715
- log("10", "Restoring hostname...");
870
+ log("11", "Restoring hostname...");
716
871
  if (!isLinux()) {
717
872
  console.log(" Not Linux — skipping.");
718
873
  return;
@@ -810,6 +965,7 @@ export async function runUninstall(options) {
810
965
  const failures = [];
811
966
  const steps = [
812
967
  { name: "Stop services", fn: stopServices },
968
+ { name: "Remove legacy cron block", fn: removeLegacyCronBlock },
813
969
  { name: "Delete Cloudflare tunnel", fn: deleteCloudflareTunnel },
814
970
  ...(options.exportPath
815
971
  ? [{ name: "Export data", fn: () => exportData(options.exportPath) }]
@@ -821,6 +977,7 @@ export async function runUninstall(options) {
821
977
  { name: "Remove system configuration", fn: removeSystemConfig },
822
978
  { name: "Remove systemd service", fn: removeSystemdService },
823
979
  { name: "Remove Ollama", fn: removeOllama },
980
+ { name: "Remove Samba share", fn: removeSamba },
824
981
  { name: "Restore hostname", fn: restoreHostname },
825
982
  ];
826
983
  for (const step of steps) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@rubytech/create-realagent-code",
3
- "version": "0.1.21",
3
+ "version": "0.1.23",
4
4
  "description": "Install Real Agent — Built for agents. By agents.",
5
5
  "bin": {
6
6
  "create-realagent-code": "./dist/index.js"
@@ -68,6 +68,26 @@ Failure signals to grep in `~/.maxy/logs/server.log` (or `~/.realagent/logs/serv
68
68
 
69
69
  If you need to restart the service manually (rare), ask {{productName}} to do it for you.
70
70
 
71
+ ## Browsing the brand filesystem on your LAN (SMB)
72
+
73
+ {{productName}} exposes its install folder (`/home/admin/<brand>` on the Pi) as a network share so you can drop files in, drag files out, and edit them from your Mac, Windows PC, iPhone, or Android device. It uses SMB — the same protocol Windows file sharing uses — which every modern OS speaks natively. No client install required.
74
+
75
+ The share is **LAN-only**: it binds to the loopback and your Pi's Wi-Fi/Ethernet interface only. Cloudflare tunnels carry HTTPS for the web UI, not SMB, so the share is invisible from the public internet. Off-LAN access for travelling operators is handled by a separate route (in progress).
76
+
77
+ **Credentials:** SMB user is `admin`; password is your {{productName}} PIN. The installer wires the sync so every time you set or rotate the PIN in the admin UI, the SMB password updates with it.
78
+
79
+ **macOS Finder**: Press `Cmd-K` from any Finder window, type `smb://<hostname>.local` (use the hostname your installer printed — for example `smb://maxy-code.local` or `smb://realagent-code.local`), click Connect, sign in as `admin` with your PIN. The share appears in the Finder sidebar; drag-and-drop works in both directions.
80
+
81
+ **Windows Explorer**: Open File Explorer, type `\\<hostname>.local\<brand>` in the address bar (for example `\\maxy-code.local\maxy-code`), press Enter, sign in as `admin` with your PIN. To keep it across reboots, right-click → "Map network drive".
82
+
83
+ **iOS Files**: Open Files → tap the `…` menu (top-right) → "Connect to Server" → enter `smb://<hostname>.local` → "Registered User" → username `admin`, password is your PIN.
84
+
85
+ **Android (Solid Explorer, CX File Explorer)**: Add a new connection of type SMB / Network. Host = `<hostname>.local`, share = `<brand>` (same as your install folder name), user = `admin`, password = your PIN.
86
+
87
+ **Troubleshooting:** if the mount fails with "logon failure", change your PIN in the admin UI and try again — that re-triggers the smbpasswd sync. If the share doesn't show up at all, your client may need `<hostname>.local` resolved by mDNS — try the Pi's LAN IP address as a fallback (`smb://192.168.1.50` on macOS, `\\192.168.1.50\<brand>` on Windows).
88
+
89
+ The installer maintains the share automatically. To remove it, uninstalling the brand strips its stanza from `/etc/samba/smb.conf` and (when no peer brand remains on the device) stops `smbd`, drops the smbpasswd entry, and purges the samba package.
90
+
71
91
  ## Remote Access via Cloudflare
72
92
 
73
93
  {{productName}} uses a Cloudflare tunnel to make your local Pi accessible from anywhere without opening router ports. The tunnel is configured during setup and runs as a background service.
@@ -73,7 +73,7 @@ When `agentAddress` is not set or matches the auth email, all tools behave as be
73
73
 
74
74
  ## Email Persistence
75
75
 
76
- Emails are automatically polled from IMAP and stored as `Email` nodes in the graph. This happens via a background cron job — no manual triggering needed.
76
+ Emails are fetched from IMAP and stored as `Email` nodes in the graph. The fetcher binary lives at `email/mcp/dist/scripts/email-fetch.js`. As of Task 039 it is **not currently scheduled on any install** — migration to Desktop scheduled tasks (the canonical dispatch surface, see `maxy-code-prd.md` §Scheduled tasks) is tracked separately. Until that landing, new email is only ingested when an operator invokes the fetcher manually.
77
77
 
78
78
  - **Polling:** After `email-setup` completes, the platform polls IMAP at the configured interval (default: every 5 minutes). Only emails addressed TO the agent's `agentAddress` are stored.
79
79
  - **Deduplication:** Each email is identified by its Message-ID header. Re-polling the same messages does not create duplicates, even if the agent's email address changes between poll cycles. A composite unique constraint on `(messageId, accountId)` provides database-level enforcement.
@@ -82,7 +82,7 @@ Emails are automatically polled from IMAP and stored as `Email` nodes in the gra
82
82
 
83
83
  ## Email Threading
84
84
 
85
- Emails are linked into conversation threads via `REPLY_TO` graph edges. When an email has an `In-Reply-To` header, the platform looks up the parent email by `Message-ID` within the same account and creates an edge. Thread linking happens automatically during the polling cron job.
85
+ Emails are linked into conversation threads via `REPLY_TO` graph edges. When an email has an `In-Reply-To` header, the platform looks up the parent email by `Message-ID` within the same account and creates an edge. Thread linking happens as part of each fetch run (which is operator-invoked until the dispatcher is wired — see above).
86
86
 
87
87
  - **Out-of-order delivery:** If a reply arrives before its parent, the edge is created later when the parent is stored (orphan back-fill).
88
88
  - **Thread context:** `email-read` and `email-search` include `Thread-Depth` (number of hops to the thread root) and `Thread-ID` (emailId of the root message) for any email that is part of a thread. Root emails (no parent) have no thread fields.
@@ -150,7 +150,7 @@ Classification verdicts are logged to `{accountDir}/logs/email-fetch.log` with p
150
150
 
151
151
  ## Auto-Respond
152
152
 
153
- When enabled, a public agent automatically replies to incoming emails. A cron job polls the inbox for new messages and generates replies via the assigned agent.
153
+ When enabled, a public agent replies to incoming emails. The auto-respond binary lives at `email/mcp/dist/scripts/email-auto-respond.js` and, like the fetcher, is **not currently scheduled on any install** as of Task 039 — see `maxy-code-prd.md` §Scheduled tasks for the canonical dispatch destination. Until that landing, auto-respond only runs when an operator invokes the script manually.
154
154
 
155
155
  ### Setup
156
156
 
@@ -168,7 +168,7 @@ When called without an `agentSlug`, the tool returns available agents. Present t
168
168
 
169
169
  ### Behaviour
170
170
 
171
- - The cron job runs every minute. Each account's poll is skipped if the configured interval hasn't elapsed since the last poll.
171
+ - Once the dispatcher is wired (see above), each account's poll is skipped if the configured interval hasn't elapsed since the last poll. The interval gate is enforced inside `email-auto-respond.js`, so the same skip logic applies whether the script is fired by the dispatcher or manually by an operator.
172
172
  - Only emails addressed TO the agent's email address are processed (alias filtering applies).
173
173
  - Auto-replies, mailing list messages, and emails from the agent's own address are automatically skipped (RFC 3834 loop prevention).
174
174
  - Outgoing replies include `In-Reply-To` and `References` headers for correct threading, and `Auto-Submitted: auto-replied` to prevent loops with other auto-responders.
@@ -49,7 +49,7 @@ For recurring events, `schedule-update` with `skipNext: true` advances `nextRun`
49
49
 
50
50
  ## Event actions
51
51
 
52
- Events can carry an automated action that fires when the event's time arrives. The platform heartbeat cron (every minute) checks for due events and dispatches their actions by spawning the target plugin's MCP server and calling the named tool.
52
+ Events can carry an automated action that fires when the event's time arrives. The dispatcher binary that reads due events and spawns the target plugin's MCP server lives at `scheduling/mcp/dist/scripts/check-due-events.js`. As of Task 039 it is **not currently scheduled on any install**: the legacy crontab writer was removed and migration of the dispatcher to Desktop scheduled tasks (the canonical dispatch surface — see `maxy-code-prd.md` §Scheduled tasks) is tracked separately. Until that landing, `action:` payloads are stored on the event and only execute when an operator invokes the dispatcher manually.
53
53
 
54
54
  To create an event with an action, pass the `action` parameter to `schedule-event`:
55
55
 
@@ -15,7 +15,7 @@ metadata: {"platform":{"always":false,"embed":[],"pluginKey":"workflows"}}
15
15
 
16
16
  # Workflows
17
17
 
18
- > **Loading note:** `platform.always:false` in the frontmatter above refers to **prose embedding** — this file's contents are not auto-injected into every agent system prompt. It does **not** refer to MCP server loading. The workflows MCP server is always loaded in admin sessions (registered in `getMcpServers()` in `claude-agent.ts` alongside `tasks` / `scheduling` / `email`), and the same binary is also spawned ad-hoc from the heartbeat cron for scheduled `workflow-execute` calls via `.mcp.json`.
18
+ > **Loading note:** `platform.always:false` in the frontmatter above refers to **prose embedding** — this file's contents are not auto-injected into every agent system prompt. It does **not** refer to MCP server loading. The workflows MCP server is always loaded in admin sessions (registered in `getMcpServers()` in `claude-agent.ts` alongside `tasks` / `scheduling` / `email`). The same binary is designed to be spawned ad-hoc by the platform's scheduled-task dispatcher for `workflow-execute` calls via `.mcp.json`; that dispatcher is currently unwired (see `maxy-code-prd.md` §Scheduled tasks for the canonical destination surface).
19
19
 
20
20
  Workflows are persistent, named compositions of executable steps that the user creates and the engine executes. Steps can chain MCP tool calls and LLM reasoning into composable pipelines. Steps are validated at creation time — a workflow with unmet dependencies cannot be activated. The user manages them conversationally.
21
21
 
@@ -207,7 +207,7 @@ To trigger a workflow on a schedule, create an Event with the scheduling plugin
207
207
  action: { plugin: "workflows", tool: "workflow-execute", args: { workflowId: "..." } }
208
208
  ```
209
209
 
210
- The existing `check-due-events` heartbeat dispatches the workflow execution at the scheduled time. The WorkflowRun captures `trigger: "schedule"`.
210
+ Scheduled workflows are dispatched by `check-due-events`, which is currently unwired pending migration to Desktop scheduled tasks (see `maxy-code-prd.md` §Scheduled tasks). When the dispatcher fires, the WorkflowRun captures `trigger: "schedule"`.
211
211
 
212
212
  ## Managing Workflows
213
213
 
@@ -85,4 +85,4 @@ To run a workflow on a schedule, create a scheduling event with action dispatch:
85
85
  - Tool: `schedule-event`
86
86
  - Action: `{ plugin: "workflows", tool: "workflow-execute", args: { workflowId: "..." } }`
87
87
 
88
- The platform heartbeat cron dispatches the workflow at the scheduled time. The WorkflowRun captures `trigger: "schedule"`.
88
+ The `check-due-events` dispatcher (currently unwired — see `maxy-code-prd.md` §Scheduled tasks) is the surface that will fire the workflow at the scheduled time. The WorkflowRun captures `trigger: "schedule"` when the dispatcher fires.
@@ -4,17 +4,16 @@
4
4
 
5
5
  Three rules govern every turn. They are load-bearing — when they conflict with anything else in this prompt, they win.
6
6
 
7
- **PRECISE.** Use exact names: exact tool names, exact field values, exact file paths, exact node properties. When relaying a tool result, relay what the tool returned — do not paraphrase, do not approximate, do not invent flags. When uncertain about an exact value, look it up; never substitute a loose-but-plausible string. *Failure symptoms:* paraphrasing tool output, approximate tool name, inventing a flag.
7
+ **BE PRECISE.**
8
8
 
9
- **CONCISE.** Every output is the minimum tokens that convey the signal. The Neo4j graph is the canonical store of knowledge for this account; keep it dense in signal via the two-step memory discipline:
10
- - *Compress on write.* Before `memory-write`, reduce the input to the minimal node/edge/property set that preserves the signal. Do not persist raw monologues, document bodies, or tool-result dumps — persist the extracted structure. If extraction is unclear, ask in one sentence what to preserve rather than saving everything.
11
- - *Filter on read.* `memory-search` returns candidates, not answers. Filter the returned set to the subset that answers the current turn. Relay one line of signal, not ten lines of candidate text.
9
+ **BE CONCISE.**
12
10
 
13
- *Failure symptoms:* unrequested summary, three-paragraph answer to a one-line question, pasting a raw tool result verbatim into chat.
11
+ **BE EVIDENCE-BASED.**
14
12
 
15
- **EVIDENCE-BASED.** The graph is the single, canonical source of truth about this account. Consult it via `memory-search`, `memory-read`, or `profile-read` — before answering factual questions or embarking on activity. When the graph is wrong, correct it via `memory-write` or `memory-update`, then answer. Never substitute training-data recall for a graph read when the graph holds the canonical version. When the graph has no answer and you must rely on training knowledge, say so explicitly. *Failure symptoms:* factual claim without a prior graph read this turn, training-data fallback when the graph has the canonical version.
16
-
17
- A landfill graph defeats EVIDENCE-BASED: search returns noise, the agent re-writes the noise, the noise compounds. Compress on write; filter on read.
13
+ The Neo4j graph is the canonical store of knowledge for this account; keep it dense in signal via the two-step memory discipline:
14
+ - *Compress on write.* Before `memory-write`, reduce the input to the minimal node/edge/property set that preserves the signal.
15
+ - *Filter on read.* `memory-search` returns candidates, not answers. Filter the returned set to the subset that answers the current turn in the fewest tokens. When the graph is wrong, correct it via `memory-write` or `memory-update`, then answer. Never substitute training-data recall for a graph read when the graph holds the canonical version. When the graph has no answer and you must rely on training knowledge, say so explicitly.
16
+ Comply with these doctrines and you cannot help but be precise and concise.
18
17
 
19
18
  ---
20
19
 
@@ -28,21 +27,16 @@ No action without clear intent. Before acting on any request, you must know:
28
27
 
29
28
  When the owner's words are precise, all three are self-evident — act without delay. When any of the three requires assumption, stop and ask. Vagueness and urgency are signals to slow down, not speed up. Once confirmed, the rules of engagement are binding for the duration of the task.
30
29
 
31
- **Antecedent lookup before asking.** The owner's words are not the only source of intent — the chain on this conversationId is the first place to consult, not the last. Before emitting any "stop and ask"-class reply (phrasings such as "what are you referring to?", "not enough signal", "could you clarify?"), call `mcp__memory__conversation-search` against the live conversationId. Only when that lookup returns nothing relevant may you ask. Asking while your own history sits one tool-call away unread is a doctrine violation — the chain is your memory, and ignoring it is the same failure mode as paraphrasing a tool result instead of reading it.
32
-
33
- **Recovery from any recorded breakdown.** If the chain holds a recent `:TurnFailure` event (mode such as `server-shutdown` or `client-network-drop`) and the owner's next message is a natural follow-up to that breakdown ("what happened?", "where are we?", "did that work?"), the reply names the recorded failure mode and resumes from the preserved chain. Never ask the owner to reframe what they were doing — that information is already in the chain.
34
-
35
- This governs everything below.
30
+ **Antecedent lookup before asking.** The owner's words are not the only source of intent — the chain on this conversationId is the first place to consult, not the last.
36
31
 
37
32
  ---
38
33
 
39
- You are the head of operations. Your purpose across every session — not just the first, not just when asked — is three things:
34
+ ## Your role
40
35
 
41
- 1. **Calibrate what excellence means here.** Learn the owner's standards — what good looks like in their work, their decisions, their outcomes. Refine this understanding every session. Session 300 should be more precise than session 3.
42
- 2. **Prevent what used to go wrong.** Learn the failure patterns — what fell through the cracks before you, what keeps breaking, what frustrates. Your job is to break those patterns, not just record them.
43
- 3. **Compress what takes months to learn.** Recognise the compound knowledge — the rhythms, instincts, and patterns that humans build only through sustained repetition. Encode them from the first instance. Give the owner the benefit of experience immediately.
36
+ You are the head of operations, chief of staff and private secretary. Your purpose across every session:
44
37
 
45
- These are not tasks to complete. They are the lens through which you approach every interaction. As your understanding sharpens, your own identity should become more precise — possibly more concise — never merely longer. Every refinement replaces vagueness with exactness.
38
+ 1. **Condense every conversation into a precise, concise action.** Cut through the vagueness, filter out the noise, isolate the signal and explicitly surface it.
39
+ 2. **Keep everything hyper-organised.** The core function and architecture of the graph is to maintain order. Everything that has reason to be stored should be filed under a proper hierarchy. Use projects to host top-level entity nodes, tasks for activities related to them, and person and organisation nodes for the relationships between projects and activities.
46
40
 
47
41
  Your personalisation is in `agents/admin/SOUL.md`. Read it and apply it. SOUL.md is personality and tone only — never behavioural rules, knowledge, or operational constraints. When writing or updating SOUL.md, keep it to how you sound, not what you do.
48
42
 
@@ -45,7 +45,7 @@ Manages events, appointments, and recurring triggers in the graph.
45
45
 
46
46
  **Cron patterns:** `0 8 * * 1-5` (weekdays 8am), `0 9 * * 1` (Monday 9am), `0 0 1 * *` (first of month), `*/30 * * * *` (every 30 min).
47
47
 
48
- **Event actions:** Events can dispatch automated MCP tool calls when their time arrives. Pass `action: { plugin, tool, args }` to `schedule-event`. The platform heartbeat cron (every minute) fires the action by spawning the target plugin's MCP server. Use this to trigger workflows on a schedule: `action: { plugin: "workflows", tool: "workflow-execute", args: { workflowId: "..." } }`.
48
+ **Event actions:** Events can carry an MCP tool-call payload that fires when their time arrives. Pass `action: { plugin, tool, args }` to `schedule-event`. The `check-due-events` dispatcher binary is the surface that reads due events and spawns the target plugin's MCP server. As of Task 039 the dispatcher is **not currently scheduled** — migration to Desktop scheduled tasks is tracked separately (see `maxy-code-prd.md` §Scheduled tasks). Until that landing, scheduled `action:` payloads are stored on the event but only fire when an operator invokes the dispatcher manually. Use this to schedule workflow runs: `action: { plugin: "workflows", tool: "workflow-execute", args: { workflowId: "..." } }`.
49
49
 
50
50
  **Skip vs cancel:** `schedule-update` with `skipNext: true` advances one cycle without triggering. `schedule-cancel` kills the entire series — there is no per-occurrence cancellation.
51
51
 
@@ -98,7 +98,7 @@ Workflows are persistent, named compositions of executable steps — tool calls
98
98
 
99
99
  **Listing and reading:** `workflow-list` returns all workflows with status. `workflow-get` returns a single workflow with full step definitions. `workflow-update` modifies steps or metadata. `workflow-delete` removes a workflow.
100
100
 
101
- **Schedule integration:** Workflows can be triggered by scheduled events via the platform heartbeat cron. Link a workflow to a schedule by creating a scheduled event with `action: { plugin: "workflows", tool: "workflow-execute", args: { workflowId: "..." } }`.
101
+ **Schedule integration:** Workflows can be triggered by scheduled events via the `check-due-events` dispatcher. Link a workflow to a schedule by creating a scheduled event with `action: { plugin: "workflows", tool: "workflow-execute", args: { workflowId: "..." } }`. Note: as of Task 039 the dispatcher is **not currently scheduled** — migration to Desktop scheduled tasks is tracked separately (see `maxy-code-prd.md` §Scheduled tasks). Until that landing, the action stays inert until an operator invokes the dispatcher manually.
102
102
 
103
103
  ## Contacts (domain context — you do not have contact tools)
104
104
 
@@ -1 +1 @@
1
- import{a as e}from"./brand-jT16ErmC.js";var t=e();function n({checked:e,onChange:n,label:r,disabled:i}){return(0,t.jsxs)(`label`,{className:`maxy-checkbox${i?` maxy-checkbox--disabled`:``}`,children:[(0,t.jsx)(`input`,{type:`checkbox`,checked:e,onChange:e=>n(e.target.checked),disabled:i}),(0,t.jsx)(`span`,{className:`maxy-checkbox__box`,children:`✱`}),r&&(0,t.jsx)(`span`,{className:`maxy-checkbox__label`,children:r})]})}export{n as t};
1
+ import{a as e}from"./brand-CSQuxS9w.js";var t=e();function n({checked:e,onChange:n,label:r,disabled:i}){return(0,t.jsxs)(`label`,{className:`maxy-checkbox${i?` maxy-checkbox--disabled`:``}`,children:[(0,t.jsx)(`input`,{type:`checkbox`,checked:e,onChange:e=>n(e.target.checked),disabled:i}),(0,t.jsx)(`span`,{className:`maxy-checkbox__box`,children:`✱`}),r&&(0,t.jsx)(`span`,{className:`maxy-checkbox__label`,children:r})]})}export{n as t};