@a-company/paradigm 5.37.11 → 6.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (362)
  1. package/dist/{accept-orchestration-SBZVK3H4.js → accept-orchestration-QQISPINV.js} +1 -1
  2. package/dist/add-UOR4INIV.js +8 -0
  3. package/dist/{agent-loader-RIVI6QPP.js → agent-loader-2WJHD46U.js} +1 -1
  4. package/dist/{agent-loader-RJRVO5GQ.js → agent-loader-YKS2PQWO.js} +1 -1
  5. package/dist/{aggregate-W66DM3GA.js → aggregate-A5S5MTCC.js} +1 -1
  6. package/dist/{ambient-76YMUA5Q.js → ambient-BE3SQXNN.js} +1 -1
  7. package/dist/{ambient-WTLYUAQM.js → ambient-NVKQCW2A.js} +12 -12
  8. package/dist/{assess-UFPYEJKP.js → assess-63WXHWJV.js} +1 -1
  9. package/dist/{beacon-5QVYV5DF.js → beacon-QVUD3MGP.js} +1 -1
  10. package/dist/{calibration-OLJYB5HN.js → calibration-BDHGYJOK.js} +1 -1
  11. package/dist/{chunk-SI6SV76D.js → chunk-3DZK54RU.js} +72 -19
  12. package/dist/{chunk-CHVQNRRT.js → chunk-4PSD5R7N.js} +2 -2
  13. package/dist/chunk-6SKSV5B2.js +24 -0
  14. package/dist/{chunk-KFNHCQ4R.js → chunk-FEYOQMZ5.js} +1 -1
  15. package/dist/{chunk-NEJ4ZLCY.js → chunk-GAFKOFAV.js} +1 -1
  16. package/dist/chunk-GRZQIKST.js +2 -0
  17. package/dist/{chunk-RLCH7DXQ.js → chunk-K7X3Z3GL.js} +1 -1
  18. package/dist/chunk-LPBCQM5Y.js +6 -0
  19. package/dist/{chunk-T6IDXUUA.js → chunk-LWAIVOSF.js} +1 -1
  20. package/dist/{chunk-74SGKSRQ.js → chunk-M2HKWR25.js} +1 -1
  21. package/dist/{chunk-BOYQAMGC.js → chunk-M3PPXJU4.js} +1 -1
  22. package/dist/chunk-PHEX6LU4.js +111 -0
  23. package/dist/chunk-Q527BPUF.js +2 -0
  24. package/dist/chunk-R5ECMBIV.js +11 -0
  25. package/dist/{chunk-X3U3IGYT.js → chunk-TBWWFRL5.js} +1 -1
  26. package/dist/{chunk-MQIG6SMF.js → chunk-TNVWGPCE.js} +1 -1
  27. package/dist/{chunk-SUU6M4JH.js → chunk-TOYQ2QCB.js} +1 -1
  28. package/dist/chunk-TZDYIPVU.js +521 -0
  29. package/dist/{chunk-3XGNXXCT.js → chunk-UZ5H7K6Q.js} +1 -1
  30. package/dist/chunk-VIG5LSGZ.js +2 -0
  31. package/dist/chunk-VNIX5KBT.js +3 -0
  32. package/dist/{chunk-AGFPVSX5.js → chunk-VXIIVMTM.js} +1 -1
  33. package/dist/{chunk-ORDKEGII.js → chunk-WESTEMIM.js} +1 -1
  34. package/dist/{chunk-LBQBWIEX.js → chunk-Y4P4SGZV.js} +1 -1
  35. package/dist/{chunk-DOCDDDTD.js → chunk-YNDPSWOE.js} +5 -5
  36. package/dist/chunk-Z5QW6USC.js +2 -0
  37. package/dist/chunk-ZJQY5PPP.js +7 -0
  38. package/dist/{commands-LMUD5L6R.js → commands-ANRJNG2W.js} +1 -1
  39. package/dist/compliance-BNFWQPKM.js +6 -0
  40. package/dist/config-schema-FLHRVZMI.js +2 -0
  41. package/dist/{constellation-CG7C4WFE.js → constellation-NWLXYATA.js} +1 -1
  42. package/dist/{context-audit-XRPT3OU2.js → context-audit-JVCA6GSV.js} +1 -1
  43. package/dist/{cost-IDNVMAEV.js → cost-24UZSS2P.js} +1 -1
  44. package/dist/{cursorrules-U5O4G5T4.js → cursorrules-ZXPXPZ3P.js} +1 -1
  45. package/dist/decision-loader-HELL2AMX.js +2 -0
  46. package/dist/{delete-P5VULXR4.js → delete-2C6ALLYY.js} +1 -1
  47. package/dist/{diff-JVEYCXUC.js → diff-MF55KQZH.js} +1 -1
  48. package/dist/{dist-KGRCLBJP-2QAPFYNF.js → dist-GQ42YS5N-4HIJZVBB.js} +10 -10
  49. package/dist/dist-JZZJLVMR.js +2 -0
  50. package/dist/{dist-3ZCH25SG.js → dist-OG6MM4VY.js} +1 -1
  51. package/dist/dist-SE67SOXB.js +2 -0
  52. package/dist/{docs-USDAF26F.js → docs-O37YLLRN.js} +1 -1
  53. package/dist/doctor-IG5XM4C4.js +2 -0
  54. package/dist/{edit-GUU3HBVW.js → edit-P3MDAZLU.js} +1 -1
  55. package/dist/{flow-POQP27WA.js → flow-BGXOVE2V.js} +1 -1
  56. package/dist/{hooks-IG2GOAHP.js → hooks-TFMMMB2H.js} +1 -1
  57. package/dist/index.js +6 -6
  58. package/dist/init-M44SO65G.js +2 -0
  59. package/dist/init-V4KSEKPK.js +2 -0
  60. package/dist/{integrity-UYDOOJDP.js → integrity-ROO3G43N.js} +1 -1
  61. package/dist/{list-YKIQNKGB.js → list-2XIWUEMA.js} +1 -1
  62. package/dist/list-CFHINXIS.js +12 -0
  63. package/dist/lore-loader-D2ISOASW.js +2 -0
  64. package/dist/lore-loader-PXFKMKAN.js +2 -0
  65. package/dist/mcp.js +19 -11
  66. package/dist/metrics-UESGUHTA.js +2 -0
  67. package/dist/{migrate-IBDE7VK4.js → migrate-Z5UQN57G.js} +1 -1
  68. package/dist/migrate-assessments-YSITX7KM.js +4 -0
  69. package/dist/migrate-decisions-NPLQOEEH.js +6 -0
  70. package/dist/migrate-plsat-EM2ACIQ3.js +6 -0
  71. package/dist/{nomination-engine-EALA5MGI.js → nomination-engine-QPZJH6XO.js} +1 -1
  72. package/dist/{notebook-loader-PXNRBBXD.js → notebook-loader-3J2OFMS3.js} +1 -1
  73. package/dist/{orchestrate-RCAMBOIB.js → orchestrate-RID7HHHH.js} +1 -1
  74. package/dist/{platform-server-DNAMH4YI.js → platform-server-UD45NTGV.js} +1 -1
  75. package/dist/portal-check-DV2VSJ5E.js +8 -0
  76. package/dist/{portal-compliance-4MG5F2GI.js → portal-compliance-JONQ4SOP.js} +1 -1
  77. package/dist/{probe-B22G2JKF.js → probe-5HAXULAD.js} +1 -1
  78. package/dist/{providers-AWA7WLLM.js → providers-4PXMWA7V.js} +1 -1
  79. package/dist/quiz-WYIZJG5K.js +10 -0
  80. package/dist/{record-YXPB34MY.js → record-N3VNYYKJ.js} +1 -1
  81. package/dist/reindex-FWPD2VGM.js +2 -0
  82. package/dist/{retag-N5XF3KXP.js → retag-72R2OSZV.js} +1 -1
  83. package/dist/{review-77QI6VOC.js → review-2INNWLTW.js} +1 -1
  84. package/dist/{review-6UAH6V3R.js → review-VMSX2PKI.js} +1 -1
  85. package/dist/{ripple-ZGDITCGB.js → ripple-FNZI47SH.js} +1 -1
  86. package/dist/{sentinel-HYAZ3CO5.js → sentinel-EFPEX246.js} +1 -1
  87. package/dist/{sentinel-bridge-VR357PKL.js → sentinel-bridge-UR2MKARY.js} +1 -1
  88. package/dist/sentinel.js +1 -1
  89. package/dist/{serve-U47GULB6.js → serve-MO35XIZE.js} +1 -1
  90. package/dist/serve-OQYUO7CR.js +12 -0
  91. package/dist/{server-4YNUIK4W.js → server-4D77LCST.js} +1 -1
  92. package/dist/server-FGUL2FWQ.js +7 -0
  93. package/dist/session-tracker-KGORN6B5.js +2 -0
  94. package/dist/{session-work-log-PAKXOFGL.js → session-work-log-4IEVE4KK.js} +1 -1
  95. package/dist/{session-work-log-ZP45TREI.js → session-work-log-EE4UIZ33.js} +1 -1
  96. package/dist/{setup-3F5IK7MO.js → setup-ZSEC72BS.js} +2 -2
  97. package/dist/{shift-FDADESC4.js → shift-TVNY2CQF.js} +6 -6
  98. package/dist/{show-PJ5LFLIL.js → show-JH7LJ5MT.js} +1 -1
  99. package/dist/show-WVHAL4VU.js +7 -0
  100. package/dist/{snapshot-L2G56RPL.js → snapshot-3IYB67D4.js} +1 -1
  101. package/dist/{spawn-M5BAV252.js → spawn-UH5RENSE.js} +1 -1
  102. package/dist/{status-77M3SDIF.js → status-DB3KNLW3.js} +1 -1
  103. package/dist/status-S7Z5FVIE.js +6 -0
  104. package/dist/{summary-LXLHFRN7.js → summary-WLI3NF4G.js} +2 -2
  105. package/dist/{sweep-HU74OPVW.js → sweep-7TZFN5NS.js} +1 -1
  106. package/dist/sync-55U6QPIA.js +2 -0
  107. package/dist/{sync-llms-7CAI74QL.js → sync-llms-GF7DDQDI.js} +1 -1
  108. package/dist/team-MGT66HZQ.js +2 -0
  109. package/dist/{test-BQJMS4Y2.js → test-WLEPZQFC.js} +1 -1
  110. package/dist/{timeline-K3ZFKJ3R.js → timeline-RK7O2SCM.js} +1 -1
  111. package/dist/tools-QJHAVYI6.js +2 -0
  112. package/dist/university-content/notes/N-para-001-build-something.md +126 -0
  113. package/dist/university-content/notes/N-para-001-meet-the-team.md +85 -0
  114. package/dist/university-content/notes/N-para-001-shift-setup.md +74 -0
  115. package/dist/university-content/notes/N-para-101-component-types.md +99 -0
  116. package/dist/university-content/notes/N-para-101-first-steps.md +134 -0
  117. package/dist/university-content/notes/N-para-101-five-symbols.md +128 -0
  118. package/dist/university-content/notes/N-para-101-paradigm-logger.md +89 -0
  119. package/dist/university-content/notes/N-para-101-portal-yaml.md +112 -0
  120. package/dist/university-content/notes/N-para-101-project-structure.md +143 -0
  121. package/dist/university-content/notes/N-para-101-purpose-files.md +121 -0
  122. package/dist/university-content/notes/N-para-101-tags-and-classification.md +93 -0
  123. package/dist/university-content/notes/N-para-101-welcome.md +51 -0
  124. package/dist/university-content/notes/N-para-201-architecture-review.md +175 -0
  125. package/dist/university-content/notes/N-para-201-aspect-graph.md +79 -0
  126. package/dist/university-content/notes/N-para-201-aspects-and-anchors.md +112 -0
  127. package/dist/university-content/notes/N-para-201-component-patterns.md +138 -0
  128. package/dist/university-content/notes/N-para-201-cross-cutting-concerns.md +145 -0
  129. package/dist/university-content/notes/N-para-201-disciplines.md +187 -0
  130. package/dist/university-content/notes/N-para-201-flows-deep-dive.md +119 -0
  131. package/dist/university-content/notes/N-para-201-gates-deep-dive.md +165 -0
  132. package/dist/university-content/notes/N-para-201-portal-protocol.md +133 -0
  133. package/dist/university-content/notes/N-para-201-signal-patterns.md +159 -0
  134. package/dist/university-content/notes/N-para-201-symbol-naming.md +149 -0
  135. package/dist/university-content/notes/N-para-301-context-management.md +53 -0
  136. package/dist/university-content/notes/N-para-301-decisions.md +99 -0
  137. package/dist/university-content/notes/N-para-301-doctor-and-validation.md +70 -0
  138. package/dist/university-content/notes/N-para-301-enforcement-levels.md +102 -0
  139. package/dist/university-content/notes/N-para-301-fragility-tracking.md +50 -0
  140. package/dist/university-content/notes/N-para-301-history-system.md +42 -0
  141. package/dist/university-content/notes/N-para-301-navigation-system.md +55 -0
  142. package/dist/university-content/notes/N-para-301-operations-review.md +55 -0
  143. package/dist/university-content/notes/N-para-301-paradigm-shift.md +93 -0
  144. package/dist/university-content/notes/N-para-301-protocols.md +113 -0
  145. package/dist/university-content/notes/N-para-301-ripple-analysis.md +53 -0
  146. package/dist/university-content/notes/N-para-301-sentinel-observability.md +87 -0
  147. package/dist/university-content/notes/N-para-301-sync-and-maintenance.md +57 -0
  148. package/dist/university-content/notes/N-para-301-wisdom-system.md +89 -0
  149. package/dist/university-content/notes/N-para-401-agent-identity.md +99 -0
  150. package/dist/university-content/notes/N-para-401-agent-interop.md +87 -0
  151. package/dist/university-content/notes/N-para-401-agent-roles.md +107 -0
  152. package/dist/university-content/notes/N-para-401-commit-conventions.md +82 -0
  153. package/dist/university-content/notes/N-para-401-mastery-review.md +71 -0
  154. package/dist/university-content/notes/N-para-401-mcp-tools-overview.md +102 -0
  155. package/dist/university-content/notes/N-para-401-multi-agent-coordination.md +80 -0
  156. package/dist/university-content/notes/N-para-401-notebooks-permissions.md +66 -0
  157. package/dist/university-content/notes/N-para-401-orchestration-workflow.md +101 -0
  158. package/dist/university-content/notes/N-para-401-pm-governance.md +71 -0
  159. package/dist/university-content/notes/N-para-401-provider-cascade.md +75 -0
  160. package/dist/university-content/notes/N-para-401-quick-check.md +95 -0
  161. package/dist/university-content/notes/N-para-501-advanced-workflows.md +122 -0
  162. package/dist/university-content/notes/N-para-501-aspect-graph-advanced.md +195 -0
  163. package/dist/university-content/notes/N-para-501-aspect-graph-internals.md +97 -0
  164. package/dist/university-content/notes/N-para-501-assessment-loops.md +116 -0
  165. package/dist/university-content/notes/N-para-501-conductor-workspace.md +77 -0
  166. package/dist/university-content/notes/N-para-501-habits-practice.md +164 -0
  167. package/dist/university-content/notes/N-para-501-hook-enforcement.md +100 -0
  168. package/dist/university-content/notes/N-para-501-lore-system.md +155 -0
  169. package/dist/university-content/notes/N-para-501-platform-agent-ui.md +108 -0
  170. package/dist/university-content/notes/N-para-501-review-compliance.md +72 -0
  171. package/dist/university-content/notes/N-para-501-sentinel-deep-dive.md +173 -0
  172. package/dist/university-content/notes/N-para-501-session-intelligence.md +104 -0
  173. package/dist/university-content/notes/N-para-501-symphony-a-mail.md +120 -0
  174. package/dist/university-content/notes/N-para-501-symphony-networking.md +119 -0
  175. package/dist/university-content/notes/N-para-501-task-management.md +100 -0
  176. package/dist/university-content/notes/N-para-601-agent-renaissance.md +121 -0
  177. package/dist/university-content/notes/N-para-601-attention-scoring.md +129 -0
  178. package/dist/university-content/notes/N-para-601-context-composition.md +146 -0
  179. package/dist/university-content/notes/N-para-601-data-sovereignty.md +140 -0
  180. package/dist/university-content/notes/N-para-601-event-stream.md +126 -0
  181. package/dist/university-content/notes/N-para-601-knowledge-streams.md +144 -0
  182. package/dist/university-content/notes/N-para-601-learning-loop.md +68 -0
  183. package/dist/university-content/notes/N-para-601-maestro-team-collab.md +136 -0
  184. package/dist/university-content/notes/N-para-601-nominations-debates.md +115 -0
  185. package/dist/university-content/notes/N-para-701-agent-notebooks.md +131 -0
  186. package/dist/university-content/notes/N-para-701-agent-pods-nevrland.md +182 -0
  187. package/dist/university-content/notes/N-para-701-agent-profiles.md +197 -0
  188. package/dist/university-content/notes/N-para-701-agent-roster.md +82 -0
  189. package/dist/university-content/notes/N-para-701-agent-state.md +180 -0
  190. package/dist/university-content/notes/N-para-701-learning-feedback-loop.md +188 -0
  191. package/dist/university-content/notes/N-para-701-model-tier-resolution.md +204 -0
  192. package/dist/university-content/notes/N-para-701-orchestration-enforcement.md +169 -0
  193. package/dist/university-content/notes/N-para-701-per-project-rosters.md +198 -0
  194. package/dist/university-content/notes/N-para-701-symphony-visibility.md +142 -0
  195. package/dist/university-content/paths/LP-para-001.yaml +29 -0
  196. package/dist/university-content/paths/LP-para-101.yaml +59 -0
  197. package/dist/university-content/paths/LP-para-201.yaml +69 -0
  198. package/dist/university-content/paths/LP-para-301.yaml +84 -0
  199. package/dist/university-content/paths/LP-para-401.yaml +74 -0
  200. package/dist/university-content/paths/LP-para-501.yaml +89 -0
  201. package/dist/university-content/paths/LP-para-601.yaml +59 -0
  202. package/dist/university-content/paths/LP-para-701.yaml +64 -0
  203. package/dist/university-content/quizzes/Q-para-001-build-something.yaml +46 -0
  204. package/dist/university-content/quizzes/Q-para-001-meet-the-team.yaml +46 -0
  205. package/dist/university-content/quizzes/Q-para-001-shift-setup.yaml +46 -0
  206. package/dist/university-content/quizzes/Q-para-101-component-types.yaml +46 -0
  207. package/dist/university-content/quizzes/Q-para-101-first-steps.yaml +56 -0
  208. package/dist/university-content/quizzes/Q-para-101-five-symbols.yaml +66 -0
  209. package/dist/university-content/quizzes/Q-para-101-paradigm-logger.yaml +56 -0
  210. package/dist/university-content/quizzes/Q-para-101-portal-yaml.yaml +56 -0
  211. package/dist/university-content/quizzes/Q-para-101-project-structure.yaml +66 -0
  212. package/dist/university-content/quizzes/Q-para-101-purpose-files.yaml +56 -0
  213. package/dist/university-content/quizzes/Q-para-101-tags-and-classification.yaml +56 -0
  214. package/dist/university-content/quizzes/Q-para-101-welcome.yaml +56 -0
  215. package/dist/university-content/quizzes/Q-para-201-architecture-review.yaml +66 -0
  216. package/dist/university-content/quizzes/Q-para-201-aspect-graph.yaml +46 -0
  217. package/dist/university-content/quizzes/Q-para-201-aspects-and-anchors.yaml +56 -0
  218. package/dist/university-content/quizzes/Q-para-201-component-patterns.yaml +56 -0
  219. package/dist/university-content/quizzes/Q-para-201-cross-cutting-concerns.yaml +56 -0
  220. package/dist/university-content/quizzes/Q-para-201-disciplines.yaml +66 -0
  221. package/dist/university-content/quizzes/Q-para-201-flows-deep-dive.yaml +66 -0
  222. package/dist/university-content/quizzes/Q-para-201-gates-deep-dive.yaml +66 -0
  223. package/dist/university-content/quizzes/Q-para-201-portal-protocol.yaml +56 -0
  224. package/dist/university-content/quizzes/Q-para-201-signal-patterns.yaml +56 -0
  225. package/dist/university-content/quizzes/Q-para-201-symbol-naming.yaml +66 -0
  226. package/dist/university-content/quizzes/Q-para-301-context-management.yaml +56 -0
  227. package/dist/university-content/quizzes/Q-para-301-decisions.yaml +76 -0
  228. package/dist/university-content/quizzes/Q-para-301-doctor-and-validation.yaml +66 -0
  229. package/dist/university-content/quizzes/Q-para-301-enforcement-levels.yaml +46 -0
  230. package/dist/university-content/quizzes/Q-para-301-fragility-tracking.yaml +46 -0
  231. package/dist/university-content/quizzes/Q-para-301-history-system.yaml +56 -0
  232. package/dist/university-content/quizzes/Q-para-301-navigation-system.yaml +56 -0
  233. package/dist/university-content/quizzes/Q-para-301-operations-review.yaml +66 -0
  234. package/dist/university-content/quizzes/Q-para-301-paradigm-shift.yaml +46 -0
  235. package/dist/university-content/quizzes/Q-para-301-protocols.yaml +56 -0
  236. package/dist/university-content/quizzes/Q-para-301-ripple-analysis.yaml +56 -0
  237. package/dist/university-content/quizzes/Q-para-301-sentinel-observability.yaml +46 -0
  238. package/dist/university-content/quizzes/Q-para-301-sync-and-maintenance.yaml +46 -0
  239. package/dist/university-content/quizzes/Q-para-301-wisdom-system.yaml +56 -0
  240. package/dist/university-content/quizzes/Q-para-401-agent-identity.yaml +66 -0
  241. package/dist/university-content/quizzes/Q-para-401-agent-interop.yaml +46 -0
  242. package/dist/university-content/quizzes/Q-para-401-agent-roles.yaml +56 -0
  243. package/dist/university-content/quizzes/Q-para-401-commit-conventions.yaml +56 -0
  244. package/dist/university-content/quizzes/Q-para-401-mastery-review.yaml +66 -0
  245. package/dist/university-content/quizzes/Q-para-401-mcp-tools-overview.yaml +66 -0
  246. package/dist/university-content/quizzes/Q-para-401-multi-agent-coordination.yaml +76 -0
  247. package/dist/university-content/quizzes/Q-para-401-notebooks-permissions.yaml +61 -0
  248. package/dist/university-content/quizzes/Q-para-401-orchestration-workflow.yaml +66 -0
  249. package/dist/university-content/quizzes/Q-para-401-pm-governance.yaml +66 -0
  250. package/dist/university-content/quizzes/Q-para-401-provider-cascade.yaml +56 -0
  251. package/dist/university-content/quizzes/Q-para-401-quick-check.yaml +46 -0
  252. package/dist/university-content/quizzes/Q-para-501-advanced-workflows.yaml +66 -0
  253. package/dist/university-content/quizzes/Q-para-501-aspect-graph-advanced.yaml +66 -0
  254. package/dist/university-content/quizzes/Q-para-501-aspect-graph-internals.yaml +66 -0
  255. package/dist/university-content/quizzes/Q-para-501-assessment-loops.yaml +46 -0
  256. package/dist/university-content/quizzes/Q-para-501-conductor-workspace.yaml +46 -0
  257. package/dist/university-content/quizzes/Q-para-501-habits-practice.yaml +56 -0
  258. package/dist/university-content/quizzes/Q-para-501-hook-enforcement.yaml +66 -0
  259. package/dist/university-content/quizzes/Q-para-501-lore-system.yaml +66 -0
  260. package/dist/university-content/quizzes/Q-para-501-platform-agent-ui.yaml +66 -0
  261. package/dist/university-content/quizzes/Q-para-501-review-compliance.yaml +61 -0
  262. package/dist/university-content/quizzes/Q-para-501-sentinel-deep-dive.yaml +86 -0
  263. package/dist/university-content/quizzes/Q-para-501-session-intelligence.yaml +66 -0
  264. package/dist/university-content/quizzes/Q-para-501-symphony-a-mail.yaml +66 -0
  265. package/dist/university-content/quizzes/Q-para-501-symphony-networking.yaml +66 -0
  266. package/dist/university-content/quizzes/Q-para-501-task-management.yaml +46 -0
  267. package/dist/university-content/quizzes/Q-para-601-agent-renaissance.yaml +66 -0
  268. package/dist/university-content/quizzes/Q-para-601-attention-scoring.yaml +56 -0
  269. package/dist/university-content/quizzes/Q-para-601-context-composition.yaml +66 -0
  270. package/dist/university-content/quizzes/Q-para-601-data-sovereignty.yaml +56 -0
  271. package/dist/university-content/quizzes/Q-para-601-event-stream.yaml +66 -0
  272. package/dist/university-content/quizzes/Q-para-601-knowledge-streams.yaml +66 -0
  273. package/dist/university-content/quizzes/Q-para-601-learning-loop.yaml +56 -0
  274. package/dist/university-content/quizzes/Q-para-601-maestro-team-collab.yaml +86 -0
  275. package/dist/university-content/quizzes/Q-para-601-nominations-debates.yaml +66 -0
  276. package/dist/university-content/quizzes/Q-para-701-agent-notebooks.yaml +66 -0
  277. package/dist/university-content/quizzes/Q-para-701-agent-pods-nevrland.yaml +66 -0
  278. package/dist/university-content/quizzes/Q-para-701-agent-profiles.yaml +66 -0
  279. package/dist/university-content/quizzes/Q-para-701-agent-roster.yaml +66 -0
  280. package/dist/university-content/quizzes/Q-para-701-agent-state.yaml +66 -0
  281. package/dist/university-content/quizzes/Q-para-701-learning-feedback-loop.yaml +66 -0
  282. package/dist/university-content/quizzes/Q-para-701-model-tier-resolution.yaml +66 -0
  283. package/dist/university-content/quizzes/Q-para-701-orchestration-enforcement.yaml +66 -0
  284. package/dist/university-content/quizzes/Q-para-701-per-project-rosters.yaml +66 -0
  285. package/dist/university-content/quizzes/Q-para-701-symphony-visibility.yaml +66 -0
  286. package/dist/university-content/quizzes/Q-plsat-v2.yaml +904 -0
  287. package/dist/university-content/quizzes/Q-plsat-v3.yaml +2909 -0
  288. package/dist/university-content/reference.json +2 -2
  289. package/dist/university-ui/assets/{index-CecQrfSn.js → index-nNgzO1il.js} +2 -2
  290. package/dist/university-ui/assets/{index-CecQrfSn.js.map → index-nNgzO1il.js.map} +1 -1
  291. package/dist/university-ui/index.html +1 -1
  292. package/dist/{upgrade-GX56QE3C.js → upgrade-NKN63VTY.js} +2 -2
  293. package/dist/{validate-VZXTJHGO.js → validate-BB6LRWIY.js} +1 -1
  294. package/dist/validate-XUQZTF3H.js +9 -0
  295. package/dist/{watch-YCODNIET.js → watch-25GJHQYT.js} +1 -1
  296. package/dist/workspace-VMSPYIBV.js +2 -0
  297. package/lore-ui/dist/assets/{index-Bk-K0qgN.js → index-DKhNxgtW.js} +10 -10
  298. package/lore-ui/dist/index.html +1 -1
  299. package/package.json +3 -2
  300. package/platform-ui/dist/assets/{AmbientSection-BYjt75R1.js → AmbientSection-CwatqcBD.js} +1 -1
  301. package/platform-ui/dist/assets/{CanvasSection-rKvA_vZj.js → CanvasSection-dFAthehN.js} +1 -1
  302. package/platform-ui/dist/assets/{DocsSection-CI9K73M-.js → DocsSection-BZ2SFJBZ.js} +1 -1
  303. package/platform-ui/dist/assets/{GitSection-DSGj_c6S.js → GitSection-MNNYU1tO.js} +1 -1
  304. package/platform-ui/dist/assets/{GraphSection-CawN7pC5.js → GraphSection-COYjb4Pt.js} +1 -1
  305. package/platform-ui/dist/assets/LoreSection-B0hUbfsJ.js +1 -0
  306. package/platform-ui/dist/assets/{SentinelSection-DNgoYMH0.js → SentinelSection-BCxW1DCp.js} +1 -1
  307. package/platform-ui/dist/assets/{SymphonySection-C0zfcqv3.js → SymphonySection-BsucZRqy.js} +1 -1
  308. package/platform-ui/dist/assets/{TeamSection-Bzd3Dt9Q.js → TeamSection-C0QNTudW.js} +1 -1
  309. package/platform-ui/dist/assets/{UniversitySection-tBr62R0S.js → UniversitySection-DN1-g9pw.js} +1 -1
  310. package/platform-ui/dist/assets/{index-BaOmyn11.js → index-DwUT8pju.js} +2 -2
  311. package/platform-ui/dist/index.html +1 -1
  312. package/templates/paradigm/specs/symbols.md +4 -2
  313. package/dist/add-P76GEMGF.js +0 -8
  314. package/dist/chunk-3TR6LLXP.js +0 -111
  315. package/dist/chunk-G7XFK2GI.js +0 -11
  316. package/dist/chunk-J6KWGCHN.js +0 -24
  317. package/dist/chunk-JQKKVAAN.js +0 -2
  318. package/dist/chunk-ODVKPZZ4.js +0 -2
  319. package/dist/chunk-Q2J542ST.js +0 -2
  320. package/dist/chunk-QT2LKB3P.js +0 -7
  321. package/dist/chunk-SHD27BQX.js +0 -6
  322. package/dist/chunk-WS2N27RX.js +0 -3
  323. package/dist/chunk-YT52WLBF.js +0 -521
  324. package/dist/compliance-WJINB5DM.js +0 -6
  325. package/dist/config-schema-GUQY2QN7.js +0 -2
  326. package/dist/decision-loader-2XPZE4EZ.js +0 -2
  327. package/dist/dist-R3RWD35F.js +0 -2
  328. package/dist/dist-VXCZWVVJ.js +0 -2
  329. package/dist/doctor-QJ47XAUP.js +0 -2
  330. package/dist/init-HIBRSVUB.js +0 -2
  331. package/dist/list-5IUGP3ZB.js +0 -7
  332. package/dist/lore-loader-RVQI5GXL.js +0 -2
  333. package/dist/lore-loader-XY5MZRR2.js +0 -2
  334. package/dist/migrate-assessments-GEI5WMI2.js +0 -4
  335. package/dist/portal-check-Z3OCQEQR.js +0 -8
  336. package/dist/quiz-FE5UGAY2.js +0 -10
  337. package/dist/reindex-FO5VMZVQ.js +0 -2
  338. package/dist/serve-OY6XYL7F.js +0 -12
  339. package/dist/server-2MNROHF6.js +0 -7
  340. package/dist/session-tracker-MWJAJA6Z.js +0 -2
  341. package/dist/show-BOAVWZPZ.js +0 -7
  342. package/dist/status-A37ECYNJ.js +0 -6
  343. package/dist/sync-DLUBV5HQ.js +0 -2
  344. package/dist/team-NSP6PMPS.js +0 -2
  345. package/dist/tools-CERDNVCG.js +0 -2
  346. package/dist/university-content/courses/.purpose +0 -492
  347. package/dist/university-content/courses/para-001.json +0 -166
  348. package/dist/university-content/courses/para-101.json +0 -615
  349. package/dist/university-content/courses/para-201.json +0 -794
  350. package/dist/university-content/courses/para-301.json +0 -830
  351. package/dist/university-content/courses/para-401.json +0 -868
  352. package/dist/university-content/courses/para-501.json +0 -1166
  353. package/dist/university-content/courses/para-601.json +0 -719
  354. package/dist/university-content/courses/para-701.json +0 -807
  355. package/dist/university-content/plsat/.purpose +0 -162
  356. package/dist/university-content/plsat/v2.0.json +0 -760
  357. package/dist/university-content/plsat/v3.0.json +0 -3453
  358. package/dist/validate-C6SMKGYD.js +0 -9
  359. package/dist/workspace-MKSQN7B2.js +0 -2
  360. package/platform-ui/dist/assets/LoreSection-oO5dCe6O.js +0 -1
  361. /package/dist/{chunk-BV5PRPLB.js → chunk-IZSBGW6E.js} +0 -0
  362. /package/templates/paradigm/specs/{scan.md → probe.md} +0 -0
@@ -1,1166 +0,0 @@
1
- {
2
- "id": "para-501",
3
- "title": "PARA 501: Advanced Systems",
4
- "description": "Master Paradigm's advanced operational systems — Lore for project memory, Sentinel for incident intelligence, Habits for behavioral discipline, Session Intelligence for crash recovery, and Hook Enforcement for automated compliance. Ties everything together into the complete Paradigm workflow.",
5
- "lessons": [
6
- {
7
- "id": "lore-system",
8
- "title": "The Lore System",
9
- "content": "## Why Projects Forget\n\nEvery software project accumulates institutional knowledge — why a migration was attempted then rolled back, which approach was chosen for caching and why, what the team learned when the billing system went down at 2 AM. Without a system for capturing this knowledge, it lives only in the heads of the people who were there. When they leave, context-switch, or simply forget, the project loses its memory.\n\nParadigm's Lore system is a structured project timeline. It records sessions, decisions, milestones, incidents, and reviews as date-partitioned YAML entries that both humans and AI agents can search, filter, and learn from.\n\n## Anatomy of a Lore Entry\n\nEvery lore entry follows a consistent structure:\n\n```yaml\nid: L-2026-02-21-001\ntype: agent-session\ntimestamp: \"2026-02-21T14:30:00Z\"\nduration_minutes: 45\nauthor:\n type: agent\n id: claude-opus-4\n model: claude-opus-4-6\ntitle: \"Add JWT authentication to user routes\"\nsummary: \"Implemented RS256 JWT auth middleware, added ^authenticated and ^project-admin gates to portal.yaml, created refresh token rotation.\"\nsymbols_touched: [\"#auth-middleware\", \"^authenticated\", \"^project-admin\"]\nsymbols_created: [\"#refresh-token-handler\"]\nfiles_modified: [\"src/middleware/auth.ts\", \"portal.yaml\"]\nfiles_created: [\"src/handlers/refresh-token.ts\"]\nlines_added: 247\nlines_removed: 12\ncommit: \"a1b2c3d\"\ndecisions:\n - id: jwt-signing\n decision: \"Use RS256 over HS256\"\n rationale: \"Allows public key verification without sharing the signing secret\"\nlearnings:\n - \"Express v5 requires explicit async error wrapping for middleware\"\nverification:\n status: pass\n details: { \"unit-tests\": pass, \"integration\": pass }\ntags: [security, auth]\n```\n\nThe `id` field is auto-generated: `L-{date}-{sequence}`, where the sequence resets daily. 
This creates a natural chronological index.\n\n## Entry Types\n\nLore recognizes six entry types, each capturing a different kind of project event:\n\n| Type | When to Use |\n|---|---|\n| `agent-session` | An AI agent completed a work session (most common) |\n| `human-note` | A human records context, rationale, or tribal knowledge |\n| `decision` | An architectural or design decision with rationale |\n| `review` | A code review, PR review, or post-mortem |\n| `incident` | A production incident or significant failure |\n| `milestone` | A release, launch, migration completion, or major achievement |\n\nThe type drives how the entry appears in timeline views and which filters surface it.\n\n## Storage: Date-Partitioned YAML\n\nLore entries live in `.paradigm/lore/entries/` organized by date:\n\n```\n.paradigm/lore/\n timeline.yaml # Index metadata\n entries/\n 2026-02-19/\n L-2026-02-19-001.yaml\n L-2026-02-19-002.yaml\n 2026-02-20/\n L-2026-02-20-001.yaml\n 2026-02-21/\n L-2026-02-21-001.yaml\n```\n\nThe `timeline.yaml` index tracks total entry count, last updated timestamp, and known authors. 
Date partitioning keeps directories small and makes time-range queries efficient — to find entries from last week, you only read 7 directories.\n\n## CLI Tools\n\nThe CLI provides full lore management:\n\n- `paradigm lore list` — List entries with filters (author, type, symbol, date range, tags)\n- `paradigm lore show <id>` — Full detail view of a single entry\n- `paradigm lore record` — Record a new entry with expanded fields (files-modified, files-created, commit, learnings, duration)\n- `paradigm lore edit <id>` — Edit entry fields (title, summary, type, symbols, tags, learnings)\n- `paradigm lore delete <id>` — Delete an entry (with --yes to skip confirmation)\n- `paradigm lore timeline` — Timeline view grouped by date with hot symbols\n- `paradigm lore review <id>` — Add review scores to an entry\n- `paradigm lore` — Launch the web timeline UI\n\n## MCP Tools\n\nSix MCP tools power the Lore system:\n\n**`paradigm_lore_record`** — Create a new entry. Requires `type`, `title`, `summary`, and `symbols_touched`. Optional fields include files, decisions, learnings, and verification status. The entry is written to the correct date directory with an auto-incremented ID. When `validateSymbols: true` is passed, the tool checks each symbol in `symbols_touched` against registered symbols in `.purpose` files, `flows.yaml`, and `portal.yaml`. Unregistered symbols produce advisory warnings (the entry is always recorded regardless).\n\n**`paradigm_lore_search`** — Query entries with filters: by symbol, author, type, date range, tags, review status, and minimum completeness score. Returns matching entries sorted by recency.\n\n**`paradigm_lore_timeline`** — Get a high-level view: recent entries, active authors, hot symbols (most-referenced in recent entries), and timeline metadata. Use this for orientation — it tells you what has been happening in the project.\n\n**`paradigm_lore_get`** — Fetch a single entry by ID. 
Returns the full entry with all fields, including decisions, learnings, and review data.\n\n**`paradigm_lore_update`** — Update an existing entry. Pass the entry ID and the fields to change (title, summary, type, symbols, tags, learnings). Only specified fields are modified.\n\n**`paradigm_lore_delete`** — Delete an entry by ID. Requires `confirm: true` to prevent accidental deletion.\n\n## Lore Reviews\n\nEntries can be reviewed by humans after the fact. A review adds a `completeness` score (1-5), a `quality` score (1-5), and optional notes. This creates a feedback loop: agents learn which sessions produced high-quality entries and can adjust their recording behavior. You can filter entries by `hasReview` and `minCompleteness` to surface only verified project history.\n\n## When to Record\n\nThe general rule: **record lore when a session modifies 3 or more source files**. This threshold captures significant work sessions while ignoring trivial edits. The stop hook enforces this — if you modified 3+ files without recording a lore entry, it will block your session from completing.\n\nBeyond the threshold, always record lore for: architectural decisions (even if only 1 file changed), production incidents, milestone completions, and any session where you learned something the next developer should know.",
10
- "keyConcepts": [
11
- "Lore entries record sessions, decisions, milestones, incidents, and reviews",
12
- "Six entry types: agent-session, human-note, decision, review, incident, milestone",
13
- "Date-partitioned YAML storage in .paradigm/lore/entries/{YYYY-MM-DD}/",
14
- "Auto-generated IDs: L-{date}-{sequence}",
15
- "Six MCP tools: paradigm_lore_record, paradigm_lore_search, paradigm_lore_timeline, paradigm_lore_get, paradigm_lore_update, paradigm_lore_delete",
16
- "Review scores (completeness 1-5, quality 1-5) enable feedback loops",
17
- "Recording trigger: 3+ modified source files = significant session",
18
- "Optional symbol validation checks symbols_touched against registered .purpose, flows, and portal symbols",
19
- "timeline.yaml index tracks entry counts, authors, and last-updated"
20
- ],
21
- "quiz": [
22
- {
23
- "id": "q1",
24
- "question": "You just finished a 40-minute session where you added caching to the API, modifying 5 files and creating 2 new ones. You also decided to use Redis over in-memory caching. Which lore entry type is most appropriate?",
25
- "choices": {
26
- "A": "`decision` — because you made an architectural choice about Redis",
27
- "B": "`agent-session` — because this captures the full work session including the decision",
28
- "C": "`milestone` — because adding caching is a significant achievement",
29
- "D": "`review` — because you reviewed caching options before choosing",
30
- "E": "`human-note` — because you want to record the Redis rationale"
31
- },
32
- "correct": "B",
33
- "explanation": "An `agent-session` entry captures the full work session — files modified, symbols touched, decisions made, and learnings. The Redis decision belongs in the entry's `decisions` array. Use `decision` type only when you're recording a standalone architectural decision without associated implementation work."
34
- },
35
- {
36
- "id": "q2",
37
- "question": "Where does a lore entry created on February 21, 2026 (the third entry that day) get stored?",
38
- "choices": {
39
- "A": "`.paradigm/lore/L-2026-02-21-003.yaml`",
40
- "B": "`.paradigm/lore/entries/2026-02-21/L-2026-02-21-003.yaml`",
41
- "C": "`.paradigm/lore/entries/L-2026-02-21-003.yaml`",
42
- "D": "`.paradigm/lore/2026/02/21/003.yaml`",
43
- "E": "`.paradigm/history/lore/2026-02-21-003.yaml`"
44
- },
45
- "correct": "B",
46
- "explanation": "Lore uses date-partitioned storage: `.paradigm/lore/entries/{YYYY-MM-DD}/L-{date}-{NNN}.yaml`. The date directory groups entries by day, and the sequence number (003) auto-increments within each day."
47
- },
48
- {
49
- "id": "q3",
50
- "question": "You want to find all lore entries related to the payment system from the last month. Which MCP tool and approach is correct?",
51
- "choices": {
52
- "A": "`paradigm_lore_timeline` with a symbol filter",
53
- "B": "`paradigm_lore_search` with `symbol: '#payment-service'` and `dateFrom` set to 30 days ago",
54
- "C": "`paradigm_search` with `query: 'payment'` and `type: 'lore'`",
55
- "D": "Read all files in `.paradigm/lore/entries/` and grep for 'payment'",
56
- "E": "`paradigm_lore_record` with a search query parameter"
57
- },
58
- "correct": "B",
59
- "explanation": "`paradigm_lore_search` accepts filters including `symbol` (to match entries that touched a specific symbol) and `dateFrom`/`dateTo` for time ranges. `paradigm_lore_timeline` gives a high-level overview but doesn't support detailed filtering. `paradigm_search` searches the symbol index, not lore entries."
60
- },
61
- {
62
- "id": "q4",
63
- "question": "An agent modified 2 source files and did not record a lore entry. What happens at session end?",
64
- "choices": {
65
- "A": "The stop hook blocks the session — all modifications require lore entries",
66
- "B": "Nothing — the 2-file threshold is below the recording trigger",
67
- "C": "A warning is issued but the session completes normally",
68
- "D": "The system auto-generates a lore entry from the git diff",
69
- "E": "The post-write hook retroactively creates a minimal entry"
70
- },
71
- "correct": "B",
72
- "explanation": "The lore recording threshold is 3+ modified source files. With only 2 files modified, the session is not considered significant enough to require a lore entry. The stop hook only enforces lore recording when 3 or more source files were modified."
73
- },
74
- {
75
- "id": "q5",
76
- "question": "A team lead reviews a lore entry and gives it completeness: 3, quality: 5. What does this tell you?",
77
- "choices": {
78
- "A": "The entry is high quality but missing some information about what was done",
79
- "B": "The entry is poorly written but covers everything",
80
- "C": "The review scores are contradictory and invalid",
81
- "D": "The entry should be deleted and re-recorded",
82
- "E": "Both scores must be the same for a valid review"
83
- },
84
- "correct": "A",
85
- "explanation": "Completeness and quality are independent scores. A completeness of 3 means the entry is missing some details (perhaps decisions weren't documented or files weren't fully listed). A quality of 5 means what IS there is excellent — well-written, accurate, and useful. The review suggests the entry should be enriched with more detail while preserving its good writing."
86
- }
87
- ]
88
- },
89
- {
90
- "id": "sentinel-deep-dive",
91
- "title": "Sentinel Deep Dive",
92
- "content": "## Beyond Stack Traces\n\nTraditional error tracking gives you a stack trace and a count. Paradigm Sentinel gives you *symbolic context* — which component failed, where in a flow it failed, what gate was being evaluated, and which known pattern matches the failure. This transforms incident response from \"read the stack trace and hope\" to \"match against institutional knowledge and follow a resolution strategy.\"\n\n## Symbolic Incident Records\n\nWhen Sentinel records an incident, it captures both technical and symbolic context:\n\n```yaml\nid: INC-042\ntimestamp: \"2026-02-21T02:15:00Z\"\nstatus: open\nerror:\n message: \"Cannot read property 'id' of null\"\n stack: \"at PaymentProcessor.processRefund (payment-processor.ts:142)\"\n type: TypeError\nsymbols:\n component: \"#payment-processor\"\n flow: \"$refund-flow\"\n gate: \"^authenticated\"\nflowPosition:\n flowId: \"$refund-flow\"\n expected: [\"^authenticated\", \"^refund-eligible\", \"#process-refund\", \"!refund-completed\"]\n actual: [\"^authenticated\", \"^refund-eligible\", \"#process-refund\"]\n missing: [\"!refund-completed\"]\n failedAt: \"#process-refund\"\nenvironment: production\n```\n\nThe `flowPosition` field is critical — it tells you exactly where in the defined flow the failure occurred. The refund flow expected 4 steps; only 3 completed. The failure happened at `#process-refund`, and the `!refund-completed` signal never fired. This immediately narrows the investigation to the refund processing logic.\n\n## Incident Grouping\n\nSentinel automatically groups related incidents using symbolic similarity. When two incidents share the same component, flow, and error pattern, they form a group. The grouping algorithm uses a similarity threshold of 0.6 — incidents must share at least 60% of their symbolic context to cluster.\n\nAn `IncidentGroup` tracks the common symbols, error patterns, occurrence count, first/last seen timestamps, and which environments are affected. 
If a group matches a known failure pattern, Sentinel attaches it as a `suggestedPattern`.\n\n## Failure Patterns\n\nPatterns are the institutional knowledge of your error handling. Each pattern defines matching criteria and a resolution strategy:\n\n```yaml\nid: payment-null-ref-001\nname: \"Null reference in payment processing\"\npattern:\n symbols:\n component: \"#payment-processor\"\n errorType: [TypeError]\n errorContains: [\"Cannot read property\", \"null\"]\nresolution:\n description: \"Add null check before accessing refund object properties\"\n strategy: fix-code\n priority: high\n symbolsToModify: [\"#payment-processor\"]\n filesLikelyInvolved: [\"src/services/payment-processor.ts\"]\nconfidence:\n score: 85\n timesMatched: 12\n timesResolved: 10\n timesRecurred: 2\n```\n\nSix resolution strategies exist: `retry` (transient failure), `fallback` (use alternative path), `fix-data` (data issue), `fix-code` (bug), `ignore` (known harmless), and `escalate` (needs human decision). Pattern priority ranges from `low` through `medium` and `high` to `critical`.\n\nPatterns come from four sources: `manual` (team-created), `suggested` (Sentinel auto-generated from groups), `imported` (from another project), and `community` (shared patterns). Paradigm ships 26 seed patterns covering common failures like incomplete flows, gate bypasses, state race conditions, and unhandled signals.\n\n## The Triage Workflow\n\nSentinel follows a defined lifecycle for incidents:\n\n1. **Record** — `paradigm_sentinel_record` creates the incident with error details, symbolic context, and optional flow position. The incident starts as `open`.\n\n2. **Triage** — `paradigm_sentinel_triage` lists incidents filtered by status, symbol, environment, or error text. The matcher automatically suggests patterns that fit each incident.\n\n3. 
**Investigate** — `paradigm_sentinel_show` with `includeTimeline: true` shows the full flow timeline — every gate passed, signal emitted, and state change leading up to the failure. With `includeSimilar: true`, it surfaces related incidents that may share a root cause.\n\n4. **Resolve** — `paradigm_sentinel_resolve` closes the incident with a resolution: which pattern applied (if any), the fix commit hash, PR URL, and notes. Resolved incidents feed back into pattern confidence scores.\n\n5. **Pattern** — `paradigm_sentinel_add_pattern` creates new patterns from resolved incidents. When you fix a novel failure, capture the fix as a pattern so the next occurrence resolves faster.\n\nThe sequence is: **record → triage → show → resolve → add pattern**. This cycle builds institutional knowledge with every incident.\n\n## Stats and Health Metrics\n\n`paradigm_sentinel_stats` provides operational intelligence for a given time period: total incidents, open vs resolved counts, incidents by environment and day, pattern effectiveness (which patterns resolve most incidents vs which recur), symbol hotspots (components with the highest incident rates), and resolution metrics (average time to resolve, pattern vs manual resolution rates).\n\nThe `symbolHealth` view shows per-symbol incident history — use it to identify which components need hardening or refactoring.\n\n## Logger Transports\n\nSentinel integrates with the Paradigm logger through a transport layer. The `LogTransport` interface defines a simple contract: a transport receives structured log entries and delivers them somewhere — a file, a remote API, a database, or Sentinel's ingestion endpoint.\n\n```typescript\ninterface LogTransport {\n name: string;\n send(entry: LogEntry): void | Promise<void>;\n}\n```\n\nThe logger supports multiple transports simultaneously via `addTransport(transport)` and `removeTransport(name)`. By default, logs go to the console. 
Adding a `SentinelTransport` sends them to Sentinel's server as well, without changing any of your existing logging calls.\n\n## The SentinelTransport Bridge\n\nConnecting the Paradigm logger to Sentinel is a one-liner:\n\n```typescript\nimport { enableSentinel } from '@a-company/sentinel';\n\nenableSentinel({ endpoint: 'http://localhost:3001' });\n```\n\nThis call creates a `SentinelTransport` instance and registers it with the logger via `addTransport`. From that point forward, every `log.component(...)`, `log.gate(...)`, and `log.signal(...)` call is forwarded to Sentinel as a structured log entry. Error-level logs are automatically promoted to incident candidates.\n\nThe beauty of this design is zero code changes to your application. Your existing logger calls remain unchanged — the transport layer silently bridges them to Sentinel's observability pipeline.\n\n## Metrics API\n\nSentinel's server exposes a metrics API for recording and querying application metrics:\n\n**POST /api/metrics** — Record a metric data point. Supports three metric types:\n- `counter` — Monotonically increasing values (e.g., request count, error count)\n- `gauge` — Point-in-time values that can go up or down (e.g., active connections, queue depth)\n- `histogram` — Distribution of values over time (e.g., response latency, payload size)\n\n```json\n{\n \"name\": \"api.requests.total\",\n \"type\": \"counter\",\n \"value\": 1,\n \"labels\": { \"method\": \"POST\", \"route\": \"/api/payments\" },\n \"timestamp\": \"2026-02-21T14:30:00Z\"\n}\n```\n\n**GET /api/metrics** — Query metrics with optional filters by name, type, labels, and time range. Returns aggregated data suitable for dashboards and alerting.\n\n## Traces API\n\nSentinel supports distributed tracing through span trees:\n\n**POST /api/traces** — Record a trace span. Each span has a `traceId`, `spanId`, optional `parentSpanId`, `operationName`, `startTime`, `endTime`, and `tags`. 
Spans with the same `traceId` form a tree — the root span has no parent, and child spans reference their parent via `parentSpanId`.\n\n**GET /api/traces** — Query traces by operation name, service, time range, or minimum duration. Returns full span trees with timing breakdowns.\n\n## Service Registry\n\nSentinel maintains a live registry of services reporting data:\n\n**POST /api/services** — Register or update a service. Each service entry includes name, version, environment, health status, and last-seen timestamp.\n\n**GET /api/services** — List all registered services with their current health status and metadata. This provides a real-time view of what is running and where.",
93
- "keyConcepts": [
94
- "Symbolic incident records capture component, flow, gate, and signal context",
95
- "Flow position tracking shows exactly where in a flow a failure occurred",
96
- "Automatic incident grouping with 0.6 similarity threshold",
97
- "Failure patterns define matching criteria and resolution strategies",
98
- "Six resolution strategies: retry, fallback, fix-data, fix-code, ignore, escalate",
99
- "Triage lifecycle: record → triage → show → resolve → add pattern",
100
- "26 seed patterns ship with Paradigm covering common failure modes",
101
- "Stats surface symbol hotspots, pattern effectiveness, and resolution rates",
102
- "LogTransport interface enables pluggable log delivery via addTransport/removeTransport",
103
- "enableSentinel() one-liner bridges the Paradigm logger to Sentinel with zero code changes",
104
- "Metrics API supports counter, gauge, and histogram metric types",
105
- "Traces API records distributed span trees with parent-child relationships",
106
- "Service registry provides live health status for all reporting services"
107
- ],
108
- "quiz": [
109
- {
110
- "id": "q1",
111
- "question": "An incident record shows `flowPosition.actual` has 3 entries and `flowPosition.expected` has 5. The `failedAt` field points to the third step. What does this tell you?",
112
- "choices": {
113
- "A": "Three out of five flow steps completed successfully before the failure",
114
- "B": "The flow is missing 2 step definitions and needs to be updated",
115
- "C": "The third step failed, preventing the last 2 steps (including their signals) from executing",
116
- "D": "Only the last 2 steps need to be investigated",
117
- "E": "The flow validation is broken and should be re-run"
118
- },
119
- "correct": "C",
120
- "explanation": "The `actual` array shows what actually executed, `expected` shows what should have. Three steps executed, meaning the first two succeeded and the third (`failedAt`) is where the failure occurred. The remaining 2 expected steps (in `missing`) never ran because execution stopped at the failure point."
121
- },
122
- {
123
- "id": "q2",
124
- "question": "A failure pattern has confidence scores: timesMatched=20, timesResolved=15, timesRecurred=5. What does a recurrence rate of 25% suggest?",
125
- "choices": {
126
- "A": "The pattern is highly effective and should be trusted",
127
- "B": "The resolution strategy may not fully address the root cause — the fix works sometimes but the issue returns",
128
- "C": "The pattern's matching criteria are too broad and catching unrelated incidents",
129
- "D": "25% is normal and healthy for any pattern",
130
- "E": "The pattern should be deleted and replaced"
131
- },
132
- "correct": "B",
133
- "explanation": "timesRecurred (5) out of timesResolved (15) gives a 33% recurrence rate after resolution. This suggests the resolution strategy addresses symptoms but not the root cause. The pattern still has value (it resolves 67% permanently), but the resolution description should be updated with a more thorough fix, or a second pattern created for the recurring variant."
134
- },
135
- {
136
- "id": "q3",
137
- "question": "You receive a 2 AM production alert. What is the correct Sentinel-powered response sequence?",
138
- "choices": {
139
- "A": "`paradigm_sentinel_triage` → `paradigm_sentinel_record` → fix → `paradigm_sentinel_resolve`",
140
- "B": "`paradigm_sentinel_record` → `paradigm_sentinel_patterns` → `paradigm_wisdom_context` → fix → `paradigm_sentinel_resolve`",
141
- "C": "`paradigm_status` → read all files → find bug → `paradigm_sentinel_record`",
142
- "D": "`paradigm_sentinel_stats` → identify hotspot → fix → `paradigm_sentinel_resolve`",
143
- "E": "`paradigm_sentinel_record` → `paradigm_orchestrate_inline` → deploy agents → wait"
144
- },
145
- "correct": "B",
146
- "explanation": "First, record the incident with `paradigm_sentinel_record` to start the clock and capture the error. Then check `paradigm_sentinel_patterns` for known fixes — this could save hours if the failure matches an existing pattern. Then check `paradigm_wisdom_context` for antipatterns on the failing component. Fix with full context, then resolve. Recording first is critical — you can't triage what you haven't recorded."
147
- },
148
- {
149
- "id": "q4",
150
- "question": "Sentinel ships with 26 seed patterns. Which pattern source would a team-created pattern from a post-mortem use?",
151
- "choices": {
152
- "A": "`community`",
153
- "B": "`imported`",
154
- "C": "`manual`",
155
- "D": "`suggested`",
156
- "E": "`seed`"
157
- },
158
- "correct": "C",
159
- "explanation": "Patterns have four sources: `manual` (team-created), `suggested` (auto-generated by Sentinel from incident groups), `imported` (from another project), and `community` (shared open-source patterns). A pattern created by the team during a post-mortem is `manual`. The seed patterns that ship with Paradigm use `manual` source as well."
160
- },
161
- {
162
- "id": "q5",
163
- "question": "Two incidents share the same component (#auth-service) and error type (TypeError) but different flows. Sentinel's similarity threshold is 0.6. Will they be grouped?",
164
- "choices": {
165
- "A": "Yes — sharing a component and error type exceeds the 0.6 threshold",
166
- "B": "No — different flows always prevent grouping",
167
- "C": "It depends on what other symbolic context they share — 0.6 means 60% overlap required",
168
- "D": "Only if they occurred within the same hour",
169
- "E": "Only if a human manually groups them"
170
- },
171
- "correct": "C",
172
- "explanation": "The 0.6 similarity threshold means 60% of symbolic context must overlap. Sharing a component and error type provides some overlap, but different flows reduce it. Whether they cross 0.6 depends on other shared context — same gate, same environment, similar error message. Grouping is automatic but similarity-driven, not based on any single field."
173
- },
174
- {
175
- "id": "q6",
176
- "question": "How do you connect the Paradigm logger to Sentinel's observability pipeline?",
177
- "choices": {
178
- "A": "Replace all `log.component()` calls with `sentinel.log()` calls throughout your codebase",
179
- "B": "Call `enableSentinel({ endpoint: '...' })` once — it registers a SentinelTransport via addTransport with zero changes to existing logging code",
180
- "C": "Configure a `sentinel` key in `.paradigm/config.yaml` and restart the application",
181
- "D": "Import SentinelTransport in every file that uses the logger",
182
- "E": "Set the `SENTINEL_ENDPOINT` environment variable — the logger auto-detects it"
183
- },
184
- "correct": "B",
185
- "explanation": "The SentinelTransport bridge is designed for zero-code-change adoption. Calling `enableSentinel()` once creates a SentinelTransport and registers it with the logger via `addTransport`. From that point, all existing `log.component()`, `log.gate()`, and `log.signal()` calls are automatically forwarded to Sentinel. No changes to individual logging calls are needed."
186
- },
187
- {
188
- "id": "q7",
189
- "question": "You want to track API response latency in Sentinel. Which metric type should you use?",
190
- "choices": {
191
- "A": "`counter` — increment it by the latency value on each request",
192
- "B": "`gauge` — set it to the current response time",
193
- "C": "`histogram` — record each response time to build a distribution over time",
194
- "D": "`timer` — Sentinel has a dedicated timer metric type for latency",
195
- "E": "`counter` with a `latency` label containing the value"
196
- },
197
- "correct": "C",
198
- "explanation": "Histogram is the correct metric type for distributions like response latency. A histogram records individual values and lets you compute percentiles (p50, p95, p99), averages, and distributions over time. A counter only tracks cumulative totals, a gauge only captures point-in-time snapshots, and there is no dedicated timer type — histograms serve that purpose."
199
- }
200
- ]
201
- },
202
- {
203
- "id": "aspect-graph-internals",
204
- "title": "Aspect Graph Internals",
205
- "content": "## SQLite Schema\n\nThe aspect graph database at `.paradigm/aspect-graph.db` uses six core tables that model the full aspect ecosystem:\n\n**`aspects`** — The primary table storing aspect metadata. Columns include `id` (the aspect symbol, e.g., `token-expiry-24h`), `description`, `value`, `category` (rule/decision/constraint/configuration/invariant), `severity` (low/medium/high/critical), and `content_hash` (SHA-256 of the combined anchor code for drift detection). Each row represents one aspect from a `.purpose` file.\n\n**`anchors`** — Stores code anchor locations. Columns: `aspect_id` (foreign key to aspects), `file_path`, `start_line`, `end_line`, and `content_hash` (SHA-256 of the code at those lines). An aspect can have multiple anchors across different files.\n\n**`edges`** — The graph edges connecting aspects to other symbols. Columns: `source` (the aspect), `target` (any symbol), `relation` (enforced-by, depends-on, contradicts, supersedes, related-to), `weight` (numeric confidence, default 1.0 for explicit edges), and `origin` (explicit, inferred, or learned). This table is what makes the aspect system a graph rather than a flat list.\n\n**`lore_links`** — Connects aspects to lore entries. Columns: `aspect_id` and `lore_id`. These links are materialized from the `lore` field in aspect YAML definitions, and additional links are inferred when two aspects share lore references.\n\n**`search_weights`** — The learning system's memory. Columns: `query` (the search string), `aspect_id` (the result), and `weight` (accumulated confidence). This table powers Tier 1 of the three-tier search — when a query matches a stored mapping with sufficient weight, the result is returned immediately without FTS5 or fuzzy matching.\n\n**`heatmap`** — Tracks aspect access patterns. Columns: `aspect_id`, `access_type` (search, ripple, navigate, direct), `count`, and `last_accessed`. 
This data drives the `paradigm_aspect_heatmap` tool, revealing which aspects are most frequently referenced and how they are typically discovered.\n\n## Recursive Ripple\n\nThe aspect graph enables recursive ripple analysis — when you call `paradigm_ripple` on a symbol that has aspect edges, the ripple follows those edges to discover indirect impacts. The algorithm uses weighted breadth-first search (BFS) with three configurable parameters:\n\n- **maxDepth** — How many hops to traverse. Default is 5, maximum is 10. Each hop follows one edge in the graph. At depth 1, you see direct connections. At depth 5, you see connections five edges away.\n- **minWeight** — The minimum cumulative weight to continue traversal. Default is 0.1. As the BFS traverses edges, it multiplies the current weight by each edge's weight (multiplicative decay). When the cumulative weight drops below minWeight, that branch is pruned.\n- **Queue limit** — Maximum BFS queue size: 1000 nodes. This prevents runaway traversals in densely connected graphs. If the queue exceeds 1000 entries, the oldest entries are dropped.\n\nThe multiplicative decay is the key mechanism. An explicit edge with weight 1.0 passes full confidence to the next hop. An inferred edge with weight 0.5 halves the confidence. After two inferred edges, the weight is 0.25 — and after four, it drops to 0.0625, below the default minWeight threshold. This naturally limits traversal depth through low-confidence paths while allowing full traversal through high-confidence ones.\n\n## Heatmap Tracking\n\nEvery time an aspect is accessed through any MCP tool, the heatmap table records the access. 
Four access types are tracked:\n\n- **search** — The aspect was found via `paradigm_aspect_search`\n- **ripple** — The aspect was encountered during `paradigm_ripple` traversal\n- **navigate** — The aspect was discovered via `paradigm_navigate`\n- **direct** — The aspect was accessed by ID via `paradigm_aspect_get`\n\nThe heatmap serves two purposes. First, it powers the `paradigm_aspect_heatmap` tool, which ranks aspects by access frequency and reveals usage patterns. Second, it provides data for project health analysis — aspects that are never accessed may be stale or poorly named, while aspects accessed frequently across multiple types are clearly central to the project.\n\n## Materialization Pipeline\n\nThe aspect graph is rebuilt during `paradigm_reindex` through a five-step pipeline:\n\n1. **openAspectGraph** — Opens (or creates) the SQLite database at `.paradigm/aspect-graph.db`. If the database exists, all tables are cleared for a fresh rebuild. This ensures the graph always reflects the current state of YAML files.\n\n2. **materializeAspects** — Reads all `.purpose` files, extracts aspect definitions, and writes them to the `aspects`, `anchors`, and `edges` tables. For each anchor, the pipeline reads the actual source code at the specified line range and computes a SHA-256 content hash. Explicit edges from the YAML `edges` field are written with origin `explicit` and weight 1.0. Inferred edges from `applies-to` references are written with origin `inferred` and weight 0.5.\n\n3. **materializeLoreLinks** — Reads the `lore` field from each aspect and creates entries in the `lore_links` table connecting aspects to their referenced lore entries.\n\n4. **inferLoreEdges** — Scans the `lore_links` table for aspects that share lore references. When two aspects both reference the same lore entry, a learned edge is created between them with origin `learned` and a weight proportional to the number of shared references. 
This discovers implicit relationships that were not explicitly declared.\n\n5. **closeAspectGraph** — Commits all changes, runs ANALYZE for query optimization, and closes the database connection.\n\nBecause the entire graph is rebuilt from YAML on every reindex, there is no migration or versioning concern. If the schema changes in a future Paradigm version, the next reindex simply creates the new schema.\n\n## Category Inference\n\nWhen an aspect definition omits the `category` field, the materialization pipeline attempts to infer it from the description using keyword matching:\n\n- Descriptions containing \"must\", \"always\", \"never\", \"required\" suggest `rule`\n- Descriptions containing \"decided\", \"chosen\", \"selected\", \"opted\" suggest `decision`\n- Descriptions containing \"limit\", \"maximum\", \"minimum\", \"cannot exceed\" suggest `constraint`\n- Descriptions containing \"configured\", \"set to\", \"defaults to\", \"environment\" suggest `configuration`\n- Descriptions containing \"always true\", \"never negative\", \"invariant\", \"guarantee\" suggest `invariant`\n\nSimilarly, severity can be inferred from tags: aspects tagged `[critical]` or `[security]` default to `high` severity, aspects tagged `[compliance]` default to `critical`, and untagged aspects default to `medium`.\n\nInference is a fallback — explicit `category` and `severity` fields in YAML always take precedence.\n\n## Weight Decay in Search Learning\n\nThe search learning system uses a reinforcement model. When `paradigm_aspect_confirm` is called with a query and aspect ID:\n\n1. The selected result's weight for that query gets **+1.0** added to its current weight in the `search_weights` table. If no entry exists, one is created with weight 1.0.\n2. All other aspects that were previously returned for the same query get their weights multiplied by **0.95** (a 5% decay). 
This applies only to aspects that have existing `search_weights` entries for this query — it does not penalize aspects that were never returned for this query.\n\nThis mechanism is self-correcting. If result A is consistently confirmed for a query, its weight grows (1.0, 2.0, 3.0, ...) while alternatives decay (1.0, 0.95, 0.9025, ...). After enough confirmations, the learned mapping becomes dominant and Tier 1 returns it instantly. But if the user later confirms a different result B for the same query, B starts climbing while A begins decaying — the system adapts to changing preferences without requiring manual intervention.",
206
- "keyConcepts": [
207
- "Six SQLite tables: aspects, anchors, edges, lore_links, search_weights, heatmap",
208
- "Recursive ripple uses weighted BFS with multiplicative decay across edges",
209
- "Default maxDepth is 5 (max 10), default minWeight is 0.1, queue limit is 1000",
210
- "Four heatmap access types: search, ripple, navigate, direct",
211
- "Five-step materialization pipeline: open, materialize aspects, materialize lore links, infer lore edges, close",
212
- "Category inference uses description keywords when category is not explicitly set",
213
- "Search weight decay: +1.0 to confirmed result, *0.95 to all others for the same query",
214
- "Inferred edges from applies-to have weight 0.5 and origin 'inferred'"
215
- ],
216
- "quiz": [
217
- {
218
- "id": "q1",
219
- "question": "What is the default maxDepth for recursive ripple in the aspect graph?",
220
- "choices": {
221
- "A": "3 — to keep traversals fast and focused",
222
- "B": "5 — balancing depth of discovery with performance",
223
- "C": "10 — the maximum allowed value",
224
- "D": "Unlimited — ripple traverses until minWeight is reached",
225
- "E": "1 — only direct connections are followed by default"
226
- },
227
- "correct": "B",
228
- "explanation": "The default maxDepth for recursive ripple is 5, with a maximum configurable value of 10. This default balances discovery depth with performance — at 5 hops, you see a meaningful neighborhood without traversing the entire graph. The minWeight threshold (default 0.1) provides additional pruning by cutting off low-confidence paths before they reach maxDepth."
229
- },
230
- {
231
- "id": "q2",
232
- "question": "What happens to search weights when a result is confirmed via paradigm_aspect_confirm?",
233
- "choices": {
234
- "A": "The confirmed result gets +0.5 weight and all others are deleted",
235
- "B": "The confirmed result gets +1.0 weight and all other results for the same query decay by *0.95",
236
- "C": "All results for the query get +1.0 weight to reinforce the entire set",
237
- "D": "The confirmed result is permanently pinned and decay is disabled",
238
- "E": "The confirmed result replaces all other entries for that query"
239
- },
240
- "correct": "B",
241
- "explanation": "The search learning system adds +1.0 to the confirmed result's weight for that query and multiplies all other existing results for the same query by 0.95 (a 5% decay). This self-correcting mechanism lets the best result rise to the top over time while alternatives gradually fade. The decay only applies to aspects that have existing search_weights entries for the query — it does not penalize unrelated aspects."
242
- },
243
- {
244
- "id": "q3",
245
- "question": "Which SQLite table stores aspect access frequency for the heatmap tool?",
246
- "choices": {
247
- "A": "aspects — in an access_count column",
248
- "B": "edges — access frequency is tracked per edge",
249
- "C": "search_weights — all access types feed into search weights",
250
- "D": "heatmap — with columns for aspect_id, access_type, count, and last_accessed",
251
- "E": "anchors — access is tracked per anchor location"
252
- },
253
- "correct": "D",
254
- "explanation": "The `heatmap` table stores aspect access frequency with columns for `aspect_id`, `access_type` (search, ripple, navigate, direct), `count`, and `last_accessed`. This dedicated table allows the `paradigm_aspect_heatmap` tool to rank aspects by usage frequency and break down how each aspect is typically discovered — whether through search, ripple analysis, navigation, or direct access."
255
- },
256
- {
257
- "id": "q4",
258
- "question": "What is the queue limit for recursive ripple BFS traversal?",
259
- "choices": {
260
- "A": "100 nodes — to keep memory usage minimal",
261
- "B": "500 nodes — a balance between coverage and performance",
262
- "C": "1000 nodes — preventing runaway traversals in dense graphs",
263
- "D": "Unlimited — the queue grows until maxDepth is reached",
264
- "E": "10000 nodes — large enough for enterprise-scale graphs"
265
- },
266
- "correct": "C",
267
- "explanation": "The BFS queue limit is 1000 nodes. This prevents runaway traversals in densely connected aspect graphs where the number of reachable nodes could grow exponentially with depth. When the queue exceeds 1000 entries, the oldest entries are dropped, ensuring the algorithm completes in bounded time and memory regardless of graph density."
268
- },
269
- {
270
- "id": "q5",
271
- "question": "How are aspect edges inferred from existing data during materialization?",
272
- "choices": {
273
- "A": "By analyzing import statements in source code files",
274
- "B": "From applies-to references with weight 0.5 and origin 'inferred', and from shared lore references with origin 'learned'",
275
- "C": "By running static analysis on anchor code blocks",
276
- "D": "From git commit history showing which aspects changed together",
277
- "E": "Only explicit edges are created — no inference occurs"
278
- },
279
- "correct": "B",
280
- "explanation": "The materialization pipeline creates inferred edges in two ways. First, `materializeAspects` generates edges from `applies-to` references with weight 0.5 and origin 'inferred' — when an aspect applies to a component, a relationship edge is created. Second, `inferLoreEdges` scans for aspects sharing lore references and creates edges with origin 'learned' and weight proportional to the overlap. These supplement explicit YAML edges to build a richer graph."
281
- }
282
- ]
283
- },
284
- {
285
- "id": "habits-practice",
286
- "title": "Habits & Practice",
287
- "content": "## Instinct vs Habit\n\nWhen you first learn to drive, you consciously think about every action — check mirrors, signal, check blind spot, change lanes. After thousands of miles, these become habits: automatic behaviors you execute without conscious effort. The Habits system brings this concept to AI-assisted development.\n\nWithout habits, an agent must be told every time: \"check ripple before modifying,\" \"validate flows after changing gates,\" \"record lore for significant sessions.\" With habits, these checks become automatic behavioral triggers — the system evaluates them at defined points and reports compliance. Over time, agents internalize the patterns, and the habit checks become confirmation rather than correction.\n\n## Habit Definitions\n\nEach habit is a structured rule with six fields:\n\n```yaml\nid: ripple-before-modify\nname: Check Ripple Before Modifying\ndescription: Always call paradigm_ripple before modifying any symbol\ncategory: discovery\ntrigger: preflight\nseverity: advisory\ncheck:\n type: tool-called\n params:\n tools: [paradigm_ripple]\nenabled: true\n```\n\n**Categories** classify what kind of discipline the habit enforces. There are six:\n- `discovery` — Exploring before acting (ripple, navigate, search)\n- `verification` — Validating after implementing (postflight, reindex)\n- `testing` — Ensuring test coverage for new code\n- `documentation` — Keeping .purpose files and lore entries current\n- `collaboration` — Checking team wisdom and expert knowledge\n- `security` — Validating gates and portal.yaml compliance\n\n**Triggers** define when the habit is evaluated. 
There are four:\n- `preflight` — Before starting implementation\n- `postflight` — After completing implementation\n- `on-commit` — Before committing changes\n- `on-stop` — Before the session ends (stop hook)\n\n**Severity** determines what happens when a habit is violated:\n- `advisory` — Log a note, don't block anything\n- `warn` — Show a warning to the agent/user\n- `block` — Prevent session completion until resolved (enforced by stop hook)\n\n## Check Types\n\nHabits verify compliance through twelve check types:\n\n| Check Type | What It Verifies |\n|---|---|\n| `tool-called` | Specified MCP tools were invoked during the session |\n| `file-exists` | Files matching glob patterns exist (e.g., test files) |\n| `file-modified` | Files matching patterns were modified during session |\n| `lore-recorded` | A lore entry was created (for 3+ file sessions) |\n| `symbols-registered` | New code is registered in .purpose files |\n| `gates-declared` | Routes have corresponding gates in portal.yaml |\n| `tests-exist` | Test files exist for modified components |\n| `git-clean` | Git working tree is clean — all changes committed |\n| `commit-message-format` | Commit messages match regex patterns (default: conventional commit prefix + Symbols: trailer) |\n| `flow-coverage` | Changes spanning 3+ components have a documented $flow |\n| `context-checked` | Session context/recovery tools (paradigm_session_health, paradigm_session_recover) were called |\n| `aspect-anchored` | Touched aspects (~) have valid code anchors verified via paradigm_aspect_check |\n\n## The 14 Seed Habits\n\nParadigm ships with 14 built-in habits that establish baseline discipline:\n\n1. **explore-before-implement** (preflight/advisory/discovery) — Called paradigm_ripple, paradigm_navigate, paradigm_search, or paradigm_related before coding\n2. **ripple-before-modify** (preflight/advisory/discovery) — Called paradigm_ripple specifically before modifying symbols\n3. 
**check-fragility** (preflight/advisory/discovery) — Called paradigm_history_fragility before touching symbols\n4. **wisdom-before-implement** (preflight/advisory/collaboration) — Checked paradigm_wisdom_context or paradigm_wisdom_expert\n5. **verify-before-done** (on-stop/warn/verification) — Called paradigm_pm_postflight before finishing\n6. **postflight-compliance** (on-stop/advisory/verification) — Ran postflight and reindex\n7. **test-new-components** (postflight/advisory/testing) — Test files exist for new components\n8. **purpose-coverage** (postflight/warn/documentation) — .purpose files cover modified directories\n9. **record-lore-for-significant** (on-stop/warn/documentation) — Lore recorded for 3+ file sessions\n10. **gates-for-routes** (postflight/warn/security) — Routes have portal.yaml gate coverage\n11. **commit-message-symbols** (on-commit/advisory/documentation) — Commit messages follow type(#symbol): format with Symbols: trailer\n12. **flow-coverage-for-multi-component** (postflight/advisory/documentation) — Changes spanning 3+ components have a documented $flow\n13. **context-session-awareness** (preflight/advisory/discovery) — Session recovery or context check tools were called for continuity\n14. **aspect-anchors-valid** (postflight/advisory/verification) — Aspects touched during the session have valid code anchors\n\n## Habit Loading and Overrides\n\nHabits load from three sources, merged in order (later wins):\n\n1. **Seed habits** — The 14 built-in habits (always present)\n2. **Global habits** — `~/.paradigm/habits.yaml` (optional, applies to all projects)\n3. 
**Project habits** — `.paradigm/habits.yaml` (optional, project-specific)\n\nOverrides let you adjust severity or disable habits without redefining them:\n\n```yaml\n# .paradigm/habits.yaml\noverrides:\n ripple-before-modify:\n severity: block # Upgrade from advisory to blocking\n test-new-components:\n enabled: false # Disable for this project\ncustom:\n - id: check-migrations\n name: Verify DB Migrations\n category: verification\n trigger: on-commit\n severity: warn\n check:\n type: file-exists\n params:\n patterns: [\"migrations/*.sql\"]\n```\n\n## Practice Profiles\n\nEvery habit evaluation is recorded as a practice event with a result: `followed`, `skipped`, or `partial`. These events accumulate into practice profiles that show compliance rates over time.\n\n`paradigm_habits_status` returns a practice profile with: overall compliance rate, strongest and weakest categories, per-category breakdowns, trend analysis (improving/declining/stable), and incident correlations — habits whose skipped evaluations correlate with higher incident rates.\n\nThe incident correlation is powerful: if skipping `ripple-before-modify` correlates with a 3x higher incident rate for the modified symbols, that is concrete evidence for upgrading the habit's severity.\n\n## MCP Tools\n\n**`paradigm_habits_check`** — Evaluate habits for a trigger point. Pass the trigger (`preflight`, `postflight`, `on-stop`), optionally with `filesModified` and `symbolsTouched` for context. Returns evaluations with follow/skip/partial results and whether any blocking violations exist.\n\n**`paradigm_habits_status`** — Get the practice profile for an engineer over a time period (7d, 30d, 90d, or all). Shows compliance rates, category breakdowns, trends, and incident correlations.\n\n**`paradigm_practice_context`** — Before modifying symbols, get habit-aware warnings. 
Pass the symbols you are about to touch, and it returns relevant habits, recent compliance rates, and suggestions based on your weak areas.\n\n## CLI Commands\n\nThe CLI provides full habit management:\n\n- `paradigm habits list` — List all habits with trigger, severity, and enabled status\n- `paradigm habits add` — Add a custom habit with check type, patterns, and tools\n- `paradigm habits edit <id>` — Edit habit fields (for seed habits: severity and enabled only)\n- `paradigm habits remove <id>` — Remove a custom habit\n- `paradigm habits enable/disable <id>` — Toggle a habit on or off\n- `paradigm habits check --trigger <trigger>` — Evaluate compliance for a specific trigger\n- `paradigm habits status` — Practice profile with compliance rates and trends\n- `paradigm habits init` — Initialize a habits.yaml file for the project\n\n## Platform Targeting\n\nHabits support a `platforms` field to restrict evaluation to specific platforms. For example, a habit with `platforms: ['claude', 'cursor']` will only be evaluated when running in those environments. A habit with `platforms: ['cli']` will only fire during CLI-driven workflows. When `platforms` is omitted, the habit applies everywhere.",
288
- "keyConcepts": [
289
- "Six categories: discovery, verification, testing, documentation, collaboration, security",
290
- "Four triggers: preflight, postflight, on-commit, on-stop",
291
- "Three severity levels: advisory (note), warn (visible), block (prevents completion)",
292
- "14 seed habits establish baseline discipline across all categories",
293
- "Three-layer loading: seed → global (~/.paradigm/) → project (.paradigm/)",
294
- "Practice profiles track compliance rates and trend direction",
295
- "Incident correlations link skipped habits to higher incident rates",
296
- "Twelve check types: tool-called, file-exists, file-modified, lore-recorded, symbols-registered, gates-declared, tests-exist, git-clean, commit-message-format, flow-coverage, context-checked, aspect-anchored"
297
- ],
298
- "quiz": [
299
- {
300
- "id": "q1",
301
- "question": "A project wants the `ripple-before-modify` habit to block session completion instead of just advising. How should they configure this?",
302
- "choices": {
303
- "A": "Delete the seed habit and create a new one with `severity: block`",
304
- "B": "Add an override in `.paradigm/habits.yaml` setting `severity: block` for `ripple-before-modify`",
305
- "C": "Edit the seed-habits.json file directly in node_modules",
306
- "D": "Create a global override in `~/.paradigm/habits.yaml` — project-level overrides cannot change severity",
307
- "E": "Set `PARADIGM_HABIT_SEVERITY=block` environment variable"
308
- },
309
- "correct": "B",
310
- "explanation": "Project-level overrides in `.paradigm/habits.yaml` can change any field of a seed habit, including severity. The three-layer merge means project settings override global settings, which override seed defaults. You never edit seed habits directly."
311
- },
312
- {
313
- "id": "q2",
314
- "question": "An agent's practice profile shows: discovery compliance 95%, verification 40%, testing 30%. The agent frequently skips `verify-before-done` and `test-new-components`. What does this pattern reveal?",
315
- "choices": {
316
- "A": "The agent is good at exploring but poor at following through — it rushes to finish without validating",
317
- "B": "The discovery habits are too easy and should be made harder",
318
- "C": "The agent needs more seed habits in the testing category",
319
- "D": "This is a healthy pattern — discovery is the most important category",
320
- "E": "The verification and testing habits should be disabled since the agent skips them"
321
- },
322
- "correct": "A",
323
- "explanation": "High discovery compliance with low verification and testing shows an agent that does good pre-work but doesn't follow through with validation. This is the 'explore well, ship hastily' antipattern. The fix is to upgrade verification and testing habits to `warn` or `block` severity, not to disable them."
324
- },
325
- {
326
- "id": "q3",
327
- "question": "The `record-lore-for-significant` habit fires on which trigger and at what threshold?",
328
- "choices": {
329
- "A": "`preflight` — checks if lore was recorded in the previous session",
330
- "B": "`on-stop` — checks if lore was recorded when 3+ source files were modified",
331
- "C": "`postflight` — always checks for lore regardless of file count",
332
- "D": "`on-commit` — requires lore for every commit",
333
- "E": "`on-stop` — checks if lore was recorded when any files were modified"
334
- },
335
- "correct": "B",
336
- "explanation": "The `record-lore-for-significant` habit triggers `on-stop` (before session end) and uses the `lore-recorded` check type, which fires when 3 or more source files were modified. This threshold captures significant sessions while ignoring trivial edits."
337
- },
338
- {
339
- "id": "q4",
340
- "question": "Practice profiles show that skipping `wisdom-before-implement` correlates with a 3x incident rate. What action should the team take?",
341
- "choices": {
342
- "A": "Disable the habit since it is not preventing incidents anyway",
343
- "B": "Upgrade its severity from `advisory` to `warn` or `block` based on the evidence",
344
- "C": "Add more discovery habits to compensate",
345
- "D": "The correlation is coincidental — ignore it",
346
- "E": "Move the habit from `preflight` to `on-stop` trigger"
347
- },
348
- "correct": "B",
349
- "explanation": "Incident correlations provide concrete evidence for severity decisions. A 3x incident rate when skipping wisdom checks is strong evidence that the check prevents real problems. Upgrading to `warn` makes it visible; upgrading to `block` enforces it. This is the feedback loop in action — practice data drives policy changes."
350
- }
351
- ]
352
- },
353
- {
354
- "id": "session-intelligence",
355
- "title": "Session Intelligence",
356
- "content": "## The Session Problem\n\nAI agent sessions are ephemeral. When a session ends — whether by completion, crash, context exhaustion, or human interruption — everything the agent knew vanishes. The next session starts blank, with no memory of what was explored, decided, or partially implemented. Session Intelligence solves this with checkpoints, breadcrumbs, and a global brain that persists knowledge across sessions and even across projects.\n\n## Session Checkpoints\n\nCheckpoints are deliberate snapshots saved at phase transitions. There are four phases:\n\n| Phase | When to Checkpoint | What to Capture |\n|---|---|---|\n| `planning` | After reading requirements, before coding | Plan, approach, key decisions |\n| `implementing` | After starting code changes | Modified files, symbols touched, decisions made |\n| `validating` | After implementation, before tests | All modified files, test plan |\n| `complete` | Task finished | Summary, final file list |\n\nCreate a checkpoint with `paradigm_session_checkpoint`:\n\n```\nparadigm_session_checkpoint({\n phase: \"implementing\",\n context: \"Adding JWT auth middleware — RS256 signing, httpOnly refresh tokens\",\n modifiedFiles: [\"src/middleware/auth.ts\", \"src/handlers/refresh.ts\"],\n symbolsTouched: [\"#auth-middleware\", \"^authenticated\"],\n decisions: [\"RS256 over HS256 for public key verification\"]\n})\n```\n\nOnly `phase` and `context` are required — everything else is optional. The context field should be a concise 1-3 sentence summary of your current state of mind. Think of it as answering \"if I were teleported into this session right now, what would I need to know?\"\n\nCheckpoints are stored in `.paradigm/session-checkpoint.json` and auto-expire after 7 days.\n\n## Breadcrumb Tracking\n\nWhile checkpoints are deliberate, breadcrumbs are automatic. 
Every MCP tool call generates a breadcrumb recording the timestamp, tool name, symbol being modified (if applicable), and a human-readable summary. Breadcrumbs are stored in `.paradigm/session-breadcrumbs.json` with a maximum of 50 entries (auto-rotating — oldest dropped when full).\n\nBreadcrumbs capture the narrative of a session: \"searched for payment symbols → checked ripple on #payment-service → read auth middleware → modified #auth-handler → created ^refund-eligible gate.\" This trail lets the next session understand not just what was done but the reasoning path.\n\n## Session Recovery\n\nRecovery is the payoff. Call `paradigm_session_recover` (or let it happen automatically — recovery data is surfaced on your first Paradigm tool call in a new session) to get:\n\n- **breadcrumbs** — The last session's tool call trail\n- **lastCheckpoint** — The most recent checkpoint with phase, context, and details\n- **symbolsModified** — All symbols that were changed\n- **recentActivity** — A human-readable summary of what happened\n\nThis is crash recovery for AI agents. If a session dies at 87% context with half-finished auth middleware, the next session immediately knows: phase was `implementing`, auth middleware was being added, RS256 was chosen, these files were modified, and tests still need to be written.\n\n## The Global Brain\n\nSession Intelligence extends beyond individual projects through the Global Brain at `~/.paradigm/`. 
This user-level directory stores:\n\n- **Global wisdom** — Antipatterns and decisions that apply everywhere (e.g., \"never use HS256 for JWT signing in production\")\n- **Global habits** — Behavioral overrides that apply to all projects\n- **Cross-project practice events** — Compliance data aggregated across projects\n\nThe distinction between project scope and global scope is important:\n\n| Scope | Location | Applies To | Example |\n|---|---|---|---|\n| Project | `.paradigm/` | This project only | \"Use Redis for caching in this app\" |\n| Global | `~/.paradigm/` | All projects | \"Always check fragility before modifying critical symbols\" |\n\n## Wisdom Promotion\n\nWhen a project-local wisdom entry proves universally valuable, promote it to global scope with `paradigm_wisdom_promote`. This copies the entry from `.paradigm/wisdom/` to `~/.paradigm/wisdom/`, making it available in every project.\n\nFor example, if a team discovers that \"always wrap Express v5 async middleware in try-catch\" prevents errors across multiple projects, promoting this wisdom means every future project session gets this advice automatically when touching Express middleware.\n\n## Handoff Persistence\n\nWhen context usage exceeds 80-85%, `paradigm_session_health` recommends a handoff. `paradigm_handoff_prepare` creates a structured handoff document with: summary of work done, modified files, symbols touched, next steps, and open questions. This document is stored alongside session data so the receiving session can `paradigm_session_recover` and pick up exactly where the previous session left off.\n\nThe handoff is not just a note — it is a contract between sessions. The outgoing session declares what was done and what remains. 
The incoming session validates against the actual file state and continues.\n\n## Best Practices\n\n- Checkpoint at every phase transition — the cost is ~100 tokens, the value is crash recovery\n- Write `context` as if briefing a stranger with no prior knowledge\n- Promote wisdom that survives 3+ projects to global scope\n- Use handoffs proactively at 80% context, not reactively at 95%\n- Let breadcrumbs accumulate naturally — don't try to manage them manually",
357
- "keyConcepts": [
358
- "Four checkpoint phases: planning, implementing, validating, complete",
359
- "Breadcrumbs auto-track every MCP tool call (max 50, auto-rotating)",
360
- "paradigm_session_recover restores last checkpoint and breadcrumb trail",
361
- "Auto-recovery surfaces data on first tool call of a new session",
362
- "Global Brain at ~/.paradigm/ stores cross-project wisdom and habits",
363
- "paradigm_wisdom_promote moves project wisdom to global scope",
364
- "Checkpoints stored in .paradigm/session-checkpoint.json, expire after 7 days",
365
- "Handoff at 80% context creates structured continuation contract"
366
- ],
367
- "quiz": [
368
- {
369
- "id": "q1",
370
- "question": "You have finished implementing a feature and are about to write tests. Which checkpoint phase should you save?",
371
- "choices": {
372
- "A": "`implementing` — you just finished implementing",
373
- "B": "`validating` — you are transitioning from implementation to validation",
374
- "C": "`complete` — the feature code is done",
375
- "D": "`testing` — you are about to test",
376
- "E": "`planning` — you need to plan the tests first"
377
- },
378
- "correct": "B",
379
- "explanation": "Checkpoint at the phase you are entering, not the one you are leaving. The `validating` phase captures the state after implementation is complete but before tests/review. If the session crashes now, recovery knows implementation is done and testing is the next step. `complete` is only for when everything (including tests) is finished."
380
- },
381
- {
382
- "id": "q2",
383
- "question": "What is the maximum number of breadcrumbs stored, and what happens when the limit is reached?",
384
- "choices": {
385
- "A": "100 breadcrumbs, then the file is archived and a new one starts",
386
- "B": "50 breadcrumbs, then the oldest are dropped as new ones are added",
387
- "C": "Unlimited — breadcrumbs grow until the session ends",
388
- "D": "50 breadcrumbs, then tracking stops until the next session",
389
- "E": "25 breadcrumbs per phase, resetting at each checkpoint"
390
- },
391
- "correct": "B",
392
- "explanation": "Breadcrumbs have a hard cap of 50 entries with auto-rotation — when the 51st breadcrumb is recorded, the oldest one is dropped. This keeps the file small and focused on recent activity while still providing enough trail for meaningful recovery."
393
- },
394
- {
395
- "id": "q3",
396
- "question": "A team discovers that 'always validate JWT expiry before refresh' prevents bugs across 4 different projects. What should they do?",
397
- "choices": {
398
- "A": "Add the wisdom to each project's `.paradigm/wisdom/` individually",
399
- "B": "Use `paradigm_wisdom_promote` to move it to `~/.paradigm/wisdom/` for global scope",
400
- "C": "Create a new seed habit for JWT validation",
401
- "D": "Add it to the CLAUDE.md file in each project",
402
- "E": "Record it as a lore entry in the most recent project"
403
- },
404
- "correct": "B",
405
- "explanation": "When wisdom proves valuable across multiple projects, `paradigm_wisdom_promote` copies it from project scope (`.paradigm/wisdom/`) to global scope (`~/.paradigm/wisdom/`). This makes it available automatically in every future session across all projects. Adding it individually (A) or to CLAUDE.md (D) works but doesn't leverage the Global Brain."
406
- },
407
- {
408
- "id": "q4",
409
- "question": "Your session is at 82% context usage. What should you do?",
410
- "choices": {
411
- "A": "Continue working — 82% is still plenty of room",
412
- "B": "Immediately stop and call `paradigm_handoff_prepare` with summary and next steps",
413
- "C": "Call `paradigm_session_health` to confirm, then proactively prepare a handoff while finishing current work",
414
- "D": "Delete old messages to free up context space",
415
- "E": "Save a checkpoint and keep working until 95%"
416
- },
417
- "correct": "C",
418
- "explanation": "At 80-85%, the recommendation is proactive handoff preparation. Call `paradigm_session_health` to confirm the recommendation, then prepare the handoff with `paradigm_handoff_prepare` while completing your current task. Waiting until 95% (E) risks running out of context mid-task. The sweet spot is preparing the handoff while you still have room to finish current work cleanly."
419
- },
420
- {
421
- "id": "q5",
422
- "question": "A new session starts and the agent calls `paradigm_status`. What happens with session recovery?",
423
- "choices": {
424
- "A": "The agent must explicitly call `paradigm_session_recover` — recovery is never automatic",
425
- "B": "Recovery data is automatically surfaced on the first Paradigm tool call",
426
- "C": "Recovery only works if the previous session saved a checkpoint",
427
- "D": "The agent must read `.paradigm/session-checkpoint.json` manually",
428
- "E": "Recovery data is shown only if the previous session crashed"
429
- },
430
- "correct": "B",
431
- "explanation": "Auto-recovery is triggered on the first Paradigm MCP tool call in a new session — whether that is `paradigm_status`, `paradigm_navigate`, or any other tool. The system surfaces the last checkpoint and recent breadcrumbs without the agent needing to explicitly request recovery. This ensures continuity even if the agent doesn't know to ask."
432
- }
433
- ]
434
- },
435
- {
436
- "id": "hook-enforcement",
437
- "title": "Hook Enforcement & Automation",
438
- "content": "## The Compliance Gap\n\nParadigm's value depends on discipline. Purpose files must be updated when code changes. Portal.yaml must reflect route additions. Lore must be recorded for significant sessions. Aspect anchors must point to real code. Without enforcement, these requirements become suggestions that erode over time.\n\nHooks close this gap. They are automated checks that run at specific points in the development workflow, catching violations before they become technical debt. Paradigm uses three hooks, each with a distinct role and severity.\n\n## The Stop Hook\n\nThe stop hook is the primary enforcer. It runs before an agent session completes and can **block** the session from finishing if compliance checks fail.\n\n**Trigger**: Before agent session end (Claude Code: Stop hook, Cursor: pre-finish)\n\n**Seven checks, in order:**\n\n1. **Source files modified without .purpose updates** — If 2+ source files were modified but zero paradigm metadata files (.purpose, portal.yaml, etc.) were updated, the hook blocks. This catches the \"implement and forget\" pattern.\n\n2. **Modified directories missing .purpose coverage** — The hook walks up the directory tree from each modified source file looking for a covering .purpose file. If no .purpose exists anywhere in the ancestor chain (including the project root), it blocks.\n\n3. **Route patterns without portal.yaml** — The hook scans modified files for route declaration patterns (Express `.get()`, `.post()`, decorators like `@Get()`, Rust macros like `#[actix_web::get]`). If routes are detected and portal.yaml was neither present nor modified, it blocks.\n\n4. **Stale aspect anchors** — The hook parses .purpose files for `anchors:` sections and validates that each referenced file still exists. If an anchor points to a deleted file, it blocks.\n\n5. **Pending .purpose freshness** — The post-write hook tracks files edited without .purpose updates in `.paradigm/.pending-review`. 
The stop hook checks this list: if source files are pending and their covering .purpose was not also modified during the session, it blocks.\n\n6. **Aspect coverage advisory** — If the project uses `~aspects`, the hook advises (non-blocking) to verify that anchor line numbers are still accurate after code changes.\n\n7. **Lore entry for significant sessions** — If 3+ source files were modified and no lore entry was recorded in `.paradigm/lore/entries/`, the hook blocks.\n\n**When blocked**, the hook outputs a clear list of violations with remediation steps. Fix the violations, then complete the session.\n\n## The Post-Write Hook\n\nThe post-write hook runs after every file edit (Edit or Write tool calls). It is **advisory only** — it never blocks.\n\n**Trigger**: After Edit or Write tool completes\n\n**Actions:**\n1. Extracts the edited file path\n2. Skips non-source files (.purpose, portal.yaml, .md, .lock, .json, .yaml, .gitignore, .env files) and paradigm directories (.paradigm/, .claude/, .cursor/)\n3. Appends source file paths to `.paradigm/.pending-review` (deduplicated)\n4. Checks if a .purpose file covers the edited directory\n5. If no .purpose exists: reminds \"No .purpose file covers {dir}/ — Create one\"\n6. Every 3 source files edited: general reminder to update .purpose files\n\nThe `.pending-review` file is the bridge between the post-write hook and the stop hook. Post-write accumulates the list; stop hook validates against it.\n\n## The Pre-Commit Hook\n\nThe pre-commit hook runs before `git commit` and handles index maintenance. It **never blocks**.\n\n**Trigger**: Before Bash commands containing `git commit`\n\n**Actions:**\n1. Runs `paradigm index --quiet` to rebuild scan-index.json, navigator.yaml, and flow-index.json\n2. Stages the rebuilt files so they are included in the commit\n3. Exits 0 (always succeeds)\n\nThis ensures that every commit has a fresh symbol index. 
Without this hook, the index would drift from the actual codebase between manual `paradigm scan` runs.\n\n## Hook Installation\n\nHooks are installed automatically by `paradigm shift` (full setup) or manually with `paradigm hooks install`. The installer detects the IDE (Claude Code or Cursor) and writes the appropriate hook format.\n\nFor Claude Code, hooks are configured in `.claude/settings.json` using the hooks API — stop hooks, PreToolUse matchers (for Bash commands matching `git commit`), and PostToolUse matchers (for Edit/Write tool calls).\n\n## Remediation Workflow\n\nWhen the stop hook blocks you:\n\n1. **Read the violation list** — Each violation names the specific check that failed\n2. **Update .purpose files** — For modified directories without coverage, create or update the nearest .purpose file\n3. **Update portal.yaml** — If routes were added, add the route and gate definitions\n4. **Fix stale anchors** — If aspect anchors point to deleted/moved files, update the anchor paths\n5. **Record lore** — If 3+ files were modified, call `paradigm_lore_record` with the session summary\n6. **Run `paradigm_reindex`** — Rebuild the index to reflect your updates\n7. **Complete the session** — The stop hook runs again and should pass\n\nThe key insight is that the stop hook is not punitive — it is protective. Every check it enforces prevents a real problem: stale documentation, unprotected routes, orphaned anchors, or lost institutional knowledge.",
439
- "keyConcepts": [
440
- "Stop hook blocks session completion on compliance failures (7 checks)",
441
- "Post-write hook tracks edited files in .paradigm/.pending-review (advisory only)",
442
- "Pre-commit hook auto-rebuilds index before git commit (never blocks)",
443
- ".pending-review bridges post-write tracking to stop hook validation",
444
- "2+ source files + 0 paradigm updates = stop hook violation",
445
- "3+ source files without lore entry = stop hook violation",
446
- "Routes detected without portal.yaml = stop hook violation",
447
- "paradigm hooks install or paradigm shift for automatic installation"
448
- ],
449
- "quiz": [
450
- {
451
- "id": "q1",
452
- "question": "You modified 4 source files and updated one .purpose file but did not record a lore entry. The stop hook runs. What happens?",
453
- "choices": {
454
- "A": "It passes — the .purpose update satisfies all requirements",
455
- "B": "It blocks on two violations: .purpose freshness for the other 3 files, and missing lore entry for a 4-file session",
456
- "C": "It blocks only on the missing lore entry — 4 files exceeds the 3-file threshold",
457
- "D": "It passes — .purpose coverage is sufficient, lore is optional",
458
- "E": "It blocks only on .purpose freshness — the other 3 files need coverage"
459
- },
460
- "correct": "C",
461
- "explanation": "The stop hook checks multiple conditions independently. The '.purpose freshness' check passes because you did update a .purpose file (the '2+ source files with 0 paradigm updates' check fails only when zero paradigm files were touched). However, 4 modified source files exceeds the 3-file lore recording threshold, so the missing lore entry causes a block. Whether the other files need .purpose coverage depends on whether they have covering .purpose files in ancestor directories."
462
- },
463
- {
464
- "id": "q2",
465
- "question": "The post-write hook just fired after you edited `src/services/payment.ts`. Which of these files would it skip tracking?",
466
- "choices": {
467
- "A": "`src/services/payment.ts` — it tracks this file",
468
- "B": "`.paradigm/config.yaml` — it skips paradigm directory files",
469
- "C": "`src/middleware/auth.ts` — it would track this too",
470
- "D": "`package.json` — it skips .json files",
471
- "E": "Both B and D are skipped"
472
- },
473
- "correct": "E",
474
- "explanation": "The post-write hook skips non-source files (.json, .yaml, .md, .lock, .env, .gitignore) and paradigm directories (.paradigm/, .claude/, .cursor/). Both `.paradigm/config.yaml` (paradigm directory) and `package.json` (.json extension) are skipped. Only actual source code files like .ts, .js, .rs, .py are tracked in .pending-review."
475
- },
476
- {
477
- "id": "q3",
478
- "question": "What does the pre-commit hook do, and can it block a commit?",
479
- "choices": {
480
- "A": "Runs all habit checks and blocks if any severity=block habits are violated",
481
- "B": "Validates portal.yaml and blocks if gates are undefined",
482
- "C": "Rebuilds the symbol index and stages the updated files — never blocks",
483
- "D": "Checks .purpose freshness and blocks if files are stale",
484
- "E": "Records a lore entry for the commit — never blocks"
485
- },
486
- "correct": "C",
487
- "explanation": "The pre-commit hook has a single job: rebuild the symbol index (`paradigm index --quiet`) and stage the rebuilt files (scan-index.json, navigator.yaml, flow-index.json) so they are included in the commit. It always exits 0 — it never blocks. This ensures every commit has a fresh index without manual intervention."
488
- },
489
- {
490
- "id": "q4",
491
- "question": "The stop hook detects that `src/routes/api.ts` contains Express `.post()` calls but `portal.yaml` does not exist. What happens?",
492
- "choices": {
493
- "A": "Advisory warning — portal.yaml is recommended but not required",
494
- "B": "The hook blocks — route patterns detected without portal.yaml is a violation",
495
- "C": "The hook skips this check — portal.yaml is only required for projects that already have one",
496
- "D": "The hook creates a minimal portal.yaml automatically",
497
- "E": "The hook blocks only if the route handles user data"
498
- },
499
- "correct": "B",
500
- "explanation": "The stop hook scans modified files for route declaration patterns (`.get()`, `.post()`, etc.). If route patterns are found and portal.yaml was neither present in the project nor modified during the session, the hook blocks. This enforces the rule that all protected routes must be declared in portal.yaml."
501
- },
502
- {
503
- "id": "q5",
504
- "question": "You are blocked by the stop hook. The violations list shows: 'stale aspect anchor: src/old/audit.ts no longer exists'. How do you fix this?",
505
- "choices": {
506
- "A": "Delete the aspect from the .purpose file — aspects with stale anchors are invalid",
507
- "B": "Create an empty file at `src/old/audit.ts` to satisfy the anchor check",
508
- "C": "Update the anchor path in the .purpose file to point to the new location of the audit code",
509
- "D": "Run `paradigm_aspect_check` — it auto-fixes stale anchors",
510
- "E": "Ignore it — stale anchors are advisory only"
511
- },
512
- "correct": "C",
513
- "explanation": "Aspects require valid code anchors. If the file was moved or renamed, update the anchor path in the .purpose file to point to the new location. If the code was deleted entirely, you may need to remove the aspect or create new enforcement code. Creating an empty file (B) is a hack that defeats the purpose. `paradigm_aspect_check` validates but doesn't auto-fix."
514
- }
515
- ]
516
- },
517
- {
518
- "id": "advanced-workflows",
519
- "title": "The Complete Workflow",
520
- "content": "## Putting It All Together\n\nYou have learned the five advanced systems individually. Now let's see how they work together in a complete development workflow. Every system has a role, and the handoffs between them create a feedback loop that gets smarter with every session.\n\n## The Full Cycle\n\nHere is the complete Paradigm workflow for a non-trivial task:\n\n### Phase 1: Preflight\n\n```\n1. paradigm_session_recover → Load previous session context\n2. paradigm_pm_preflight → Get compliance plan for the task\n3. paradigm_habits_check(preflight) → Verify discovery habits are followed\n4. paradigm_ripple → Check impact of planned changes\n5. paradigm_wisdom_context → Get team knowledge for affected symbols\n6. paradigm_practice_context → Get habit-aware warnings for symbols\n7. paradigm_session_checkpoint(planning) → Save plan before coding\n```\n\nNotice the layering: session recovery provides continuity, preflight ensures preparation, habits check enforces discovery discipline, ripple and wisdom provide context, practice context adds behavioral awareness, and the checkpoint enables crash recovery.\n\n### Phase 2: Implementation\n\n```\n8. Write code → Implement the feature\n → Post-write hook fires → Tracks edited files in .pending-review\n → Post-write advisory → Reminds about .purpose coverage\n9. Update .purpose files → Document new/changed symbols\n10. Update portal.yaml → Add routes and gates (if applicable)\n11. paradigm_session_checkpoint(implementing) → Save progress\n```\n\nThe post-write hook acts as a running tally. Every source file edit is tracked, and periodic reminders keep documentation top of mind. Updating .purpose and portal.yaml during implementation (not after) prevents the stop hook from blocking at the end.\n\n### Phase 3: Validation\n\n```\n12. paradigm_flow_check → Verify flows are complete\n13. paradigm_aspect_check → Verify aspect anchors are valid\n14. paradigm_pm_postflight → Run post-implementation governance\n15. 
paradigm_habits_check(postflight) → Verify documentation/testing habits\n16. paradigm_session_checkpoint(validating) → Save pre-test state\n```\n\nValidation catches issues before they become stop hook violations. Flow validation ensures multi-step processes are complete. Aspect checks confirm anchors point to real code. Postflight governance catches missing .purpose files and undefined gates.\n\n### Phase 4: Recording\n\n```\n17. paradigm_lore_record → Record the session's work\n18. paradigm_history_record → Log implementation to symbol history\n19. paradigm_reindex → Rebuild the symbol index\n20. paradigm_session_checkpoint(complete) → Mark task complete\n```\n\nRecording preserves institutional knowledge. The lore entry captures what was done and why. History record logs implementation details to individual symbol timelines. Reindexing ensures the symbol index reflects all changes.\n\n### Phase 5: Commit\n\n```\n21. git commit → Commit changes\n → Pre-commit hook fires → Auto-rebuilds index, stages updated files\n → Stop hook fires → Validates all compliance checks\n22. If stop hook blocks → Fix violations, re-attempt\n23. If stop hook passes → Session complete\n```\n\nThe commit phase is where enforcement happens. The pre-commit hook ensures the index is fresh. The stop hook validates everything: .purpose coverage, portal.yaml compliance, aspect anchors, lore recording, and pending review freshness.\n\n## How Systems Reinforce Each Other\n\nThe power of the complete workflow is in the feedback loops:\n\n**Sentinel catches what Habits miss.** If an agent skips the `ripple-before-modify` habit and introduces a breaking change, Sentinel records the incident. The practice profile then shows that skipping ripple correlates with incidents — evidence to upgrade the habit severity.\n\n**Lore preserves what Sessions forget.** Session breadcrumbs and checkpoints are ephemeral — they expire after 7 days. Lore entries are permanent. 
The checkpoint gets you through a crash; the lore entry gets the team through the next 6 months.\n\n**Wisdom surfaces what Lore accumulates.** Lore entries record individual sessions. Wisdom distills patterns across sessions: \"every time we modify #payment-service, check for null references on the refund object.\" Wisdom is lore, refined.\n\n**Hooks enforce what Habits recommend.** Habits at `advisory` severity are suggestions. The stop hook at `block` severity is enforcement. The workflow starts with advice (habits check) and ends with enforcement (stop hook). This graduated approach teaches good behavior before punishing bad behavior.\n\n## Capstone Scenario\n\nImagine you are adding a refund endpoint to a payment system. Here is how the complete workflow plays out:\n\n1. **Session recover** reveals the previous session added the payment processor but did not add refunds\n2. **Preflight** shows you need to check `#payment-service`, `$checkout-flow`, and `^authenticated`\n3. **Habits check** confirms you called ripple and wisdom — discovery habits followed\n4. **Ripple** shows `#payment-service` has 4 downstream dependents\n5. **Wisdom** warns: \"always null-check refund objects — see incident INC-042\"\n6. You implement the refund endpoint with proper null checks\n7. **Post-write hook** tracks 5 edited files in `.pending-review`\n8. You update .purpose with `#refund-handler` and portal.yaml with `^refund-eligible` gate\n9. **Postflight** confirms all gates are declared and flows are valid\n10. **Lore record** captures the session with the decision to require `^refund-eligible`\n11. **Commit** triggers pre-commit (index rebuild) and stop hook (all checks pass)\n12. Three weeks later, a similar null reference hits — **Sentinel** matches pattern `payment-null-ref-001` and resolves it in 5 minutes using the recorded fix\n\nThis is Paradigm at full power: every system contributing, every session building on the last, every incident making the next resolution faster.",
521
- "keyConcepts": [
522
- "Five-phase workflow: preflight → implement → validate → record → commit",
523
- "Session recovery provides continuity between sessions",
524
- "Post-write hook tracks files during implementation for stop hook validation",
525
- "Recording phase preserves lore, history, and fresh index before commit",
526
- "Stop hook is the final enforcement gate before session completion",
527
- "Sentinel catches what Habits miss — incidents drive severity upgrades",
528
- "Lore preserves what Sessions forget — permanent vs ephemeral knowledge",
529
- "Graduated enforcement: habits advise, hooks enforce"
530
- ],
531
- "quiz": [
532
- {
533
- "id": "q1",
534
- "question": "In the complete workflow, why does `paradigm_habits_check(preflight)` run BEFORE `paradigm_ripple` and `paradigm_wisdom_context`?",
535
- "choices": {
536
- "A": "To block the session if discovery habits were violated in the previous session",
537
- "B": "To verify that the agent intends to call discovery tools — the habit check reminds and tracks, while the actual tools provide the context",
538
- "C": "Because habits must always run first regardless of workflow position",
539
- "D": "To generate the list of symbols that ripple and wisdom should check",
540
- "E": "The order does not matter — they can run in any sequence"
541
- },
542
- "correct": "B",
543
- "explanation": "The preflight habits check evaluates whether discovery habits (ripple, navigate, wisdom) are being followed. It runs early to remind and track compliance. The actual MCP tools (ripple, wisdom_context) run after to provide the substantive context. The habit check is about behavioral discipline; the tools are about information gathering."
544
- },
545
- {
546
- "id": "q2",
547
- "question": "An agent implements a feature, updates .purpose files, but forgets to record lore before committing. The session modified 5 source files. What sequence of events occurs?",
548
- "choices": {
549
- "A": "Pre-commit hook blocks the commit until lore is recorded",
550
- "B": "Stop hook blocks, citing missing lore entry → agent records lore → re-attempts commit → stop hook passes",
551
- "C": "Commit succeeds but the next session receives a warning about missing lore",
552
- "D": "The post-write hook retroactively creates a lore entry from tracked files",
553
- "E": "The commit succeeds — lore is enforced by habits, not hooks"
554
- },
555
- "correct": "B",
556
- "explanation": "The stop hook checks for lore entries when 3+ source files were modified. With 5 files and no lore entry, it blocks. The agent must then call `paradigm_lore_record` with the session summary, and re-attempt the commit. The pre-commit hook only rebuilds the index — it doesn't check compliance. Lore enforcement lives in the stop hook."
557
- },
558
- {
559
- "id": "q3",
560
- "question": "How does Sentinel benefit from the Habits system?",
561
- "choices": {
562
- "A": "Sentinel directly calls habit checks during incident recording",
563
- "B": "Practice profiles show correlations between skipped habits and incident rates, providing evidence for severity upgrades",
564
- "C": "Habits automatically resolve Sentinel incidents when compliance is high",
565
- "D": "Sentinel and Habits are independent systems with no interaction",
566
- "E": "Habits disable Sentinel checks when compliance is above 90%"
567
- },
568
- "correct": "B",
569
- "explanation": "The feedback loop between Habits and Sentinel works through practice profiles. When an agent frequently skips `ripple-before-modify` and the symbols it touches have higher incident rates, the practice profile surfaces this correlation. This provides data-driven evidence to upgrade the habit's severity from advisory to warn or block — closing the loop between behavior and outcomes."
570
- },
571
- {
572
- "id": "q4",
573
- "question": "What is the relationship between Lore entries and Session checkpoints?",
574
- "choices": {
575
- "A": "They are the same thing — checkpoints are stored as lore entries",
576
- "B": "Checkpoints are ephemeral (7-day expiry) for crash recovery; lore entries are permanent for institutional memory",
577
- "C": "Lore entries are auto-generated from checkpoints at session end",
578
- "D": "Checkpoints replace lore entries in Paradigm v2",
579
- "E": "Lore entries expire after 30 days; checkpoints are permanent"
580
- },
581
- "correct": "B",
582
- "explanation": "Checkpoints and lore serve different purposes with different lifespans. Checkpoints are ephemeral snapshots for crash recovery — they expire after 7 days because their value is immediate continuity. Lore entries are permanent project history — they capture decisions, learnings, and context that remain valuable months or years later. You need both: checkpoints for resilience, lore for memory."
583
- },
584
- {
585
- "id": "q5",
586
- "question": "A team's practice profile shows high compliance across all categories, yet incidents keep occurring in `#payment-service`. What system should they investigate?",
587
- "choices": {
588
- "A": "Habits — add more habits targeting the payment service",
589
- "B": "Hooks — the stop hook might not be running for payment-related changes",
590
- "C": "Sentinel — check `paradigm_sentinel_patterns` and `paradigm_sentinel_stats` for the symbol to identify recurring failure patterns and resolution gaps",
591
- "D": "Lore — the payment service lore entries might be inaccurate",
592
- "E": "Session Intelligence — breadcrumbs might be losing payment context"
593
- },
594
- "correct": "C",
595
- "explanation": "High habit compliance means the behavioral discipline is fine — agents are doing the right things. If incidents persist despite good practices, the issue is likely in the code or architecture, not the process. Sentinel's pattern analysis (`paradigm_sentinel_patterns`) can reveal if the same failure keeps recurring despite resolutions, and `paradigm_sentinel_stats` can show the symbol's incident rate and resolution effectiveness. The answer lives in the incident data, not the compliance data."
596
- }
597
- ]
598
- },
599
- {
600
- "id": "aspect-graph-advanced",
601
- "title": "The Aspect Graph at Scale",
602
- "content": "## Beyond the Basics\n\nPARA 201 introduced the Aspect Graph's internals — the SQLite schema, materialization pipeline, and recursive ripple. This lesson takes you deeper: building custom detectors, advanced graph queries, drift detection in CI/CD, search learning optimization, and governing aspects at enterprise scale.\n\n## Building Custom Aspect Detection Patterns\n\nParadigm ships with 8 built-in detectors that `paradigm_aspect_suggest_scan` uses to find undocumented aspects in source code:\n\n1. **Magic numbers** — Numeric literals that aren't 0 or 1 (e.g., `timeout: 30000`, `maxRetries: 3`)\n2. **Hardcoded strings** — String literals used in conditionals or assignments that smell like configuration (e.g., `'production'`, `'us-east-1'`)\n3. **Rate limits** — Patterns like `rateLimit(100)`, `throttle(1000)`, or variable names containing `limit`, `throttle`, `quota`\n4. **Time values** — Durations, timeouts, TTLs, and expiry values (e.g., `86400`, `24 * 60 * 60`)\n5. **Environment checks** — `process.env`, `std::env`, `os.environ` patterns that branch on environment variables\n6. **Feature flags** — Conditional logic gated on feature names (e.g., `isEnabled('new-checkout')`, `featureFlags.get()`)\n7. **Regex patterns** — Regular expressions used for validation (e.g., email patterns, URL matchers)\n8. 
**Assertion guards** — Invariant checks using `assert`, `invariant()`, `expect()` that enforce guarantees\n\nTo extend the detection system, you define custom detectors in `.paradigm/aspect-detectors.yaml`:\n\n```yaml\ndetectors:\n - id: compliance-annotation\n name: Compliance Annotations\n description: Detects SOC2/GDPR compliance annotations in code\n patterns:\n - regex: \"@(SOC2|GDPR|PCI|HIPAA)\"\n languages: [typescript, javascript, java]\n - regex: \"#\\[compliance\\(\"\n languages: [rust]\n suggestedCategory: rule\n suggestedSeverity: critical\n suggestedTags: [compliance, security]\n\n - id: retry-policy\n name: Retry Policies\n description: Detects retry/backoff configurations\n patterns:\n - regex: \"(retryPolicy|backoff|maxAttempts|retryCount)\"\n languages: [typescript, javascript, python]\n suggestedCategory: configuration\n suggestedSeverity: medium\n```\n\nCustom detectors are loaded alongside the built-in 8 during `paradigm_aspect_suggest_scan`. They follow the same interface: match source code patterns, suggest a category and severity, and let the user decide whether to formalize the finding as a `~aspect`.\n\n## Graph Querying Strategies\n\nThe aspect graph supports three primary querying patterns, each suited to different use cases:\n\n### BFS Traversal (Neighborhood Analysis)\n\n`paradigm_aspect_graph` uses breadth-first search to explore the neighborhood of a symbol. The `hops` parameter controls how far to traverse:\n\n- **1 hop** — Direct connections only. Use this when you need to know what a single aspect directly relates to. Fast, focused, minimal noise.\n- **2 hops** — Friends-of-friends. Reveals indirect relationships: \"this aspect relates to that aspect, which relates to that component.\" The sweet spot for most queries.\n- **3+ hops** — Extended neighborhood. Useful for understanding how distant parts of the codebase connect through aspects. 
Gets noisy in dense graphs.\n\nThe multiplicative weight decay means that each hop reduces confidence. An explicit edge (weight 1.0) followed by an inferred edge (weight 0.5) produces a path weight of 0.5. Two inferred edges produce 0.25. The `minWeight` threshold (default 0.1) prunes low-confidence paths automatically.\n\n### Heatmap-Driven Exploration\n\n`paradigm_aspect_heatmap` ranks aspects by access frequency. This is not about what aspects ARE important — it is about what aspects are USED most. The distinction matters:\n\n- An aspect accessed 50 times via search but never via ripple might have a discoverability problem — people search for it because it is hard to find through the graph.\n- An aspect accessed primarily via ripple has good graph connectivity — it naturally surfaces during impact analysis.\n- An aspect with zero access across all types may be stale, poorly named, or irrelevant.\n\nHeatmap data is the starting point for governance reviews. Aspects that nobody accesses should be evaluated for removal or renaming.\n\n### Edge-Filtered Queries\n\nWhen calling `paradigm_aspect_graph`, you can filter by edge relation to narrow results:\n\n- `enforced-by` — Find all aspects that enforce a given component. Useful when changing a component to know what rules apply.\n- `depends-on` — Find dependency chains. If `~token-expiry-24h` depends-on `~jwt-signing-rs256`, changing JWT signing affects token expiry.\n- `contradicts` — Find conflicting aspects. Two aspects that contradict each other signal an architectural tension that needs resolution.\n- `supersedes` — Find deprecated-but-still-referenced aspects. The superseding aspect should be the authoritative one.\n- `related-to` — The weakest relation. Useful for discovery but not for impact analysis.\n\n## Drift Detection in CI/CD\n\nAspect drift occurs when the code at an anchor location changes without updating the aspect definition. 
The `paradigm_aspect_drift` tool detects this using SHA-256 content hashes.\n\nDuring materialization, the pipeline computes a SHA-256 hash of the code at each anchor's line range and stores it in the `anchors.content_hash` column. When `paradigm_aspect_drift` runs later, it re-reads the code at those line ranges, computes a new hash, and compares. A mismatch means the code changed — the anchor is drifted.\n\nFor CI/CD integration, add drift detection as a pipeline step:\n\n```yaml\n# .github/workflows/paradigm.yml\nsteps:\n - name: Check aspect drift\n run: |\n paradigm scan --quiet\n paradigm doctor --strict --json | jq '.aspects.drifted'\n if [ $(paradigm doctor --json | jq '.aspects.drifted | length') -gt 0 ]; then\n echo \"::error::Aspect anchors have drifted\"\n exit 1\n fi\n```\n\nThe `--strict` flag treats drifted anchors as errors rather than warnings. In a mature project, you want drift detection to block merges — it ensures that aspect documentation stays synchronized with code changes.\n\nDrift detection is also available per-aspect via the MCP tool:\n\n```\nparadigm_aspect_drift({ aspectId: 'token-expiry-24h' })\n```\n\nThis returns: the aspect ID, each anchor with its stored hash vs current hash, whether each anchor has drifted, and the specific lines that changed. Use this during code review to verify that refactors updated their aspect anchors.\n\n## Search Learning Loop Optimization\n\nThe three-tier search system improves over time through the confirm-and-decay mechanism. Here is how to optimize it:\n\n### Tier Priority\n\n1. **Tier 1: Learned mappings** — Query-to-aspect weights in the `search_weights` table. If a query matches a stored mapping with weight >= 1.0, the result is returned immediately. This is instant because it is a simple key-value lookup.\n2. **Tier 2: FTS5 full-text search** — SQLite's FTS5 engine searches aspect descriptions, values, and categories. Returns results ranked by BM25 relevance. Accurate but slower than Tier 1.\n3. 
**Tier 3: Fuzzy matching** — Levenshtein distance-based matching with a configurable threshold. Catches typos and partial matches. Slowest but most forgiving.\n\n### Warming the Learning System\n\nA new project's search starts cold — no learned mappings exist. Every search falls through to Tier 2 or 3. To warm the system:\n\n1. Run common queries for your project's domain (e.g., search for 'expiry', 'rate limit', 'auth')\n2. Confirm the best result with `paradigm_aspect_confirm` for each query\n3. After 3-5 confirmations per query, the learned weight exceeds the Tier 1 threshold\n\nThe decay mechanism (confirmed +1.0, others *0.95) means that a single confirmation is enough to create a Tier 1 entry. But multiple confirmations build a stronger mapping that resists displacement.\n\n### Diagnosing Search Issues\n\nWhen search returns unexpected results:\n\n- Check `search_weights` table entries for the query — are stale mappings dominating?\n- Verify aspect descriptions contain the keywords you are searching for (FTS5 searches descriptions)\n- Check for typos in the query that might prevent Tier 2 matches but trigger Tier 3 fuzzy results\n- Use `paradigm_aspect_heatmap` to see if the expected aspect is ever accessed — a zero-access aspect might have a discovery problem\n\n## Aspect Governance at Scale\n\nWhen a project exceeds 100 aspects, governance becomes critical. Without it, aspects accumulate as stale documentation, anchor drift goes undetected, and the graph becomes noisy rather than useful.\n\n### The Governance Review Cycle\n\nRun quarterly aspect reviews using this process:\n\n1. **Heatmap analysis** — `paradigm_aspect_heatmap({ limit: 0 })` returns ALL aspects ranked by access. The bottom 20% are candidates for removal or consolidation.\n2. **Drift audit** — `paradigm doctor --strict` catches all drifted anchors. Drifted aspects either need anchor updates or should be marked stale.\n3. **Category distribution** — Check that aspect categories are balanced. 
A project with 80 rules and 2 decisions might be over-documenting constraints while missing strategic choices.\n4. **Edge health** — Check for orphaned aspects (no edges to any other symbol). An aspect with zero edges is either standalone (legitimate but rare) or poorly connected.\n5. **Search weight review** — Check the `search_weights` table for queries with multiple high-weight mappings, which indicate ambiguous terminology.\n\n### Naming Conventions at Scale\n\nWith 100+ aspects, naming collisions and ambiguity become real problems. Establish conventions:\n\n- **Category prefix** — Prefix aspects with their category: `~rule-no-console-log`, `~decision-use-redis`, `~constraint-max-upload-10mb`\n- **Domain grouping** — Group related aspects by domain: `~auth-token-expiry`, `~auth-session-timeout`, `~auth-refresh-rotation`\n- **Version suffix** — When aspects evolve: `~rate-limit-v2` supersedes `~rate-limit-v1` with an explicit `supersedes` edge\n\n### Delegation and Ownership\n\nFor large teams, assign aspect ownership:\n\n```yaml\n~payment-idempotency:\n description: Payment operations must be idempotent\n owner: payments-team\n reviewers: [platform-team, security-team]\n```\n\nThe `owner` field indicates who maintains the aspect, and `reviewers` lists teams that should be consulted when the aspect changes. This is purely metadata — Paradigm does not enforce it — but it guides humans and AI agents when modifications are needed.",
603
- "keyConcepts": [
604
- "8 built-in detectors: magic numbers, hardcoded strings, rate limits, time values, env checks, feature flags, regex patterns, assertion guards",
605
- "Custom detectors defined in .paradigm/aspect-detectors.yaml extend the suggest-scan system",
606
- "BFS traversal with multiplicative weight decay prunes low-confidence paths automatically",
607
- "Heatmap-driven exploration reveals usage patterns vs importance assumptions",
608
- "Five edge relations for filtered queries: enforced-by, depends-on, contradicts, supersedes, related-to",
609
- "Drift detection uses SHA-256 content hashes comparing stored vs current code at anchor line ranges",
610
- "CI/CD integration via paradigm doctor --strict --json blocks merges on drifted anchors",
611
- "Three-tier search: learned mappings (instant) -> FTS5 (accurate) -> fuzzy (forgiving)",
612
- "Warm the learning system with 3-5 confirmations per common query",
613
- "Governance review cycle: heatmap analysis, drift audit, category distribution, edge health, search weight review"
614
- ],
615
- "quiz": [
616
- {
617
- "id": "q1",
618
- "question": "How many built-in detectors does paradigm_aspect_suggest_scan use, and which of these is NOT one of them?",
619
- "choices": {
620
- "A": "8 built-in detectors; 'database schema' is not one of them",
621
- "B": "6 built-in detectors; 'magic numbers' is not one of them",
622
- "C": "8 built-in detectors; 'rate limits' IS one of them (trick question)",
623
- "D": "10 built-in detectors; 'feature flags' is not one of them",
624
- "E": "5 built-in detectors; 'environment checks' is not one of them"
625
- },
626
- "correct": "A",
627
- "explanation": "paradigm_aspect_suggest_scan uses 8 built-in detectors: magic numbers, hardcoded strings, rate limits, time values, environment checks, feature flags, regex patterns, and assertion guards. 'Database schema' is not among them. Custom detectors can be added via .paradigm/aspect-detectors.yaml to extend the detection system."
628
- },
629
- {
630
- "id": "q2",
631
- "question": "You want to find all rules that enforce constraints on #payment-service through the aspect graph. Which query approach is most effective?",
632
- "choices": {
633
- "A": "paradigm_aspect_search({ query: 'payment rules' }) to find them by text",
634
- "B": "paradigm_aspect_graph({ symbol: '#payment-service', hops: 1 }) filtered by 'enforced-by' edge relation",
635
- "C": "paradigm_aspect_heatmap({ limit: 100 }) and manually scan for payment-related aspects",
636
- "D": "paradigm_aspect_drift({ aspectId: '#payment-service' }) to find stale rules",
637
- "E": "paradigm_ripple({ symbol: '#payment-service' }) without any graph filtering"
638
- },
639
- "correct": "B",
640
- "explanation": "An edge-filtered graph query at 1 hop with the 'enforced-by' relation is the most direct approach. It returns exactly the aspects that enforce rules on the target component. Search (A) finds by text, not by graph relationship. Heatmap (C) ranks by usage, not by target. Drift (D) checks anchor freshness, not relationships."
641
- },
642
- {
643
- "id": "q3",
644
- "question": "Your CI pipeline should fail when aspect anchors have drifted. Which command configuration achieves this?",
645
- "choices": {
646
- "A": "paradigm doctor with no flags — drift is always a blocking error",
647
- "B": "paradigm doctor --strict — treats drifted anchors as errors that cause a non-zero exit code",
648
- "C": "paradigm scan --fix — automatically fixes drifted anchors",
649
- "D": "paradigm_aspect_drift with no arguments — checks all aspects and exits non-zero on drift",
650
- "E": "paradigm lint --strict — lint checks include drift detection"
651
- },
652
- "correct": "B",
653
- "explanation": "paradigm doctor --strict treats warnings (including drifted anchors) as errors, producing a non-zero exit code that fails the CI step. Without --strict, drifted anchors are warnings that do not block. paradigm scan rebuilds the index but does not check drift. paradigm lint checks .purpose file structure, not anchor content hashes."
654
- },
655
- {
656
- "id": "q4",
657
- "question": "A new project's aspect search always falls to Tier 3 (fuzzy matching). How do you warm the learning system so common queries use Tier 1?",
658
- "choices": {
659
- "A": "Manually edit the search_weights SQLite table to insert mappings",
660
- "B": "Run paradigm_reindex with a --warm-search flag",
661
- "C": "Run common queries with paradigm_aspect_search, then confirm the best results with paradigm_aspect_confirm for each query",
662
- "D": "Wait for 100+ searches to accumulate — Tier 1 learns automatically without confirmation",
663
- "E": "Set limits.searchLearningRate to a higher value in config.yaml"
664
- },
665
- "correct": "C",
666
- "explanation": "The learning system requires explicit confirmation via paradigm_aspect_confirm. When you search for a term and confirm the best result, the confirmed aspect gets +1.0 weight for that query. After 3-5 confirmations, the weight exceeds the Tier 1 threshold and future queries return instantly. There is no automatic learning without confirmation — the system relies on user feedback to improve."
667
- },
668
- {
669
- "id": "q5",
670
- "question": "During a quarterly governance review, the heatmap shows that 30 aspects out of 120 have zero access across all types (search, ripple, navigate, direct). What does this indicate and what should you do?",
671
- "choices": {
672
- "A": "These aspects are well-documented and need no changes — zero access means no issues",
673
- "B": "Delete all 30 immediately — unused aspects are always stale",
674
- "C": "These aspects may be stale, poorly named, or irrelevant — evaluate each for removal, renaming, or consolidation as part of the governance review",
675
- "D": "Increase their severity to 'critical' to force agents to access them",
676
- "E": "Move them to a separate 'archive' section in the .purpose files"
677
- },
678
- "correct": "C",
679
- "explanation": "Zero-access aspects are candidates for review, not automatic deletion. Some may be legitimate but poorly named (rename to improve discoverability). Some may be truly stale with drifted anchors (remove or update). Some may have been superseded by newer aspects (consolidate with supersedes edges). The governance review evaluates each case individually."
680
- }
681
- ]
682
- },
683
- {
684
- "id": "task-management",
685
- "title": "Task Management",
686
- "content": "## Why Tasks Exist\n\nAI agent sessions are stateless. You can discuss a plan, identify five things that need doing, and then the session ends. The next session starts blank — those five items are gone. Sticky notes on a monitor do not help when your developer is a language model.\n\nParadigm's Task Management system provides a persistent scratch pad that survives context windows. Tasks are lightweight, date-partitioned YAML entries that capture what needs doing, how urgent it is, and what project knowledge relates to it. They are not a full project management system — they are the missing short-term memory between sessions.\n\nThe key difference from lore: lore records what happened (past tense). Tasks record what should happen (future tense). Together they form a complete timeline — memory of the past and intention for the future.\n\n## Anatomy of a Task\n\nEvery task follows a consistent structure:\n\n```yaml\nid: T-2026-02-26-001\nblurb: \"Add rate limiting to the /api/payments endpoint\"\npriority: high\nstatus: open\ntags: [security, payments]\nrelated_lore: [L-2026-02-25-003]\ncreated: \"2026-02-26T10:15:00Z\"\nupdated: \"2026-02-26T10:15:00Z\"\n```\n\nThe `id` field is auto-generated: `T-{date}-{sequence}`, following the same date-partitioned pattern as lore entries. The `blurb` is the only required field — a concise description of what needs to be done. Everything else is optional but useful.\n\nThree priority levels exist: `high` (do this soon), `medium` (do this eventually), and `low` (nice to have). 
Tasks without an explicit priority default to `medium`.\n\nThree statuses track lifecycle: `open` (needs doing), `done` (completed), and `shelved` (parked for later — not abandoned, just deferred).\n\n## Storage: Date-Partitioned YAML\n\nTasks live in `.paradigm/tasks/entries/` organized by creation date:\n\n```\n.paradigm/tasks/\n entries/\n 2026-02-25/\n T-2026-02-25-001.yaml\n T-2026-02-25-002.yaml\n 2026-02-26/\n T-2026-02-26-001.yaml\n```\n\nDate partitioning keeps directories small. Each task is a standalone YAML file, making them easy to read, edit, and version-control. The date in the path matches the date in the task ID.\n\n## The Five MCP Tools\n\n**`paradigm_task_create`** — Create a new task. The `blurb` field is required — a short description of what needs to be done. Optional fields include `priority` (high/medium/low), `tags` (for categorization and filtering), and `related_lore` (linking to lore entries that provide context). The task is written to the correct date directory with an auto-incremented ID and starts with status `open`.\n\n**`paradigm_task_list`** — List tasks with filters. Filter by `status` (open/done/shelved/all), `priority` (high/medium/low), or `tag`. Results are sorted by priority (high first) then by date (newest first). Without filters, it returns all open tasks.\n\n**`paradigm_task_update`** — Update any field on an existing task by ID. You can change the blurb, priority, status, tags, or related_lore. Only specified fields are modified — everything else is preserved.\n\n**`paradigm_task_done`** — Shorthand to mark a task as complete. Pass the task ID and the status changes to `done` with an updated timestamp. This is equivalent to `paradigm_task_update` with `status: done` but more ergonomic for the common case.\n\n**`paradigm_task_shelve`** — Shorthand to shelve a task for later. Pass the task ID and the status changes to `shelved`. 
Shelved tasks are not deleted — they remain searchable and can be reopened by updating their status back to `open`.\n\n## Session Recovery Integration\n\nThe top 5 open tasks are automatically surfaced during session recovery. When a new session starts and the agent calls any Paradigm MCP tool, the recovery data includes the highest-priority open tasks alongside the usual breadcrumbs and checkpoint data.\n\nThis means every session begins with awareness of outstanding work. The agent does not need to ask \"what should I work on?\" — the task list is already there, sorted by priority. This is the scratch-pad-that-survives pattern: write tasks in one session, see them in the next.\n\n## When to Create Tasks\n\nCreate tasks when:\n- You identify work that cannot be completed in the current session\n- A code review surfaces follow-up items\n- You discover a bug or improvement while working on something else\n- The user mentions something that should be tracked but is not the current focus\n- A handoff needs to communicate specific next steps\n\nDo not use tasks for:\n- Tracking completed work (that is what lore is for)\n- Long-term roadmap items (use your project management tool)\n- Architectural decisions (use lore entries with type `decision`)\n\nTasks are ephemeral intentions — they should be created quickly, completed or shelved promptly, and never allowed to accumulate into a backlog of hundreds. If your task list grows beyond 20-30 open items, it is time to triage: shelve the low-priority items and focus on what matters.",
687
- "keyConcepts": [
688
- "Tasks are persistent scratch-pad items that survive context windows",
689
- "Auto-generated IDs: T-{date}-{sequence}, date-partitioned in .paradigm/tasks/entries/{YYYY-MM-DD}/",
690
- "Three priority levels: high, medium (default), low",
691
- "Three statuses: open, done, shelved",
692
- "Five MCP tools: paradigm_task_create, paradigm_task_list, paradigm_task_update, paradigm_task_done, paradigm_task_shelve",
693
- "Top 5 open tasks surfaced automatically on session recovery",
694
- "Tasks record future intention; lore records past action"
695
- ],
696
- "quiz": [
697
- {
698
- "id": "q1",
699
- "question": "You are midway through adding authentication when you notice the payment service has a null-check bug. You cannot fix it now. What is the correct action?",
700
- "choices": {
701
- "A": "Record a lore entry describing the bug for future reference",
702
- "B": "Call `paradigm_task_create` with a blurb describing the null-check bug, priority high, and tags [bug, payments]",
703
- "C": "Add a TODO comment in the payment service code and move on",
704
- "D": "Call `paradigm_sentinel_record` to log it as an incident",
705
- "E": "Mention it in the session's handoff summary and hope the next session picks it up"
706
- },
707
- "correct": "B",
708
- "explanation": "This is exactly what tasks are for — capturing work you cannot complete in the current session. A task with priority `high` ensures it surfaces at the top of the next session's recovery data. A lore entry (A) records what happened, not what needs to happen. A TODO comment (C) is invisible to Paradigm. Sentinel (D) is for production incidents. A handoff mention (E) is fragile — tasks are persistent."
709
- },
710
- {
711
- "id": "q2",
712
- "question": "A new session starts. The agent calls `paradigm_status`. Among the recovery data, it sees 3 high-priority tasks and 12 medium-priority tasks. How were these surfaced?",
713
- "choices": {
714
- "A": "The agent must explicitly call `paradigm_task_list` to see tasks — they are not in recovery data",
715
- "B": "All 15 tasks appear in the recovery data automatically",
716
- "C": "The top 5 open tasks (sorted by priority then date) are automatically included in recovery data on the first Paradigm tool call",
717
- "D": "Only high-priority tasks are surfaced during recovery",
718
- "E": "Tasks are only surfaced if the previous session created a handoff"
719
- },
720
- "correct": "C",
721
- "explanation": "Session recovery automatically includes the top 5 open tasks, sorted by priority (high first) then by date (newest first). So the agent would see the 3 high-priority tasks plus 2 of the most recent medium-priority tasks. The remaining 10 medium-priority tasks are available via `paradigm_task_list` but are not in the initial recovery data."
722
- },
723
- {
724
- "id": "q3",
725
- "question": "Your task list has grown to 35 open items. What should you do?",
726
- "choices": {
727
- "A": "Delete the oldest tasks to keep the list manageable",
728
- "B": "Increase the session recovery limit from 5 to 35 so all tasks are visible",
729
- "C": "Triage: shelve low-priority items with `paradigm_task_shelve` and focus on what matters — tasks should not accumulate into a large backlog",
730
- "D": "Convert all tasks to lore entries since the list is too long for task management",
731
- "E": "Tasks have no practical limit — 35 is fine, just filter by priority when working"
732
- },
733
- "correct": "C",
734
- "explanation": "Tasks are meant to be a lightweight scratch pad, not a project management backlog. When the list grows beyond 20-30 items, it is time to triage. Use `paradigm_task_shelve` to park items that are not immediately relevant. Shelved tasks are not deleted — they remain searchable and can be reopened. This keeps the active list focused and the session recovery data meaningful."
735
- }
736
- ]
737
- },
738
- {
739
- "id": "assessment-loops",
740
- "title": "Lore as Unified Project Memory",
741
- "content": "## Lore: The Single Source of Project Memory\n\nParadigm uses lore as its unified project memory system. Every piece of project knowledge — session records, retrospectives, insights, decisions, milestones — lives in lore entries, differentiated by `type` and classified by tags.\n\nThe model is simple: **one system, tags drive classification.**\n\n| Entry Type | When to Use |\n|---|---|\n| `agent-session` | Automated record of an AI-assisted work session |\n| `human-note` | Manual note from a human developer |\n| `decision` | Strategic or architectural decision with rationale |\n| `review` | Quality review of a previous entry |\n| `incident` | Production issue or bug report |\n| `milestone` | Significant project achievement |\n| `retro` | Retrospective — looking back at completed work |\n| `insight` | A realization or pattern discovered across sessions |\n\nLore entries are stored as YAML files in `.paradigm/lore/entries/{date}/` with the `.lore` extension. Each entry has a unique ID: `L-{date}-{author}-{HHMMSS}-{NNN}`.\n\n## Tags Drive Classification\n\nTags are the primary classification mechanism in lore. Any string can be a tag, but certain prefixes carry special meaning:\n\n| Tag Prefix | Meaning | Example |\n|---|---|---|\n| `arc:` | Groups entries into a thematic arc | `arc:auth-hardening`, `arc:v2-migration` |\n| `assessment:` | Marks the reflection type | `assessment:retro`, `assessment:insight` |\n| `arc-closed` | Arc is no longer active | Added when an arc is complete |\n| `arc-status:` | Arc status metadata | `arc-status:complete`, `arc-status:archived` |\n\nArcs are simply tag prefixes — no separate storage or management needed. To create an arc, just start tagging lore entries with `arc:my-arc-name`. To close an arc, add `arc-closed` and `arc-status:complete` tags to its entries.\n\n## The Body Field\n\nFor entries that need more than a 2-3 sentence summary, lore entries support a `body` field for long-form content. 
This is where retrospective narratives, detailed decision rationale, and multi-paragraph reflections live:\n\n```yaml\nid: L-2026-03-02-ascend-164500-001\ntype: retro\ntitle: \"JWT refresh token rotation — what we learned\"\nsummary: Completed refresh token rotation with httpOnly cookie storage.\nbody: |\n After three sessions implementing refresh token rotation,\n the key insight is that storing refresh tokens in httpOnly\n cookies eliminates an entire class of XSS vulnerabilities.\nsymbols_touched: [\"#refresh-token-handler\", \"^authenticated\"]\nlinked_lore: [L-2026-02-10-003, L-2026-02-12-001]\nlinked_commits: [a1b2c3d, e4f5g6h]\ntags: [arc:auth-hardening, assessment:retro, security, auth, jwt]\n```\n\n## Cross-Referencing\n\nLore entries can link to other project artifacts:\n\n- **`linked_lore`** — References to other lore entry IDs, creating a web of related records\n- **`linked_tasks`** — References to paradigm task IDs\n- **`linked_commits`** — Git commit SHAs related to this entry\n\nThese links create traceability. A retrospective entry can point to the three session records that produced it and the five commits that implemented it.\n\n## Working with Lore\n\n**Recording:** Use `paradigm_lore_record` with `type`, `title`, `summary`, and `symbols_touched`. Add `body` for long-form content, `tags` with `arc:*` prefixes for arc grouping, and `linked_lore`/`linked_commits` for cross-references.\n\n**Searching:** Use `paradigm_lore_search` with filters:\n- `tag: \"arc:auth-hardening\"` — Find all entries in an arc\n- `type: \"retro\"` — Find all retrospectives\n- `hasBody: true` — Find entries with detailed content\n- `symbol: \"#payment-service\"` — Find entries touching a symbol\n\n## The Reflection Loop\n\nLore supports a natural reflection cycle:\n\n1. **Session records** — Automatically captured during work sessions (type: `agent-session`)\n2. 
**Reflection entries** — Manually recorded at natural pause points (type: `retro`, `insight`, `decision`, `milestone`)\n3. **Arc grouping** — Related reflections tagged with `arc:*` for thematic organization\n4. **Cross-referencing** — Reflection entries link back to the sessions that produced them\n\nWhen a task is marked complete via `paradigm_task_done`, the system suggests recording a lore entry as a natural reflection point.\n\n## When to Record Reflective Entries\n\n- **After completing a multi-session feature** — What did we learn? (`retro` with `arc:feature-name`)\n- **When a pattern emerges** — \"Every time we touch auth, we find token edge cases\" (`insight`)\n- **When making a strategic choice** — \"Switching from REST to GraphQL\" (`decision`)\n- **When reaching a milestone** — \"v2.0 shipped to production\" (`milestone`)\n\nThe general rule: if the knowledge would be valuable in 3 months, record it as a reflective lore entry with appropriate tags.\n\n## Migration from Assessments\n\nProjects that used the older separate assessment system can migrate with `paradigm lore migrate-assessments`. This converts assessment entries to lore entries with `arc:{arc_id}` and `assessment:{type}` tags, preserving all data.",
742
- "keyConcepts": [
743
- "Lore is the unified project memory — one system, tags drive classification",
744
- "Eight entry types: agent-session, human-note, decision, review, incident, milestone, retro, insight",
745
- "Arc tags (arc:*) group related entries by theme — no separate arc management needed",
746
- "The body field supports long-form content for detailed reflections",
747
- "Cross-referencing via linked_lore, linked_tasks, and linked_commits creates traceability",
748
- "paradigm_lore_search with tag filter finds entries across arcs",
749
- "Task completion nudges lore recording as a natural reflection point"
750
- ],
751
- "quiz": [
752
- {
753
- "id": "q1",
754
- "question": "You have completed a three-session effort to add rate limiting. You want to record a retrospective grouped with other rate-limiting work. What is the correct approach?",
755
- "choices": {
756
- "A": "Call `paradigm_lore_record` with `type: \"retro\"`, a body with your reflection, and `tags: [\"arc:rate-limiting\"]`",
757
- "B": "Call `paradigm_assessment_record` with `arc_id: \"arc-rate-limiting\"` and `type: \"retro\"`",
758
- "C": "Call `paradigm_lore_record` with `type: \"milestone\"` — completing features is always a milestone",
759
- "D": "Create a separate `.paradigm/assessments/` directory and write the entry manually",
760
- "E": "Call `paradigm_lore_record` with `type: \"agent-session\"` — all lore is session-level"
761
- },
762
- "correct": "A",
763
- "explanation": "Reflective entries are recorded via `paradigm_lore_record` with the appropriate type and arc tag. A retro with `tags: [\"arc:rate-limiting\"]` groups it with other entries in that arc. The body field holds the detailed reflection. The assessment tools (B) are deprecated wrappers. Milestones (C) mark significant project events, not feature completions. Agent-session (E) is for automated session records, not deliberate reflections."
764
- },
765
- {
766
- "id": "q2",
767
- "question": "A lore entry has `linked_lore: [L-2026-02-10-003, L-2026-02-12-001]` and `linked_commits: [a1b2c3d]`. What does this cross-referencing enable?",
768
- "choices": {
769
- "A": "It automatically updates the linked entries with backlinks",
770
- "B": "It creates traceability — readers can drill from the synthesized insight down to the specific sessions and code changes",
771
- "C": "It prevents the referenced lore entries from being deleted",
772
- "D": "It triggers Sentinel to check those commits for incidents",
773
- "E": "It merges the linked entries into a single combined entry"
774
- },
775
- "correct": "B",
776
- "explanation": "Cross-references create a traceability chain. A reader encountering an insight entry can follow `linked_lore` to see the full session context, and `linked_commits` to see the exact code changes. This is the core value of linking — each entry adds interpretation to what it references, with links to drill down for evidence."
777
- },
778
- {
779
- "id": "q3",
780
- "question": "You want to find every retrospective in the `arc:auth-hardening` arc that mentions `#payment-service`. Which approach is correct?",
781
- "choices": {
782
- "A": "Call `paradigm_lore_search` with `tag: \"arc:auth-hardening\"`, `type: \"retro\"`, and `symbol: \"#payment-service\"`",
783
- "B": "Call `paradigm_assessment_search` with `symbol: \"#payment-service\"` — it searches the old assessment system",
784
- "C": "Call `paradigm_lore_search` with `tags: [\"arc:auth-hardening\", \"retro\"]`",
785
- "D": "Call `paradigm_search` with `query: \"payment retro auth\"` — general search covers lore",
786
- "E": "Read every file in `.paradigm/lore/entries/` and filter manually"
787
- },
788
- "correct": "A",
789
- "explanation": "`paradigm_lore_search` supports combining filters: `tag` for arc prefix matching, `type` for entry type, and `symbol` for symbol references. These filters combine (AND logic), so you get only retro entries in the auth-hardening arc that touch the payment service. The assessment tools (B) are deprecated. Using `tags` array (C) uses OR logic, not AND. General search (D) searches the symbol index, not lore content."
790
- }
791
- ]
792
- },
793
- {
794
- "id": "symphony-a-mail",
795
- "title": "Symphony: Multi-Agent Messaging with The Score",
796
- "content": "## Agents Need to Talk\n\nUntil now, every Paradigm agent has worked in isolation. A Claude Code session modifying the backend has no awareness of what the session working on the frontend is doing. Two developers on the same team, each with their own AI assistant, have no way for those assistants to coordinate — even when they are working on the same project at the same time.\n\nSymphony changes this. It is Paradigm's multi-agent, multi-human collaborative intelligence layer. And its foundation is The Score: a lightweight, file-based messaging protocol that gives every Claude Code session its own mailbox.\n\n## The Metaphor: Email for AI Agents\n\nThe Score works exactly like email. Each agent has an identity, an inbox, and an outbox. Messages are delivered as JSONL files on the filesystem. Agents poll for new messages on a timer. There is no persistent server, no WebSocket connection, and no cloud dependency. If two agents are running on the same machine, they can message each other through nothing more than file reads and writes.\n\nThis simplicity is deliberate. The Score is the CLI-only foundation of Symphony — it works with zero dependencies beyond the Paradigm CLI. No Conductor, no Sentinel, no network configuration. The only requirement is that agents are running on the same machine (or connected via a lightweight TCP relay for cross-machine scenarios).\n\n## Agent Identity and Discovery\n\nEvery Claude Code session that participates in The Score has a stable identity. The identity is derived from the project directory and the agent's role — for example, `a-paradigm/backend` or `a-kamiki/frontend`. This deterministic naming means the same project opened in the same context always gets the same identity, even across session restarts.\n\nWhen you run `paradigm symphony join`, the CLI discovers all Claude Code sessions on the current machine and connects them into a mail network. 
Each session gets a mailbox directory at `~/.paradigm/score/agents/{agent-id}/` containing four files:\n\n- **`inbox.jsonl`** — Messages waiting for this agent, one per line, append-only\n- **`outbox.jsonl`** — Replies from this agent, append-only\n- **`ack.json`** — The ID of the last acknowledged message (for garbage collection)\n- **`identity.json`** — Agent ID, project, role, PID, and session start time\n\nThe JSONL format — one JSON object per line — makes appending atomic and parsing trivial. No file locking, no corruption risk from concurrent writes, no binary format to decode.\n\n## Messaging and Threading\n\nMessages in The Score carry structured metadata beyond plain text. Every message has an **intent** that classifies its purpose:\n\n| Intent | Meaning |\n|---|---|\n| `question` | Asking for information from other agents |\n| `context` | Providing background or context |\n| `proposal` | Proposing an action or fix |\n| `action` | Announcing an action the agent took |\n| `decision` | Recording a decision |\n| `alert` | Forwarding a Sentinel alert |\n| `approval` / `rejection` | Responding to a proposal |\n| `handoff` | Transferring responsibility to another agent |\n| `fileRequest` | Requesting a file from another agent |\n| `fileDelivery` | Delivering a requested file |\n\nIntents serve two purposes. First, they give the receiving agent structured context about what kind of response is expected — a question needs an answer, a proposal needs approval or rejection, a decision needs acknowledgment. Second, they feed into Lore: when a message has `intent: decision`, Symphony can automatically record it as a lore entry.\n\nMessages belong to **threads**. A thread starts when the first message on a topic is sent (with no `parentId`). Subsequent replies reference the thread root, building a conversation tree. 
Thread state is tracked in `~/.paradigm/score/threads/{thread-id}.json`, which records the topic, participants, message count, and last activity timestamp.\n\n## The File Pipeline\n\nAgents often need to share files — a type definition, an API contract, a configuration file. The Score's file pipeline enables this with a critical security constraint: **every file transfer requires explicit human approval**.\n\nThe flow works like this: Agent A sends a `fileRequest` message specifying the file path, a reason, and the target agent. The request appears in the owning human's terminal (via `paradigm symphony requests`). The human reviews and either approves, denies, or approves with redaction (stripping sensitive lines). Only after human approval does the file content get written to the requester's inbox.\n\nTrust configuration lives in `~/.paradigm/score/trust.yaml`. You can define auto-approve patterns for trusted users (`docs/**`, `*.md`) and never-approve patterns for sensitive files (`.env*`, `*.key`, `*.pem`, `**/secrets/**`). The never-approve list is enforced absolutely — even clicking approve on a `.env` file will be denied by the system. File requests expire after one hour without action, and all transfers are logged.\n\n## /loop: The Agent Heartbeat\n\nThe glue that makes The Score work is `/loop`. Each Claude Code session runs `/loop 10s paradigm_symphony_poll`, which polls the inbox every 10 seconds for new messages. The `paradigm_symphony_poll` MCP tool reads `inbox.jsonl`, formats messages as structured prompts the agent can reason about, and suggests actions.\n\nWithout `/loop`, messages would accumulate in the inbox with nobody reading them. The loop is the heartbeat — it keeps agents responsive. When an agent processes a message and replies via `paradigm_symphony_send`, the reply goes to `outbox.jsonl`. 
A mail router (or Conductor, in later phases) picks up outbox messages and delivers them to the appropriate inbox files.\n\nThe convenience command `paradigm symphony join` combines registration and loop setup in one step — it registers the session's identity and starts the polling loop automatically.\n\n## Thread Resolution and Lore Integration\n\nConversation threads are not meant to live forever. When a thread reaches a conclusion, any participant (human or agent) can resolve it with `paradigm symphony resolve <thread-id>`. Resolution triggers an automatic lore entry that captures the full conversation: topic, participants, decisions made, actions taken, and symbols discussed.\n\nThis is the bridge between ephemeral conversation and permanent project memory. A 15-minute exchange between three agents about a serialization bug becomes a searchable lore entry tagged with the relevant symbols and arc. The next developer encountering a similar issue can find the conversation, the decision, and the fix — all linked together.\n\n## CLI Commands\n\nThe `paradigm symphony` command group provides the complete human interface:\n\n- `paradigm symphony whoami` — Show this agent's identity and linked peers\n- `paradigm symphony list` — List all known agents with status (awake/asleep) and location\n- `paradigm symphony join` — Discover and connect Claude Code sessions on this machine\n- `paradigm symphony join --remote <ip>` — Connect to a remote machine's mail server\n- `paradigm symphony send \"message\"` — Broadcast to all linked agents\n- `paradigm symphony send --to <agent> \"message\"` — Direct message to a specific agent\n- `paradigm symphony send --thread <id> \"message\"` — Reply to an existing thread\n- `paradigm symphony read` — Show unread messages\n- `paradigm symphony threads` — List active threads\n- `paradigm symphony resolve <id>` — Resolve a thread, creating a lore entry\n- `paradigm symphony status` — Network overview (agents, threads, unread count)\n\nFor 
the file pipeline: `paradigm symphony request`, `paradigm symphony requests`, `paradigm symphony approve`, and `paradigm symphony deny`.\n\n## MCP Tools for Agent Participation\n\nSix MCP tools power agent-side Symphony participation:\n\n- **`paradigm_symphony_poll`** — The heartbeat. Reads inbox, returns formatted messages and thread summaries. Called by `/loop`.\n- **`paradigm_symphony_send`** — Send a message with intent, text, optional symbols, diff, or decision. Writes to outbox.\n- **`paradigm_symphony_status`** — Overview of the local network: agents, threads, Sentinel endpoint.\n- **`paradigm_symphony_thread`** — Get full context of a conversation thread with messages, participants, and extracted decisions.\n- **`paradigm_symphony_request_file`** — Request a file from another agent. Returns immediately with `pending` status; delivery arrives via future poll.\n- **`paradigm_symphony_approve_file`** — Approve or deny a pending file request after human confirmation.\n\nThese tools compose naturally with existing Paradigm workflows. An agent can poll for messages, discover a question about `#payment-serializer`, call `paradigm_ripple` to check impact, and respond with full context — all within a single `/loop` cycle.",
797
- "keyConcepts": [
798
- "The Score is Symphony's CLI-only foundation — file-based messaging between agents with zero dependencies beyond the Paradigm CLI",
799
- "Agent identity is derived from project directory + role (e.g., a-paradigm/backend), stable across session restarts",
800
- "Mailbox protocol uses JSONL files: inbox.jsonl (incoming), outbox.jsonl (replies), ack.json (last acknowledged), identity.json (agent metadata)",
801
- "Messages carry structured intents (question, proposal, decision, action, alert, etc.) that classify purpose and drive Lore integration",
802
- "Threads group related messages into conversations, tracked in ~/.paradigm/score/threads/{thread-id}.json",
803
- "The file pipeline requires explicit human approval for every file transfer, with configurable trust levels and a hard deny list for sensitive files",
804
- "/loop is the agent heartbeat — each session runs /loop 10s paradigm_symphony_poll to stay responsive to incoming messages",
805
- "Thread resolution via paradigm symphony resolve triggers automatic lore entry creation, bridging ephemeral conversation to permanent project memory",
806
- "Six MCP tools: paradigm_symphony_poll, paradigm_symphony_send, paradigm_symphony_status, paradigm_symphony_thread, paradigm_symphony_request_file, paradigm_symphony_approve_file",
807
- "paradigm symphony join discovers Claude Code sessions on the local machine; paradigm symphony join --remote <ip> extends to remote machines via TCP"
808
- ],
809
- "quiz": [
810
- {
811
- "id": "q1",
812
- "question": "You have three Claude Code sessions open on your machine: one working on the core library, one on the backend API, and one on the frontend. You want them to communicate. What is the correct setup sequence?",
813
- "choices": {
814
- "A": "Start a Sentinel hub, then connect each session to the WebSocket endpoint",
815
- "B": "Install Conductor, which automatically links all sessions on the machine",
816
- "C": "Run `paradigm symphony join` to discover and connect the sessions, then run `/loop 10s paradigm_symphony_poll` in each session",
817
- "D": "Create a `.paradigm-workspace` file listing all three projects — workspace linking enables messaging",
818
- "E": "Run `paradigm team orchestrate` which automatically sets up inter-agent communication"
819
- },
820
- "correct": "C",
821
- "explanation": "The Score is the CLI-only foundation that requires no Conductor, no Sentinel, and no workspace setup. `paradigm symphony join` discovers Claude Code sessions on the machine and creates mailboxes. Then each session needs `/loop 10s paradigm_symphony_poll` to poll for incoming messages. Conductor (B) auto-links sessions but is not required — The Score works standalone."
822
- },
823
- {
824
- "id": "q2",
825
- "question": "An agent sends a message with `intent: 'decision'` and the text 'Use Redis for session storage instead of in-memory.' What happens beyond normal message delivery?",
826
- "choices": {
827
- "A": "The message is blocked — only humans can make decisions",
828
- "B": "Symphony automatically records a lore entry of type `decision` with the message text, linking it to the conversation thread and referenced symbols",
829
- "C": "The message is flagged for human review before delivery",
830
- "D": "Nothing special — intent is informational only and has no side effects",
831
- "E": "The decision is written to `.paradigm/config.yaml` as a project setting"
832
- },
833
- "correct": "B",
834
- "explanation": "Messages with `intent: decision` trigger automatic Lore integration. Symphony records a lore entry with the decision text, links it to the conversation thread, and tags it with referenced symbols. This bridges ephemeral conversation to permanent project memory without manual recording."
835
- },
836
- {
837
- "id": "q3",
838
- "question": "Agent A (on your machine) requests `src/types.ts` from Agent B (on a teammate's machine). What must happen before Agent A receives the file?",
839
- "choices": {
840
- "A": "Agent B must call `paradigm_symphony_approve_file` — agents can approve their own file requests",
841
- "B": "The file is sent automatically since both agents are linked in the same Symphony network",
842
- "C": "The teammate (human) must explicitly approve the file transfer — every cross-agent file transfer requires human approval from the file owner's side",
843
- "D": "Agent A must have the `^file-access` gate declared in portal.yaml",
844
- "E": "The file is sent if it matches the auto-approve glob patterns, but there is no human gate"
845
- },
846
- "correct": "C",
847
- "explanation": "The file pipeline's core security constraint is that every file transfer requires explicit human approval from the owner's side. When Agent A requests a file, the teammate sees a prompt (via Conductor or `paradigm symphony requests`) and must approve, deny, or approve with redaction. Even if auto-approve patterns exist in trust.yaml, the initial setup still requires human-configured trust levels."
848
- },
849
- {
850
- "id": "q4",
851
- "question": "An agent's `paradigm_symphony_poll` returns 3 new messages in a thread about a failing test. The agent reads them, investigates the code, and finds the bug. How should the agent communicate its findings?",
852
- "choices": {
853
- "A": "Call `paradigm_lore_record` with the findings — lore is the communication channel",
854
- "B": "Call `paradigm_symphony_send` with `intent: 'context'` providing the investigation results, then `intent: 'proposal'` with the fix, writing to `outbox.jsonl` for delivery to the thread",
855
- "C": "Write directly to the other agents' `inbox.jsonl` files for immediate delivery",
856
- "D": "Call `paradigm_sentinel_record` to log the bug as an incident",
857
- "E": "Call `paradigm symphony send` from the CLI — agents cannot use MCP tools to reply"
858
- },
859
- "correct": "B",
860
- "explanation": "Agents communicate via `paradigm_symphony_send`, which writes structured messages to `outbox.jsonl`. Using multiple messages with appropriate intents (context for the investigation, proposal for the fix) gives other agents structured context. Writing directly to inbox files (C) bypasses the routing protocol. Lore (A) and Sentinel (D) are recording systems, not communication channels."
861
- },
862
- {
863
- "id": "q5",
864
- "question": "A thread about a serialization bug has been active for 20 minutes across 4 agents. The team agrees on a fix. What should happen next?",
865
- "choices": {
866
- "A": "Delete the thread files from `~/.paradigm/score/threads/` to clean up",
867
- "B": "Let the thread expire naturally — threads auto-resolve after 1 hour of inactivity",
868
- "C": "Run `paradigm symphony resolve <thread-id>` which marks the thread as resolved and automatically creates a lore entry capturing the full conversation, decisions, and actions",
869
- "D": "Each agent independently records a lore entry about their contribution",
870
- "E": "Run `paradigm_reindex` to incorporate the thread into the project index"
871
- },
872
- "correct": "C",
873
- "explanation": "Thread resolution via `paradigm symphony resolve` is the bridge between ephemeral conversation and permanent project memory. It marks the thread as resolved and automatically creates a comprehensive lore entry with the topic, participants, decisions, actions, and referenced symbols. This single action preserves the entire collaborative context for future reference."
874
- }
875
- ]
876
- },
877
- {
878
- "id": "platform-agent-ui",
879
- "title": "Platform & Agent-Driven UI",
880
- "content": "## The Unified Platform\n\n`paradigm serve` launches the Paradigm Platform — a unified development management interface on port 3850 that absorbs every Paradigm tool (Lore, Graph, Sentinel, University, Symphony) into one browser tab.\n\nThe Platform is built on Express + WebSocket on the server, React 18 + Zustand on the client. Sections are lazy-loaded. A shared design system provides consistent theming and symbol colors.\n\n### Architecture\n\n```\nlocalhost:3850 (Express + WebSocket)\n├── /api/lore/* ← LoreRouter\n├── /api/symbols/* ← SymbolsRouter\n├── /api/graphs/* ← GraphsRouter\n├── /api/platform/* ← PlatformRouter (health, sections, agent-command)\n├── /ws ← WebSocket (agent commands + user activity)\n└── / ← Platform UI SPA\n```\n\n## Agent-Driven UI\n\nThe breakthrough: **the AI agent can drive the browser in real-time.** Five MCP tools let the agent navigate, highlight, annotate, observe, and clear — turning the Platform from a passive viewer into a shared workspace.\n\n### The Pipeline: MCP → HTTP → WebSocket → Browser\n\n```\nAgent (Claude Code) Platform Server Browser\n │ │ │\n │ paradigm_platform_* │ │\n │ POST /api/platform/cmd │ │\n │ ─────────────────────────►│ │\n │ ◄── { ok: true } ──────│ │\n │ │ ws: agent:* │\n │ │──────────────────►│\n │ │ │ UI updates\n```\n\nWhy HTTP not file-based: the <500ms latency requirement rules out file-watching. 
Why not direct WebSocket from MCP: MCP tools are stdio-based with no event loop for persistent WS connections.\n\n### The Five Tools\n\n| Tool | Purpose |\n|------|---------|\n| `paradigm_platform_navigate` | Switch sections, select symbols, open lore entries |\n| `paradigm_platform_highlight` | Pulsing glow on symbols with color + label, auto-expires |\n| `paradigm_platform_annotate` | Toasts (notifications), callouts (on graph nodes), badges |\n| `paradigm_platform_observe` | Read user's current section, selected symbol, theme, mute state |\n| `paradigm_platform_clear` | Remove all agent highlights and annotations |\n\n### Conflict Resolution: User Always Wins\n\nThe agent must never hijack the user's attention:\n\n- **User idle (>5s):** Agent navigation executes immediately\n- **User active (<5s):** A prompt appears: \"Agent wants to show you #X — [Go there] [Dismiss]\"\n- **User muted:** All agent effects are silently discarded; `observe` returns `{ muted: true }`\n\n### Agent Presence\n\nThe `#AgentPresenceManager` tracks connected agents by their Symphony identity (`{project}/{role}`). Each agent gets a deterministic color from its ID hash. Presence dots appear in the Platform header with a mute toggle.\n\nStale agents are auto-pruned after 2 minutes of inactivity.\n\n### User State Tracking\n\nThe `#UserStateTracker` accumulates user activity — what section they're viewing, what symbol is selected, theme preference. 
This state is served to `paradigm_platform_observe` so the agent can reason about what the user is looking at.\n\nBrowser clients report activity via WebSocket messages: `user:navigate`, `user:select`, `user:theme`, `user:mute`.\n\n### Visual Treatment\n\n| Element | Human | Agent |\n|---------|-------|-------|\n| Selection ring | Solid 2px blue | Dashed 2px agent-color |\n| Highlight | N/A | Pulsing glow animation |\n| Toast | N/A | Left border + robot icon |\n| Navigation | Instant | 300ms ease + toast notification |\n\n### Browser Architecture\n\nThe agent UI layer sits alongside existing stores:\n\n- `agentStore.ts` — Zustand store managing presence, highlights, annotations, toasts, mute, pending navigation\n- `useAgentEffects` — Hook connecting WebSocket `agent:*` messages to store actions, with auto-reconnect\n- `useActivityReporter` — Hook reporting section/theme changes back to server\n- `AgentToast` — Severity-colored toast component\n- `AgentCallout` — Floating callout overlay + navigation conflict prompt",
881
- "keyConcepts": [
882
- "paradigm serve unifies all tools on port 3850 in one browser tab",
883
- "Agent-Driven UI: 5 MCP tools (navigate, highlight, annotate, observe, clear) control the browser in real-time",
884
- "Pipeline: MCP → HTTP POST → Platform server → WebSocket broadcast → browser Zustand store → visual effect",
885
- "Conflict resolution: user idle → auto-navigate, user active → prompt, user muted → silently discard",
886
- "Agent identity reuses Symphony pattern: {project}/{role} with deterministic color from hash",
887
- "AgentPresenceManager tracks agents, auto-prunes after 2min idle",
888
- "UserStateTracker accumulates section/symbol/theme for observe tool",
889
- "Visual distinction: agent effects use dashed rings, pulsing glow, robot-icon toasts vs. human solid styles"
890
- ],
891
- "quiz": [
892
- {
893
- "id": "q1",
894
- "question": "An AI agent calls `paradigm_platform_navigate({ section: 'graph', symbol: '#payment-service' })` while the user is actively typing in the lore section. What happens in the browser?",
895
- "choices": {
896
- "A": "The browser immediately switches to the graph section and selects the node",
897
- "B": "The command fails with an error because the user is in a different section",
898
- "C": "A prompt appears: 'Agent wants to show you #payment-service — [Go there] [Dismiss]' — the user decides",
899
- "D": "The agent's command is queued and executes when the user next switches sections",
900
- "E": "The browser switches to graph but keeps the lore section visible in a split view"
901
- },
902
- "correct": "C",
903
- "explanation": "When the user is active (last interaction <5s ago), the agent's navigation creates a pending navigation prompt instead of auto-navigating. The user sees 'Agent wants to show you #payment-service — [Go there] [Dismiss]' and chooses whether to follow. This is the conflict resolution model: user always wins."
904
- },
905
- {
906
- "id": "q2",
907
- "question": "What is the communication pipeline when an MCP tool like `paradigm_platform_highlight` sends a command to the browser?",
908
- "choices": {
909
- "A": "MCP tool → direct WebSocket connection to browser → UI update",
910
- "B": "MCP tool → writes to file → browser polls file every 500ms → UI update",
911
- "C": "MCP tool → HTTP POST to Platform server → server broadcasts via WebSocket → browser Zustand store → UI update",
912
- "D": "MCP tool → writes to scan-index.json → browser watches index file → UI update",
913
- "E": "MCP tool → sends message via Symphony mailbox → browser reads mailbox → UI update"
914
- },
915
- "correct": "C",
916
- "explanation": "The pipeline is MCP → HTTP POST → WebSocket broadcast → browser. The MCP tool calls the platform-bridge helper which POSTs to /api/platform/agent-command. The server validates the command, updates server-side state (presence, highlights), and broadcasts a typed WebSocket message (e.g., agent:highlight) to all connected browsers. The browser's useAgentEffects hook receives the message and dispatches to the Zustand agentStore."
917
- },
918
- {
919
- "id": "q3",
920
- "question": "The user clicks the 'Mute' button in the Platform header. An agent then calls `paradigm_platform_annotate({ type: 'toast', message: 'Found a bug in #auth' })`. What happens?",
921
- "choices": {
922
- "A": "The toast appears but with reduced opacity",
923
- "B": "The command returns `{ annotated: false, reason: 'Agent actions are muted by user' }` and no toast appears",
924
- "C": "The toast is queued and shown when the user unmutes",
925
- "D": "The command throws an error that the agent must handle",
926
- "E": "The toast appears regardless — mute only affects navigation, not annotations"
927
- },
928
- "correct": "B",
929
- "explanation": "When the user mutes agent actions, ALL agent effects are silently discarded — navigate, highlight, annotate, and clear commands all return a response with `reason: 'Agent actions are muted by user'`. The server checks UserStateTracker.isMuted() before broadcasting. The agent can detect this via `paradigm_platform_observe` which returns `{ muted: true }`."
930
- },
931
- {
932
- "id": "q4",
933
- "question": "How does the Platform determine an agent's display color in the header presence indicator?",
934
- "choices": {
935
- "A": "Each agent chooses its color when connecting via WebSocket",
936
- "B": "Colors are assigned sequentially from a fixed palette (first agent = blue, second = green, etc.)",
937
- "C": "The color is deterministically computed from a hash of the agent's Symphony identity string ({project}/{role})",
938
- "D": "Colors are stored in .paradigm/config.yaml under platform.agentColors",
939
- "E": "All agents share the same color — they're distinguished by name only"
940
- },
941
- "correct": "C",
942
- "explanation": "Agent colors are deterministic: the AgentPresenceManager computes a hash of the agentId string (e.g., 'a-paradigm/core') and maps it to one of 8 predefined colors. This means the same agent always gets the same color across sessions, making it recognizable. The identity reuses Symphony's {project}/{role} pattern."
943
- },
944
- {
945
- "id": "q5",
946
- "question": "An agent wants to understand what the user is currently looking at before deciding what to highlight. Which approach is correct?",
947
- "choices": {
948
- "A": "Read the platformStore.ts file to check the activeSection variable",
949
- "B": "Call `paradigm_platform_observe()` which returns the current section, selected symbol, theme, mute state, and connected agents",
950
- "C": "Call `paradigm_status` which includes the Platform UI state in its output",
951
- "D": "Check the browser's localStorage via a Bash command",
952
- "E": "Call `paradigm_navigate({ intent: 'context' })` which includes Platform UI state"
953
- },
954
- "correct": "B",
955
- "explanation": "paradigm_platform_observe is the dedicated tool for reading UI state. It sends an 'observe' command to the Platform server, which returns the UserStateTracker's accumulated state: current section, selected symbol, theme, mute status, connected agents, and optionally active highlights/annotations. This is real-time data from the server, not a file read."
956
- }
957
- ]
958
- },
959
- {
960
- "id": "conductor-workspace",
961
- "title": "Conductor: Visual Mission Control",
962
- "content": "## What Is Conductor?\n\nConductor is a native macOS application that serves as the visual mission control for Paradigm. While the CLI and MCP tools handle the automation, Conductor gives you a real-time view of what your agent team is doing — and lets you interact with them visually.\n\nThink of it as the difference between managing a team over email versus walking into a mission control room. Both work, but the room gives you instant awareness.\n\n### Core Capabilities\n\n**Workspace Mode** — A full-screen tiling window manager for Claude Code sessions. Launch multiple terminals side by side, split horizontally or vertically, drag dividers to resize. Six layout presets (single, split-h, split-v, quad, triple, grid) let you quickly arrange your workspace.\n\n**Symphony Integration** — Conductor connects to Symphony, the inter-agent messaging system. When agents communicate during orchestration (handing off context, requesting approval, debating approaches), those messages appear in Conductor's thread view in real time. You can read the full conversation without switching to the CLI.\n\n**Task Protocol** — A structured protocol for human-agent coordination with 7 intents:\n- `task` — assign work to an agent\n- `task-ack` — agent acknowledges receipt\n- `progress` — agent reports progress\n- `approval-request` — agent asks for human approval\n- `approval-response` — human approves or rejects\n- `task-complete` — agent reports success\n- `task-failed` — agent reports failure\n\nThis protocol makes agent work visible and controllable. You see when agents are working, what they are asking, and whether they succeeded.\n\n**Agent Health Dashboard** — Per-agent metrics: success rates, average time-per-task, acceptance rates for contributions. Sparklines show trends over time. When an agent's performance drops, you see it immediately.\n\n**Live Sentinel** — Real-time event viewer with symbol filtering. 
When Sentinel detects an incident or pattern, it appears in Conductor's event feed with full detail and suggested resolution.\n\n### Architecture\n\nConductor is built with Swift and SwiftUI — a native macOS application, not an Electron wrapper. Key design decisions:\n\n- **Single-owner pattern** — AppDelegate owns the orchestrator, workspace, project store, and agent process manager. No shared mutable state.\n- **Local-only ML** — Gaze tracking, gesture recognition, and voice commands all run locally via CoreML. Zero cloud, zero cost, zero latency.\n- **SwiftTerm embedding** — Terminal sessions use SwiftTerm, a native Swift terminal emulator. Each session is a real PTY with full ANSI support.\n- **7 platform protocols** — Abstraction layer for future portability (the same protocol set would power a Windows or Linux version).\n\n### Getting Started\n\nBuild and install Conductor:\n\n```bash\ncd packages/conductor\n./build-conductor.sh --install\n```\n\nThis produces `Conductor.app` in `/Applications`. Launch it, and it connects to your Paradigm project automatically.\n\n### When to Use Conductor\n\n- **During orchestration** — watch agents work in real time, approve contributions, read debates\n- **Multi-session development** — tile 2-4 Claude Code sessions side by side, each working on different parts of the codebase\n- **Monitoring** — keep Conductor visible on a secondary display to catch Sentinel events and agent health changes\n- **Team collaboration** — when multiple developers use Symphony, Conductor shows cross-session threads and file approval requests\n\nConductor is optional — everything it shows is also available via CLI and MCP tools. But for teams that want visual awareness of their agent team, it is the command center.",
963
- "keyConcepts": [
964
- "Conductor is a native macOS SwiftUI application — not Electron",
965
- "Workspace mode provides tiling terminal management with 6 layout presets",
966
- "Symphony integration shows inter-agent messages in real time",
967
- "Task protocol has 7 intents for structured human-agent coordination",
968
- "Agent health dashboard tracks per-agent success rates and trends",
969
- "All ML inference (gaze, gesture, voice) runs locally via CoreML",
970
- "Optional but powerful — everything is also available via CLI"
971
- ],
972
- "quiz": [
973
- {
974
- "id": "q1",
975
- "question": "During orchestration, the security agent sends an approval-request via Symphony asking to modify portal.yaml. Where would you see and respond to this in Conductor?",
976
- "choices": {
977
- "A": "In the terminal session where the agent is running",
978
- "B": "In the Symphony thread view — approval-request is a task protocol intent that appears as a message you can approve or reject",
979
- "C": "In the agent health dashboard under the security agent's metrics",
980
- "D": "In the Sentinel event feed as a security incident",
981
- "E": "You cannot — approval requests only work via CLI"
982
- },
983
- "correct": "B",
984
- "explanation": "Symphony messages, including task protocol intents like approval-request, appear in Conductor's thread view in real time. The task protocol is designed for human-agent coordination — you see the request, read the context, and respond with approval-response directly in the thread view. This is one of Conductor's key advantages over CLI-only workflows."
985
- },
986
- {
987
- "id": "q2",
988
- "question": "You want to work on the frontend and backend of a feature simultaneously with two Claude Code sessions. How would you set this up in Conductor?",
989
- "choices": {
990
- "A": "Launch two separate Conductor apps",
991
- "B": "Use workspace mode with a split-h or split-v layout preset, launching one terminal session per pane",
992
- "C": "Conductor only supports one session at a time",
993
- "D": "Use the agent health dashboard to assign work to two agents",
994
- "E": "Use Symphony to relay messages between two CLI sessions instead"
995
- },
996
- "correct": "B",
997
- "explanation": "Conductor's workspace mode is a tiling window manager for Claude Code sessions. Choose a split layout preset (horizontal or vertical), and each pane gets its own terminal session. Both sessions connect to Symphony, so they can coordinate via inter-agent messaging while you watch both in a single window."
998
- },
999
- {
1000
- "id": "q3",
1001
- "question": "Conductor's ML features (gaze tracking, gesture recognition, voice commands) all run locally via CoreML. Why is this significant?",
1002
- "choices": {
1003
- "A": "CoreML is faster than cloud APIs for all tasks",
1004
- "B": "It means zero cloud cost, zero data leaving your machine, and zero network latency — critical for a developer tool that sees your code and screen",
1005
- "C": "Apple requires all macOS apps to use CoreML",
1006
- "D": "Cloud ML services do not support gaze tracking",
1007
- "E": "It is not significant — it is just an implementation detail"
1008
- },
1009
- "correct": "B",
1010
- "explanation": "For a developer tool that has access to your codebase, screen content, and camera feed, privacy is paramount. Local-only ML means your data never leaves your machine — no cloud processing, no storage, no costs. The zero-latency benefit is a bonus, but the privacy guarantee is the real reason this design choice matters."
1011
- }
1012
- ]
1013
- },
1014
- {
1015
- "id": "symphony-networking",
1016
- "title": "Symphony Networking: Cross-Machine Relay",
1017
- "content": "## Beyond Single-Machine Messaging\n\nSymphony Phase 0 (A-Mail) established file-based agent-to-agent messaging on a single machine — JSONL mailboxes at `~/.paradigm/score/`, polled via `/loop`. But what happens when two developers on the same WiFi, or at different locations, want their Claude instances to collaborate?\n\nSymphony Phase 1 adds cross-machine networking via a WebSocket relay. The key design principle: **the local mailbox model is unchanged**. Networking is a transparent sync layer that watches local outboxes and delivers remote messages to local inboxes.\n\n## Hub-and-Spoke Topology\n\nOne machine runs `paradigm symphony serve` (the **hub**), and others connect with `paradigm symphony join --remote` (the **spokes**). The hub relays messages between all connected machines.\n\n```\nMachine A (Hub) Machine B (Spoke)\n┌─────────────────────┐ ┌─────────────────────┐\n│ ~/.paradigm/score/ │ │ ~/.paradigm/score/ │\n│ agents/projA/core │ │ agents/projB/core │\n│ inbox.jsonl │◄────ws───►│ inbox.jsonl │\n│ outbox.jsonl │ :3939 │ outbox.jsonl │\n└─────────────────────┘ └─────────────────────┘\n```\n\nThe relay watches each local agent's outbox file (polling every 2 seconds via `fs.stat`). When new messages appear, they're pushed to all connected peers as WebSocket frames. Incoming messages from peers are written to the appropriate local inbox via `appendToInbox()`.\n\n## Pairing & Authentication\n\nSecurity is critical when connecting machines over a network. Symphony uses a pairing code + HMAC challenge-response protocol:\n\n1. The hub generates a 32-byte random secret and derives a **6-digit pairing code**\n2. The code is displayed on the hub's terminal\n3. The spoke connects and receives a `hello` frame with a random challenge nonce\n4. The user enters the code on the spoke terminal (or embeds it in the connection string)\n5. The spoke computes `HMAC-SHA256(challenge, SHA256(code))` and sends an `auth` frame\n6. 
The hub verifies the code and HMAC proof, then sends `auth_ok` with its agent list\n\nPairing codes rotate every 5 minutes. After 3 failed attempts from the same IP, a 60-second cooldown is enforced. Peer records are saved to `~/.paradigm/score/peers.json` (file mode 0600) for auto-reconnect.\n\n## Two Connection Modes\n\n### LAN Pairing (same WiFi)\n\n```sh\n# Machine A (hub)\nparadigm symphony serve\n# Shows: Pairing Code: 847 291\n\n# Machine B (spoke)\nparadigm symphony join --remote 192.168.1.42:3939\n# Prompted for code\n```\n\n### Internet Direct Connect\n\n```sh\n# Machine A\nparadigm symphony serve --public\n# Shows connection string\n\n# Machine B\nparadigm symphony join --remote 73.162.44.103:3939#847291\n# Code embedded — no prompt\n```\n\nInternet mode requires port 3939 to be reachable (port forward, VPN, or SSH tunnel).\n\n## Trust Management\n\nPeers are managed via CLI commands:\n\n- `paradigm symphony peers` — List trusted peers with agent counts and last-seen times\n- `paradigm symphony peers revoke <id>` — Immediately disconnect and block reconnection\n- `paradigm symphony peers forget --force` — Clear all peer trust records\n\nExisting `trust.yaml` hard-deny patterns (`.env*`, `*.key`, `*.pem`) apply to remote file requests — the trust boundary extends across the network.\n\n## Relay Internals\n\nThe `SymphonyRelay` class handles both server and client modes:\n\n- **Outbox watcher**: Polls every 2s, compares outbox line counts against stored positions, forwards new messages\n- **Deduplication**: Bounded `Set<string>` of message IDs (max 10,000) prevents duplicate delivery\n- **Keepalive**: Ping/pong every 30s with 10s pong timeout. Dead connections are auto-terminated\n- **Auto-reconnect**: Client mode uses exponential backoff (1s → 2s → 4s → ... 
→ 30s max)\n- **Rate limiting**: 3 failed auth attempts from the same IP triggers a 60s cooldown\n\n## Remote Agent Visibility\n\nRemote agents appear throughout the Symphony CLI and MCP tools:\n\n- `paradigm symphony list` shows remote agents with a `(remote: peer-name)` tag\n- `paradigm symphony status` includes peer count and remote agent totals\n- `paradigm_symphony_status` MCP tool returns a `peers` array in its response\n- Platform UI `GET /api/symphony/peers` endpoint returns connected peer data\n\nLocal MCP tools (`peek`, `poll`, `send`) work unchanged — they just read/write local files. The relay handles the network transport transparently.\n\n## Backward Compatibility\n\nIf `paradigm symphony serve` is never run, Symphony operates exactly as Phase 0: local-only file-based messaging. Networking is purely additive — no existing workflows change.",
1018
- "keyConcepts": [
1019
- "Hub-and-spoke topology: one serve (hub), multiple join --remote (spokes)",
1020
- "WebSocket relay watches local outboxes and delivers remote messages to local inboxes",
1021
- "Pairing: 6-digit code + HMAC-SHA256 challenge-response authentication",
1022
- "Codes rotate every 5 minutes; 3 failed auth attempts trigger 60s cooldown",
1023
- "Two modes: LAN pairing (interactive code) and internet direct connect (embedded code in connection string)",
1024
- "Peer trust stored in ~/.paradigm/score/peers.json (mode 0600)",
1025
- "Trust management: peers list/revoke/forget commands",
1026
- "Outbox watcher polls every 2s; dedup via bounded Set<string> (max 10,000)",
1027
- "Auto-reconnect with exponential backoff (1s → 30s max)",
1028
- "Remote agents visible in list/status CLI and MCP tools with peer provenance"
1029
- ],
1030
- "quiz": [
1031
- {
1032
- "id": "net-q1",
1033
- "question": "Developer A runs `paradigm symphony serve` and sees pairing code 472831. Developer B runs `paradigm symphony join --remote 192.168.1.42:3939`. What happens next?",
1034
- "choices": {
1035
- "A": "Developer B's agents automatically appear in A's inbox",
1036
- "B": "Developer B is prompted to enter the 6-digit pairing code from A's terminal",
1037
- "C": "The connection fails because Developer B didn't specify the --public flag",
1038
- "D": "Developer B must also run `paradigm symphony serve` to create a peer-to-peer link",
1039
- "E": "Developer B receives A's full agent list immediately without authentication"
1040
- },
1041
- "correct": "B",
1042
- "explanation": "When connecting via --remote without an embedded code (no # in the address), the user is prompted to enter the 6-digit pairing code displayed on the hub. The code is used to compute an HMAC proof for authentication. Only after successful verification does the agent list exchange occur."
1043
- },
1044
- {
1045
- "id": "net-q2",
1046
- "question": "A spoke machine loses its network connection to the hub. What happens?",
1047
- "choices": {
1048
- "A": "All local messages are deleted and the agent must re-pair from scratch",
1049
- "B": "The spoke immediately attempts to reconnect using a fixed 5-second interval",
1050
- "C": "The spoke schedules reconnect with exponential backoff (1s → 2s → 4s → ... → 30s max)",
1051
- "D": "The spoke shuts down and the user must manually restart `paradigm symphony join`",
1052
- "E": "The hub removes the spoke's peer record from peers.json"
1053
- },
1054
- "correct": "C",
1055
- "explanation": "The client-mode relay uses exponential backoff for auto-reconnect: starting at 1 second and doubling each attempt up to a maximum of 30 seconds. On successful reconnect, the delay resets to 1 second. Local messages are unaffected — they remain in the JSONL files."
1056
- },
1057
- {
1058
- "id": "net-q3",
1059
- "question": "An attacker tries to brute-force the pairing code. After entering 3 wrong codes from IP 10.0.0.5, what happens on the next attempt?",
1060
- "choices": {
1061
- "A": "The pairing code is rotated and a new code must be obtained from the hub",
1062
- "B": "The hub blocks all incoming connections permanently until restarted",
1063
- "C": "The connection is accepted but messages are quarantined for review",
1064
- "D": "The hub sends `auth_fail` with 'Too many failed attempts' and enforces a 60-second cooldown for that IP",
1065
- "E": "The hub disconnects but allows immediate retry with the correct code"
1066
- },
1067
- "correct": "D",
1068
- "explanation": "After 3 failed auth attempts from the same IP address, the relay enforces a 60-second cooldown. During this period, any connection from that IP receives an immediate auth_fail with the message 'Too many failed attempts — try again later' and is disconnected."
1069
- },
1070
- {
1071
- "id": "net-q4",
1072
- "question": "How does the relay prevent duplicate message delivery when two peers forward the same message?",
1073
- "choices": {
1074
- "A": "Messages include a TTL counter that decrements on each hop",
1075
- "B": "A bounded Set of seen message IDs (max 10,000) skips already-processed messages",
1076
- "C": "Each peer maintains a sequence number and only accepts higher-numbered messages",
1077
- "D": "The hub assigns unique relay IDs to prevent duplicates",
1078
- "E": "Messages are only forwarded once per minute, allowing time for dedup"
1079
- },
1080
- "correct": "B",
1081
- "explanation": "The relay maintains a bounded Set<string> of message IDs it has already processed. When a message arrives, if its ID is in the set, the relay sends an ack but skips delivery. When the set exceeds 10,000 entries, the oldest half is evicted. This prevents both echo (receiving your own message back) and multi-path duplicates."
1082
- },
1083
- {
1084
- "id": "net-q5",
1085
- "question": "Developer B wants to connect to Developer A's machine over the internet. Developer A runs `paradigm symphony serve --public`. What is the correct command for Developer B?",
1086
- "choices": {
1087
- "A": "`paradigm symphony join --remote devA.example.com` and enter the code when prompted",
1088
- "B": "`paradigm symphony join --remote 73.162.44.103:3939#847291` with the embedded code",
1089
- "C": "`paradigm symphony serve --connect 73.162.44.103:3939`",
1090
- "D": "`paradigm symphony peers add 73.162.44.103:3939 --code 847291`",
1091
- "E": "`paradigm symphony join --public 73.162.44.103`"
1092
- },
1093
- "correct": "B",
1094
- "explanation": "Internet direct connect uses the connection string format: address:port#code. The --public flag on the hub displays this connection string. The spoke parses the embedded code from after the # character and uses it automatically — no interactive prompt needed. Port 3939 must be reachable from the internet (port forward, VPN, or SSH tunnel)."
1095
- }
1096
- ]
1097
- },
1098
- {
1099
- "id": "review-compliance",
1100
- "title": "Automated Review Pipeline & Compliance Checking",
1101
- "content": "## paradigm review\n\nThe automated review pipeline uses a two-stage protocol:\n\n### Stage 1: Spec Compliance (always runs)\n\n- **Purpose coverage**: All touched symbols registered in .purpose files\n- **Portal gate compliance**: Routes declared in portal.yaml with gates\n- **Aspect anchors**: Anchor files still exist, no drift\n- **Broken references**: Parent symbols exist\n- **Route coverage**: New routes have portal.yaml entries\n\n### Stage 2: Code Quality (--deep only)\n\n- **Security**: eval() detection, hardcoded secrets\n- **Convention**: console.log usage (use Paradigm logger)\n- **Test coverage**: Gaps in test files\n\n### ReviewFinding Format\n\nEach finding has:\n- **type**: blocking (must fix), improvement (should fix), note (informational)\n- **category**: purpose-coverage, portal-compliance, aspect-anchors, security, convention\n- **message**: Human-readable description\n- **suggestion**: How to fix it\n\n### Usage\n\n```bash\nparadigm review # Staged changes\nparadigm review --pr 123 # PR via gh CLI\nparadigm review --ci # Exit 1 on blocking\nparadigm review --deep # Include code quality\nparadigm review --json # JSON output\n```\n\n## Dynamic Tool Loading\n\nTools are organized in three tiers:\n- **Core** (~15 tools): Always loaded (search, ripple, status, navigate, etc.)\n- **Feature**: Auto-detected from filesystem (lore → .paradigm/lore/, etc.)\n- **Advanced**: On-demand via `paradigm_tool_activate`\n\n## Response Format\n\n`response_format: 'concise'` on high-traffic tools strips secondary data:\n- paradigm_search: returns only { symbol, type }\n- paradigm_ripple: returns only { symbol, impact, summary }\n- paradigm_status: returns only { project, counts, total }\n\n## compliance-checker.ts\n\nShared logic extracted from pm.ts postflight. Both `paradigm_pm_postflight` and `paradigm review` use the same compliance checks.",
1102
- "quiz": [
1103
- {
1104
- "id": "Q-501-RC-001",
1105
- "question": "paradigm review --ci finds 2 blocking and 3 improvement findings. What's the exit code?",
1106
- "options": [
1107
- "Exit code 0 — improvements are non-blocking",
1108
- "Exit code 1 — any blocking findings cause non-zero exit in CI mode",
1109
- "Exit code 2 — one per blocking finding",
1110
- "Exit code 5 — total findings count"
1111
- ],
1112
- "correct": 1,
1113
- "explanation": "In CI mode, any blocking findings cause exit code 1. Improvements and notes do not affect the exit code."
1114
- },
1115
- {
1116
- "id": "Q-501-RC-002",
1117
- "question": "A project has no features: section in config.yaml. How many MCP tools are loaded?",
1118
- "options": [
1119
- "Only core tools (~15)",
1120
- "Core + explicitly enabled features",
1121
- "All of them — auto-detection is generous, defaulting to current behavior",
1122
- "None — features must be explicitly configured"
1123
- ],
1124
- "correct": 2,
1125
- "explanation": "No features config + generous auto-detection = all tools loaded, matching pre-4.0 behavior for backward compat."
1126
- },
1127
- {
1128
- "id": "Q-501-RC-003",
1129
- "question": "You call paradigm_search with response_format: 'concise'. What fields are returned?",
1130
- "options": [
1131
- "Full results with descriptions, paths, and fuzzy matches",
1132
- "Only { symbol, type } per result — descriptions and secondary data stripped",
1133
- "Only symbol names as a flat array",
1134
- "Compressed binary format"
1135
- ],
1136
- "correct": 1,
1137
- "explanation": "Concise mode strips results to { symbol, type } per entry and removes fuzzyMatched, fuzzyNote, suggestions, workspace data."
1138
- },
1139
- {
1140
- "id": "Q-501-RC-004",
1141
- "question": "An agent needs the graph generation tool but it's in the advanced tier. What does it do?",
1142
- "options": [
1143
- "Request an admin to enable it in config.yaml",
1144
- "Call paradigm_tool_activate with feature: 'graph'",
1145
- "Modify the agent's permissions to include graph tools",
1146
- "Restart the session with --enable-graph flag"
1147
- ],
1148
- "correct": 1,
1149
- "explanation": "paradigm_tool_activate enables advanced-tier modules for the current session. The tools become available immediately."
1150
- },
1151
- {
1152
- "id": "Q-501-RC-005",
1153
- "question": "What's the relationship between paradigm review Stage 1 and paradigm_pm_postflight?",
1154
- "options": [
1155
- "They are completely independent with different checks",
1156
- "paradigm review calls paradigm_pm_postflight internally",
1157
- "They share the same compliance logic extracted into compliance-checker.ts",
1158
- "paradigm_pm_postflight is deprecated in favor of paradigm review"
1159
- ],
1160
- "correct": 2,
1161
- "explanation": "Both use compliance-checker.ts for shared compliance logic — purpose coverage, portal gates, aspect anchors, and broken refs."
1162
- }
1163
- ]
1164
- }
1165
- ]
1166
- }