@a-company/paradigm 5.38.0 → 6.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (328) hide show
  1. package/dist/{accept-orchestration-OATWIRHP.js → accept-orchestration-QQISPINV.js} +1 -1
  2. package/dist/add-UOR4INIV.js +8 -0
  3. package/dist/{agent-loader-RIVI6QPP.js → agent-loader-2WJHD46U.js} +1 -1
  4. package/dist/{agent-loader-RJRVO5GQ.js → agent-loader-YKS2PQWO.js} +1 -1
  5. package/dist/{ambient-76YMUA5Q.js → ambient-BE3SQXNN.js} +1 -1
  6. package/dist/{ambient-WTLYUAQM.js → ambient-NVKQCW2A.js} +12 -12
  7. package/dist/{assess-UFPYEJKP.js → assess-63WXHWJV.js} +1 -1
  8. package/dist/{calibration-OLJYB5HN.js → calibration-BDHGYJOK.js} +1 -1
  9. package/dist/{chunk-5QOCKWK5.js → chunk-4PSD5R7N.js} +2 -2
  10. package/dist/{chunk-HOBHJPTL.js → chunk-6SKSV5B2.js} +1 -1
  11. package/dist/{chunk-4L7665QV.js → chunk-FEYOQMZ5.js} +1 -1
  12. package/dist/{chunk-NEJ4ZLCY.js → chunk-GAFKOFAV.js} +1 -1
  13. package/dist/chunk-GRZQIKST.js +2 -0
  14. package/dist/{chunk-RLCH7DXQ.js → chunk-K7X3Z3GL.js} +1 -1
  15. package/dist/{chunk-4VKSEOXZ.js → chunk-LPBCQM5Y.js} +3 -3
  16. package/dist/{chunk-74SGKSRQ.js → chunk-M2HKWR25.js} +1 -1
  17. package/dist/{chunk-BOYQAMGC.js → chunk-M3PPXJU4.js} +1 -1
  18. package/dist/chunk-PHEX6LU4.js +111 -0
  19. package/dist/chunk-Q527BPUF.js +2 -0
  20. package/dist/chunk-R5ECMBIV.js +11 -0
  21. package/dist/{chunk-X3U3IGYT.js → chunk-TBWWFRL5.js} +1 -1
  22. package/dist/{chunk-MQIG6SMF.js → chunk-TNVWGPCE.js} +1 -1
  23. package/dist/chunk-TZDYIPVU.js +521 -0
  24. package/dist/{chunk-3XGNXXCT.js → chunk-UZ5H7K6Q.js} +1 -1
  25. package/dist/chunk-VIG5LSGZ.js +2 -0
  26. package/dist/chunk-VNIX5KBT.js +3 -0
  27. package/dist/{chunk-AGFPVSX5.js → chunk-VXIIVMTM.js} +1 -1
  28. package/dist/{chunk-ORDKEGII.js → chunk-WESTEMIM.js} +1 -1
  29. package/dist/{chunk-DOCDDDTD.js → chunk-YNDPSWOE.js} +5 -5
  30. package/dist/chunk-Z5QW6USC.js +2 -0
  31. package/dist/{compliance-D7GD6ZYC.js → compliance-BNFWQPKM.js} +1 -1
  32. package/dist/config-schema-FLHRVZMI.js +2 -0
  33. package/dist/{context-audit-XRPT3OU2.js → context-audit-JVCA6GSV.js} +1 -1
  34. package/dist/{cursorrules-U5O4G5T4.js → cursorrules-ZXPXPZ3P.js} +1 -1
  35. package/dist/decision-loader-HELL2AMX.js +2 -0
  36. package/dist/{delete-P5VULXR4.js → delete-2C6ALLYY.js} +1 -1
  37. package/dist/{diff-YGHBIJY5.js → diff-MF55KQZH.js} +1 -1
  38. package/dist/{dist-KGRCLBJP-2QAPFYNF.js → dist-GQ42YS5N-4HIJZVBB.js} +10 -10
  39. package/dist/{docs-USDAF26F.js → docs-O37YLLRN.js} +1 -1
  40. package/dist/doctor-IG5XM4C4.js +2 -0
  41. package/dist/{edit-GUU3HBVW.js → edit-P3MDAZLU.js} +1 -1
  42. package/dist/{flow-FVZR3YJ4.js → flow-BGXOVE2V.js} +1 -1
  43. package/dist/index.js +6 -6
  44. package/dist/init-M44SO65G.js +2 -0
  45. package/dist/{init-XYB62Q3X.js → init-V4KSEKPK.js} +1 -1
  46. package/dist/{list-YKIQNKGB.js → list-2XIWUEMA.js} +1 -1
  47. package/dist/list-CFHINXIS.js +12 -0
  48. package/dist/lore-loader-D2ISOASW.js +2 -0
  49. package/dist/lore-loader-PXFKMKAN.js +2 -0
  50. package/dist/mcp.js +4 -4
  51. package/dist/metrics-UESGUHTA.js +2 -0
  52. package/dist/migrate-assessments-YSITX7KM.js +4 -0
  53. package/dist/migrate-decisions-NPLQOEEH.js +6 -0
  54. package/dist/migrate-plsat-EM2ACIQ3.js +6 -0
  55. package/dist/{nomination-engine-EALA5MGI.js → nomination-engine-QPZJH6XO.js} +1 -1
  56. package/dist/{notebook-loader-PXNRBBXD.js → notebook-loader-3J2OFMS3.js} +1 -1
  57. package/dist/{orchestrate-M5PBZBJQ.js → orchestrate-RID7HHHH.js} +1 -1
  58. package/dist/{platform-server-DNAMH4YI.js → platform-server-UD45NTGV.js} +1 -1
  59. package/dist/{portal-check-ZMLVBIGW.js → portal-check-DV2VSJ5E.js} +1 -1
  60. package/dist/portal-compliance-JONQ4SOP.js +2 -0
  61. package/dist/{probe-3FTG6LYO.js → probe-5HAXULAD.js} +1 -1
  62. package/dist/{providers-AWA7WLLM.js → providers-4PXMWA7V.js} +1 -1
  63. package/dist/quiz-WYIZJG5K.js +10 -0
  64. package/dist/{record-YXPB34MY.js → record-N3VNYYKJ.js} +1 -1
  65. package/dist/reindex-FWPD2VGM.js +2 -0
  66. package/dist/{retag-N5XF3KXP.js → retag-72R2OSZV.js} +1 -1
  67. package/dist/{review-77QI6VOC.js → review-2INNWLTW.js} +1 -1
  68. package/dist/{sentinel-HYAZ3CO5.js → sentinel-EFPEX246.js} +1 -1
  69. package/dist/{sentinel-bridge-VR357PKL.js → sentinel-bridge-UR2MKARY.js} +1 -1
  70. package/dist/{serve-U47GULB6.js → serve-MO35XIZE.js} +1 -1
  71. package/dist/serve-OQYUO7CR.js +12 -0
  72. package/dist/{server-4YNUIK4W.js → server-4D77LCST.js} +1 -1
  73. package/dist/server-FGUL2FWQ.js +7 -0
  74. package/dist/session-tracker-KGORN6B5.js +2 -0
  75. package/dist/{session-work-log-PAKXOFGL.js → session-work-log-4IEVE4KK.js} +1 -1
  76. package/dist/{session-work-log-ZP45TREI.js → session-work-log-EE4UIZ33.js} +1 -1
  77. package/dist/{setup-FEWSYS3Y.js → setup-ZSEC72BS.js} +1 -1
  78. package/dist/{shift-PC6C7NUX.js → shift-TVNY2CQF.js} +6 -6
  79. package/dist/{show-PJ5LFLIL.js → show-JH7LJ5MT.js} +1 -1
  80. package/dist/show-WVHAL4VU.js +7 -0
  81. package/dist/{spawn-M5BAV252.js → spawn-UH5RENSE.js} +1 -1
  82. package/dist/status-S7Z5FVIE.js +6 -0
  83. package/dist/{summary-PYTEIJ4U.js → summary-WLI3NF4G.js} +2 -2
  84. package/dist/{sweep-HU74OPVW.js → sweep-7TZFN5NS.js} +1 -1
  85. package/dist/sync-55U6QPIA.js +2 -0
  86. package/dist/{sync-llms-7CAI74QL.js → sync-llms-GF7DDQDI.js} +1 -1
  87. package/dist/{team-PDK64JXI.js → team-MGT66HZQ.js} +1 -1
  88. package/dist/{timeline-K3ZFKJ3R.js → timeline-RK7O2SCM.js} +1 -1
  89. package/dist/tools-QJHAVYI6.js +2 -0
  90. package/dist/university-content/notes/N-para-001-build-something.md +126 -0
  91. package/dist/university-content/notes/N-para-001-meet-the-team.md +85 -0
  92. package/dist/university-content/notes/N-para-001-shift-setup.md +74 -0
  93. package/dist/university-content/notes/N-para-101-component-types.md +99 -0
  94. package/dist/university-content/notes/N-para-101-first-steps.md +134 -0
  95. package/dist/university-content/notes/N-para-101-five-symbols.md +128 -0
  96. package/dist/university-content/notes/N-para-101-paradigm-logger.md +89 -0
  97. package/dist/university-content/notes/N-para-101-portal-yaml.md +112 -0
  98. package/dist/university-content/notes/N-para-101-project-structure.md +143 -0
  99. package/dist/university-content/notes/N-para-101-purpose-files.md +121 -0
  100. package/dist/university-content/notes/N-para-101-tags-and-classification.md +93 -0
  101. package/dist/university-content/notes/N-para-101-welcome.md +51 -0
  102. package/dist/university-content/notes/N-para-201-architecture-review.md +175 -0
  103. package/dist/university-content/notes/N-para-201-aspect-graph.md +79 -0
  104. package/dist/university-content/notes/N-para-201-aspects-and-anchors.md +112 -0
  105. package/dist/university-content/notes/N-para-201-component-patterns.md +138 -0
  106. package/dist/university-content/notes/N-para-201-cross-cutting-concerns.md +145 -0
  107. package/dist/university-content/notes/N-para-201-disciplines.md +187 -0
  108. package/dist/university-content/notes/N-para-201-flows-deep-dive.md +119 -0
  109. package/dist/university-content/notes/N-para-201-gates-deep-dive.md +165 -0
  110. package/dist/university-content/notes/N-para-201-portal-protocol.md +133 -0
  111. package/dist/university-content/notes/N-para-201-signal-patterns.md +159 -0
  112. package/dist/university-content/notes/N-para-201-symbol-naming.md +149 -0
  113. package/dist/university-content/notes/N-para-301-context-management.md +53 -0
  114. package/dist/university-content/notes/N-para-301-decisions.md +99 -0
  115. package/dist/university-content/notes/N-para-301-doctor-and-validation.md +70 -0
  116. package/dist/university-content/notes/N-para-301-enforcement-levels.md +102 -0
  117. package/dist/university-content/notes/N-para-301-fragility-tracking.md +50 -0
  118. package/dist/university-content/notes/N-para-301-history-system.md +42 -0
  119. package/dist/university-content/notes/N-para-301-navigation-system.md +55 -0
  120. package/dist/university-content/notes/N-para-301-operations-review.md +55 -0
  121. package/dist/university-content/notes/N-para-301-paradigm-shift.md +93 -0
  122. package/dist/university-content/notes/N-para-301-protocols.md +113 -0
  123. package/dist/university-content/notes/N-para-301-ripple-analysis.md +53 -0
  124. package/dist/university-content/notes/N-para-301-sentinel-observability.md +87 -0
  125. package/dist/university-content/notes/N-para-301-sync-and-maintenance.md +57 -0
  126. package/dist/university-content/notes/N-para-301-wisdom-system.md +89 -0
  127. package/dist/university-content/notes/N-para-401-agent-identity.md +99 -0
  128. package/dist/university-content/notes/N-para-401-agent-interop.md +87 -0
  129. package/dist/university-content/notes/N-para-401-agent-roles.md +107 -0
  130. package/dist/university-content/notes/N-para-401-commit-conventions.md +82 -0
  131. package/dist/university-content/notes/N-para-401-mastery-review.md +71 -0
  132. package/dist/university-content/notes/N-para-401-mcp-tools-overview.md +102 -0
  133. package/dist/university-content/notes/N-para-401-multi-agent-coordination.md +80 -0
  134. package/dist/university-content/notes/N-para-401-notebooks-permissions.md +66 -0
  135. package/dist/university-content/notes/N-para-401-orchestration-workflow.md +101 -0
  136. package/dist/university-content/notes/N-para-401-pm-governance.md +71 -0
  137. package/dist/university-content/notes/N-para-401-provider-cascade.md +75 -0
  138. package/dist/university-content/notes/N-para-401-quick-check.md +95 -0
  139. package/dist/university-content/notes/N-para-501-advanced-workflows.md +122 -0
  140. package/dist/university-content/notes/N-para-501-aspect-graph-advanced.md +195 -0
  141. package/dist/university-content/notes/N-para-501-aspect-graph-internals.md +97 -0
  142. package/dist/university-content/notes/N-para-501-assessment-loops.md +116 -0
  143. package/dist/university-content/notes/N-para-501-conductor-workspace.md +77 -0
  144. package/dist/university-content/notes/N-para-501-habits-practice.md +164 -0
  145. package/dist/university-content/notes/N-para-501-hook-enforcement.md +100 -0
  146. package/dist/university-content/notes/N-para-501-lore-system.md +155 -0
  147. package/dist/university-content/notes/N-para-501-platform-agent-ui.md +108 -0
  148. package/dist/university-content/notes/N-para-501-review-compliance.md +72 -0
  149. package/dist/university-content/notes/N-para-501-sentinel-deep-dive.md +173 -0
  150. package/dist/university-content/notes/N-para-501-session-intelligence.md +104 -0
  151. package/dist/university-content/notes/N-para-501-symphony-a-mail.md +120 -0
  152. package/dist/university-content/notes/N-para-501-symphony-networking.md +119 -0
  153. package/dist/university-content/notes/N-para-501-task-management.md +100 -0
  154. package/dist/university-content/notes/N-para-601-agent-renaissance.md +121 -0
  155. package/dist/university-content/notes/N-para-601-attention-scoring.md +129 -0
  156. package/dist/university-content/notes/N-para-601-context-composition.md +146 -0
  157. package/dist/university-content/notes/N-para-601-data-sovereignty.md +140 -0
  158. package/dist/university-content/notes/N-para-601-event-stream.md +126 -0
  159. package/dist/university-content/notes/N-para-601-knowledge-streams.md +144 -0
  160. package/dist/university-content/notes/N-para-601-learning-loop.md +68 -0
  161. package/dist/university-content/notes/N-para-601-maestro-team-collab.md +136 -0
  162. package/dist/university-content/notes/N-para-601-nominations-debates.md +115 -0
  163. package/dist/university-content/notes/N-para-701-agent-notebooks.md +131 -0
  164. package/dist/university-content/notes/N-para-701-agent-pods-nevrland.md +182 -0
  165. package/dist/university-content/notes/N-para-701-agent-profiles.md +197 -0
  166. package/dist/university-content/notes/N-para-701-agent-roster.md +82 -0
  167. package/dist/university-content/notes/N-para-701-agent-state.md +180 -0
  168. package/dist/university-content/notes/N-para-701-learning-feedback-loop.md +188 -0
  169. package/dist/university-content/notes/N-para-701-model-tier-resolution.md +204 -0
  170. package/dist/university-content/notes/N-para-701-orchestration-enforcement.md +169 -0
  171. package/dist/university-content/notes/N-para-701-per-project-rosters.md +198 -0
  172. package/dist/university-content/notes/N-para-701-symphony-visibility.md +142 -0
  173. package/dist/university-content/paths/LP-para-001.yaml +29 -0
  174. package/dist/university-content/paths/LP-para-101.yaml +59 -0
  175. package/dist/university-content/paths/LP-para-201.yaml +69 -0
  176. package/dist/university-content/paths/LP-para-301.yaml +84 -0
  177. package/dist/university-content/paths/LP-para-401.yaml +74 -0
  178. package/dist/university-content/paths/LP-para-501.yaml +89 -0
  179. package/dist/university-content/paths/LP-para-601.yaml +59 -0
  180. package/dist/university-content/paths/LP-para-701.yaml +64 -0
  181. package/dist/university-content/quizzes/Q-para-001-build-something.yaml +46 -0
  182. package/dist/university-content/quizzes/Q-para-001-meet-the-team.yaml +46 -0
  183. package/dist/university-content/quizzes/Q-para-001-shift-setup.yaml +46 -0
  184. package/dist/university-content/quizzes/Q-para-101-component-types.yaml +46 -0
  185. package/dist/university-content/quizzes/Q-para-101-first-steps.yaml +56 -0
  186. package/dist/university-content/quizzes/Q-para-101-five-symbols.yaml +66 -0
  187. package/dist/university-content/quizzes/Q-para-101-paradigm-logger.yaml +56 -0
  188. package/dist/university-content/quizzes/Q-para-101-portal-yaml.yaml +56 -0
  189. package/dist/university-content/quizzes/Q-para-101-project-structure.yaml +66 -0
  190. package/dist/university-content/quizzes/Q-para-101-purpose-files.yaml +56 -0
  191. package/dist/university-content/quizzes/Q-para-101-tags-and-classification.yaml +56 -0
  192. package/dist/university-content/quizzes/Q-para-101-welcome.yaml +56 -0
  193. package/dist/university-content/quizzes/Q-para-201-architecture-review.yaml +66 -0
  194. package/dist/university-content/quizzes/Q-para-201-aspect-graph.yaml +46 -0
  195. package/dist/university-content/quizzes/Q-para-201-aspects-and-anchors.yaml +56 -0
  196. package/dist/university-content/quizzes/Q-para-201-component-patterns.yaml +56 -0
  197. package/dist/university-content/quizzes/Q-para-201-cross-cutting-concerns.yaml +56 -0
  198. package/dist/university-content/quizzes/Q-para-201-disciplines.yaml +66 -0
  199. package/dist/university-content/quizzes/Q-para-201-flows-deep-dive.yaml +66 -0
  200. package/dist/university-content/quizzes/Q-para-201-gates-deep-dive.yaml +66 -0
  201. package/dist/university-content/quizzes/Q-para-201-portal-protocol.yaml +56 -0
  202. package/dist/university-content/quizzes/Q-para-201-signal-patterns.yaml +56 -0
  203. package/dist/university-content/quizzes/Q-para-201-symbol-naming.yaml +66 -0
  204. package/dist/university-content/quizzes/Q-para-301-context-management.yaml +56 -0
  205. package/dist/university-content/quizzes/Q-para-301-decisions.yaml +76 -0
  206. package/dist/university-content/quizzes/Q-para-301-doctor-and-validation.yaml +66 -0
  207. package/dist/university-content/quizzes/Q-para-301-enforcement-levels.yaml +46 -0
  208. package/dist/university-content/quizzes/Q-para-301-fragility-tracking.yaml +46 -0
  209. package/dist/university-content/quizzes/Q-para-301-history-system.yaml +56 -0
  210. package/dist/university-content/quizzes/Q-para-301-navigation-system.yaml +56 -0
  211. package/dist/university-content/quizzes/Q-para-301-operations-review.yaml +66 -0
  212. package/dist/university-content/quizzes/Q-para-301-paradigm-shift.yaml +46 -0
  213. package/dist/university-content/quizzes/Q-para-301-protocols.yaml +56 -0
  214. package/dist/university-content/quizzes/Q-para-301-ripple-analysis.yaml +56 -0
  215. package/dist/university-content/quizzes/Q-para-301-sentinel-observability.yaml +46 -0
  216. package/dist/university-content/quizzes/Q-para-301-sync-and-maintenance.yaml +46 -0
  217. package/dist/university-content/quizzes/Q-para-301-wisdom-system.yaml +56 -0
  218. package/dist/university-content/quizzes/Q-para-401-agent-identity.yaml +66 -0
  219. package/dist/university-content/quizzes/Q-para-401-agent-interop.yaml +46 -0
  220. package/dist/university-content/quizzes/Q-para-401-agent-roles.yaml +56 -0
  221. package/dist/university-content/quizzes/Q-para-401-commit-conventions.yaml +56 -0
  222. package/dist/university-content/quizzes/Q-para-401-mastery-review.yaml +66 -0
  223. package/dist/university-content/quizzes/Q-para-401-mcp-tools-overview.yaml +66 -0
  224. package/dist/university-content/quizzes/Q-para-401-multi-agent-coordination.yaml +76 -0
  225. package/dist/university-content/quizzes/Q-para-401-notebooks-permissions.yaml +61 -0
  226. package/dist/university-content/quizzes/Q-para-401-orchestration-workflow.yaml +66 -0
  227. package/dist/university-content/quizzes/Q-para-401-pm-governance.yaml +66 -0
  228. package/dist/university-content/quizzes/Q-para-401-provider-cascade.yaml +56 -0
  229. package/dist/university-content/quizzes/Q-para-401-quick-check.yaml +46 -0
  230. package/dist/university-content/quizzes/Q-para-501-advanced-workflows.yaml +66 -0
  231. package/dist/university-content/quizzes/Q-para-501-aspect-graph-advanced.yaml +66 -0
  232. package/dist/university-content/quizzes/Q-para-501-aspect-graph-internals.yaml +66 -0
  233. package/dist/university-content/quizzes/Q-para-501-assessment-loops.yaml +46 -0
  234. package/dist/university-content/quizzes/Q-para-501-conductor-workspace.yaml +46 -0
  235. package/dist/university-content/quizzes/Q-para-501-habits-practice.yaml +56 -0
  236. package/dist/university-content/quizzes/Q-para-501-hook-enforcement.yaml +66 -0
  237. package/dist/university-content/quizzes/Q-para-501-lore-system.yaml +66 -0
  238. package/dist/university-content/quizzes/Q-para-501-platform-agent-ui.yaml +66 -0
  239. package/dist/university-content/quizzes/Q-para-501-review-compliance.yaml +61 -0
  240. package/dist/university-content/quizzes/Q-para-501-sentinel-deep-dive.yaml +86 -0
  241. package/dist/university-content/quizzes/Q-para-501-session-intelligence.yaml +66 -0
  242. package/dist/university-content/quizzes/Q-para-501-symphony-a-mail.yaml +66 -0
  243. package/dist/university-content/quizzes/Q-para-501-symphony-networking.yaml +66 -0
  244. package/dist/university-content/quizzes/Q-para-501-task-management.yaml +46 -0
  245. package/dist/university-content/quizzes/Q-para-601-agent-renaissance.yaml +66 -0
  246. package/dist/university-content/quizzes/Q-para-601-attention-scoring.yaml +56 -0
  247. package/dist/university-content/quizzes/Q-para-601-context-composition.yaml +66 -0
  248. package/dist/university-content/quizzes/Q-para-601-data-sovereignty.yaml +56 -0
  249. package/dist/university-content/quizzes/Q-para-601-event-stream.yaml +66 -0
  250. package/dist/university-content/quizzes/Q-para-601-knowledge-streams.yaml +66 -0
  251. package/dist/university-content/quizzes/Q-para-601-learning-loop.yaml +56 -0
  252. package/dist/university-content/quizzes/Q-para-601-maestro-team-collab.yaml +86 -0
  253. package/dist/university-content/quizzes/Q-para-601-nominations-debates.yaml +66 -0
  254. package/dist/university-content/quizzes/Q-para-701-agent-notebooks.yaml +66 -0
  255. package/dist/university-content/quizzes/Q-para-701-agent-pods-nevrland.yaml +66 -0
  256. package/dist/university-content/quizzes/Q-para-701-agent-profiles.yaml +66 -0
  257. package/dist/university-content/quizzes/Q-para-701-agent-roster.yaml +66 -0
  258. package/dist/university-content/quizzes/Q-para-701-agent-state.yaml +66 -0
  259. package/dist/university-content/quizzes/Q-para-701-learning-feedback-loop.yaml +66 -0
  260. package/dist/university-content/quizzes/Q-para-701-model-tier-resolution.yaml +66 -0
  261. package/dist/university-content/quizzes/Q-para-701-orchestration-enforcement.yaml +66 -0
  262. package/dist/university-content/quizzes/Q-para-701-per-project-rosters.yaml +66 -0
  263. package/dist/university-content/quizzes/Q-para-701-symphony-visibility.yaml +66 -0
  264. package/dist/university-content/quizzes/Q-plsat-v2.yaml +904 -0
  265. package/dist/university-content/quizzes/Q-plsat-v3.yaml +2909 -0
  266. package/dist/university-content/reference.json +2 -2
  267. package/dist/university-ui/assets/{index-CecQrfSn.js → index-nNgzO1il.js} +2 -2
  268. package/dist/university-ui/assets/{index-CecQrfSn.js.map → index-nNgzO1il.js.map} +1 -1
  269. package/dist/university-ui/index.html +1 -1
  270. package/dist/{upgrade-GX56QE3C.js → upgrade-NKN63VTY.js} +2 -2
  271. package/dist/validate-XUQZTF3H.js +9 -0
  272. package/dist/{watch-YCODNIET.js → watch-25GJHQYT.js} +1 -1
  273. package/lore-ui/dist/assets/{index-Bk-K0qgN.js → index-DKhNxgtW.js} +10 -10
  274. package/lore-ui/dist/index.html +1 -1
  275. package/package.json +2 -2
  276. package/platform-ui/dist/assets/{AmbientSection-BYjt75R1.js → AmbientSection-CwatqcBD.js} +1 -1
  277. package/platform-ui/dist/assets/{CanvasSection-rKvA_vZj.js → CanvasSection-dFAthehN.js} +1 -1
  278. package/platform-ui/dist/assets/{DocsSection-CI9K73M-.js → DocsSection-BZ2SFJBZ.js} +1 -1
  279. package/platform-ui/dist/assets/{GitSection-DSGj_c6S.js → GitSection-MNNYU1tO.js} +1 -1
  280. package/platform-ui/dist/assets/{GraphSection-CawN7pC5.js → GraphSection-COYjb4Pt.js} +1 -1
  281. package/platform-ui/dist/assets/LoreSection-B0hUbfsJ.js +1 -0
  282. package/platform-ui/dist/assets/{SentinelSection-DNgoYMH0.js → SentinelSection-BCxW1DCp.js} +1 -1
  283. package/platform-ui/dist/assets/{SymphonySection-C0zfcqv3.js → SymphonySection-BsucZRqy.js} +1 -1
  284. package/platform-ui/dist/assets/{TeamSection-Bzd3Dt9Q.js → TeamSection-C0QNTudW.js} +1 -1
  285. package/platform-ui/dist/assets/{UniversitySection-tBr62R0S.js → UniversitySection-DN1-g9pw.js} +1 -1
  286. package/platform-ui/dist/assets/{index-BaOmyn11.js → index-DwUT8pju.js} +2 -2
  287. package/platform-ui/dist/index.html +1 -1
  288. package/dist/add-P76GEMGF.js +0 -8
  289. package/dist/chunk-JQKKVAAN.js +0 -2
  290. package/dist/chunk-NQ47TA6C.js +0 -111
  291. package/dist/chunk-ODVKPZZ4.js +0 -2
  292. package/dist/chunk-Q2J542ST.js +0 -2
  293. package/dist/chunk-RBLK34IA.js +0 -11
  294. package/dist/chunk-RN4VE6P3.js +0 -521
  295. package/dist/chunk-WS2N27RX.js +0 -3
  296. package/dist/config-schema-GUQY2QN7.js +0 -2
  297. package/dist/decision-loader-2XPZE4EZ.js +0 -2
  298. package/dist/doctor-WMVULMQD.js +0 -2
  299. package/dist/list-5IUGP3ZB.js +0 -7
  300. package/dist/lore-loader-RVQI5GXL.js +0 -2
  301. package/dist/lore-loader-XY5MZRR2.js +0 -2
  302. package/dist/migrate-assessments-GEI5WMI2.js +0 -4
  303. package/dist/portal-compliance-6YR27IQU.js +0 -2
  304. package/dist/quiz-FE5UGAY2.js +0 -10
  305. package/dist/reindex-I6LPAKCC.js +0 -2
  306. package/dist/serve-OY6XYL7F.js +0 -12
  307. package/dist/server-2MNROHF6.js +0 -7
  308. package/dist/session-tracker-MWJAJA6Z.js +0 -2
  309. package/dist/show-BOAVWZPZ.js +0 -7
  310. package/dist/status-A37ECYNJ.js +0 -6
  311. package/dist/sync-DLUBV5HQ.js +0 -2
  312. package/dist/tools-5ITPEPSV.js +0 -2
  313. package/dist/university-content/courses/.purpose +0 -492
  314. package/dist/university-content/courses/para-001.json +0 -166
  315. package/dist/university-content/courses/para-101.json +0 -615
  316. package/dist/university-content/courses/para-201.json +0 -794
  317. package/dist/university-content/courses/para-301.json +0 -830
  318. package/dist/university-content/courses/para-401.json +0 -868
  319. package/dist/university-content/courses/para-501.json +0 -1166
  320. package/dist/university-content/courses/para-601.json +0 -719
  321. package/dist/university-content/courses/para-701.json +0 -807
  322. package/dist/university-content/plsat/.purpose +0 -162
  323. package/dist/university-content/plsat/v2.0.json +0 -760
  324. package/dist/university-content/plsat/v3.0.json +0 -3453
  325. package/dist/validate-C6SMKGYD.js +0 -9
  326. package/platform-ui/dist/assets/LoreSection-oO5dCe6O.js +0 -1
  327. /package/dist/{chunk-BV5PRPLB.js → chunk-IZSBGW6E.js} +0 -0
  328. /package/templates/paradigm/specs/{scan.md → probe.md} +0 -0
@@ -0,0 +1,2909 @@
1
+ id: Q-plsat-v3
2
+ title: The PLSAT — Paradigm Licensure Standardized Assessment Test
3
+ description: 99 questions. 90 minutes. 90% to pass. Good luck, scholar.
4
+ author: paradigm
5
+ created: '2026-04-22'
6
+ updated: '2026-04-22'
7
+ tags:
8
+ - plsat
9
+ - certification
10
+ symbols: []
11
+ difficulty: advanced
12
+ passThreshold: 0.9
13
+ timeLimit: 5400
14
+ totalSlots: 128
15
+ exam:
16
+ kind: proctored
17
+ category: paradigm-core
18
+ origin: imported
19
+ source: plsat/v3.0.json
20
+ questions:
21
+ - id: plsat-001
22
+ scenario: You've just joined a team that uses Paradigm. You open the project and see directories like `.paradigm/`, several `.purpose` files, and a `portal.yaml` at the root. A colleague asks you to document a new utility function they wrote in `src/lib/format-currency.ts`.
23
+ question: Which symbol prefix should you use to document this utility?
24
+ choices:
25
+ A: '`$format-currency` — because it describes a process (formatting)'
26
+ B: '`!format-currency` — because it signals a transformation event'
27
+ C: '`#format-currency` — because it is a documented code unit'
28
+ D: '`~format-currency` — because it applies a rule (formatting rules)'
29
+ E: '`^format-currency` — because it gates what format is allowed'
30
+ correct: C
31
+ explanation: In Paradigm, every documented code unit uses the `#` (Component) symbol. There are only 5 operational symbols, and `#` is the universal prefix for any code unit — utilities, services, handlers, components, hooks, you name it. `$` is for multi-step flows, `!` for signals/events, `~` for aspects with code anchors, and `^` for condition gates. A simple utility function is a component.
32
+ slot: slot-001
33
+ section: para-101
34
+ variants:
35
+ - id: plsat-001b
36
+ scenario: A teammate wrote a helper function in `src/utils/validate-email.ts` that checks email format. They want to document it in Paradigm but aren't sure which symbol to use.
37
+ question: Which symbol prefix is correct for documenting this utility?
38
+ choices:
39
+ A: '`$validate-email` — it describes a validation process'
40
+ B: '`!validate-email` — it signals whether an email is valid'
41
+ C: '`#validate-email` — it is a documented code unit'
42
+ D: '`~validate-email` — it enforces a validation rule'
43
+ E: '`^validate-email` — it gates what emails are accepted'
44
+ correct: C
45
+ explanation: 'Every documented code unit in Paradigm uses the `#` (Component) symbol. A utility function is a code unit — it gets `#`. The other symbols have specific meanings: `$` for multi-step flows, `!` for signals/events, `~` for aspects with code anchors, and `^` for condition gates.'
46
+ - id: plsat-002
47
+ scenario: A component manages user authentication state — tracking the current user, login status, and session tokens. You need to document it in a `.purpose` file.
48
+ question: How should this component be documented in Paradigm?
49
+ choices:
50
+ A: '`#auth-state` with no tags — the component name is descriptive enough'
51
+ B: '`#auth-state` with `tags: [state]` — the `[state]` tag classifies its role'
52
+ C: '`!auth-state` — authentication state changes should be modeled as signals'
53
+ D: '`$auth-state` — state management is a multi-step flow'
54
+ E: '`~auth-state` — authentication state is a cross-cutting concern requiring an aspect'
55
+ correct: B
56
+ explanation: State management components use the `#` (Component) symbol with a `[state]` tag from the tag bank. Paradigm uses only 5 operational symbols for structure, and a tag bank for classification. The `[state]` tag tells humans and AI agents that this component's primary role is managing state, while `#` identifies it as a documented code unit. Signals (`!`) are for events, flows (`$`) are for multi-step processes, and aspects (`~`) are for enforced rules with code anchors.
57
+ slot: slot-002
58
+ section: para-101
59
+ - id: plsat-003
60
+ scenario: Your team is debating logging practices. One developer argues that using `console.log` everywhere is fine because logs are just for debugging. Another insists on using structured, symbol-aware logging with Paradigm's logger (e.g., `log.component('#checkout-service').info('Processing payment', { amount })`).
61
+ question: What is the STRONGEST argument for using Paradigm's structured logger over raw `console.log`?
62
+ choices:
63
+ A: Structured logging is faster at runtime than `console.log`
64
+ B: Paradigm's logger automatically fixes bugs when it detects errors in the logs
65
+ C: Symbol-aware logging connects runtime behavior to the documented architecture, making it possible to trace issues back to specific components, flows, and gates
66
+ D: Using `console.log` will cause Paradigm's CI checks to fail
67
+ E: Structured logging is required by law for production applications
68
+ correct: C
69
+ explanation: 'The core value of Paradigm''s structured logger is traceability: every log line is tagged with a symbol (`#component`, `^gate`, `!signal`, etc.), which means you can correlate runtime behavior with the architectural documentation. When something goes wrong, you can trace from the log back to the component in the `.purpose` file, understand its flows, check its gates, and review its history. Raw `console.log` loses this connection. Performance (A) is not the primary benefit. Paradigm doesn''t auto-fix bugs (B) or enforce logging by law (E). CI checks (D) depend on team configuration, not a universal rule.'
70
+ slot: slot-003
71
+ section: para-101
72
+ variants:
73
+ - id: plsat-003b
74
+ scenario: During a code review, you notice a developer replaced all `log.component('#user-service').info(...)` calls with plain `console.log(...)` to 'simplify things.' The PR has 15 files changed.
75
+ question: What is the MOST important reason to reject this change?
76
+ choices:
77
+ A: '`console.log` is slower than Paradigm''s structured logger'
78
+ B: Paradigm's logger automatically prevents bugs in production
79
+ C: Removing symbol-aware logging breaks the connection between runtime behavior and documented architecture, making it impossible to trace issues to specific components
80
+ D: The CI pipeline requires Paradigm logger calls to pass
81
+ E: It violates JavaScript best practices to use `console.log`
82
+ correct: C
83
+ explanation: The fundamental value of Paradigm's structured logger is traceability. Each log line tagged with a symbol (#, ^, !, etc.) creates a bridge between runtime behavior and the architectural documentation. Removing this breaks the ability to trace from a log entry back to a component, its flows, gates, and history in .purpose files.
84
+ - id: plsat-004
85
+ scenario: You're setting up a brand new project with Paradigm. You run `paradigm shift` and it creates the `.paradigm/` directory structure. Your project will have a REST API with several endpoints that require condition checks — such as authentication, feature flags, and rate limiting.
86
+ question: Which file MUST you create at the project root?
87
+ choices:
88
+ A: '`auth.yaml` — Paradigm''s dedicated authentication config'
89
+ B: '`gates.yaml` — where all `^gate` definitions live'
90
+ C: '`portal.yaml` — where gates and protected routes are defined'
91
+ D: '`.paradigm/security.yaml` — security config goes in the paradigm directory'
92
+ E: No file needed — gates are defined inline in `.purpose` files only
93
+ correct: C
94
+ explanation: '`portal.yaml` is REQUIRED at the project root whenever your project has protected routes. It defines gates (`^` symbols) with their check expressions, and maps routes to the gates that protect them. Gates can represent any condition checkpoint — authentication, authorization, feature flags, rate limits, or custom business rules. Gates can also appear in `.purpose` files for documentation, but `portal.yaml` is the authoritative source for route protection.'
95
+ slot: slot-004
96
+ section: para-101
97
+ - id: plsat-006
98
+ scenario: |-
99
+ Your team uses the following directory structure:
100
+
101
+ ```
102
+ src/
103
+ middleware/auth.ts
104
+ events/payment-events.ts
105
+ services/billing.ts
106
+ flows/onboarding.ts
107
+ aspects/rate-limiter.ts
108
+ ```
109
+
110
+ A new developer asks which Paradigm logger method to use in each file. Your team follows the conventional directory-to-symbol mapping as a guideline.
111
+ question: Which file-to-logger mapping is INCORRECT?
112
+ choices:
113
+ A: '`middleware/auth.ts` → `log.gate(''^auth-check'')`'
114
+ B: '`events/payment-events.ts` → `log.signal(''!payment-received'')`'
115
+ C: '`services/billing.ts` → `log.component(''#billing-service'')`'
116
+ D: '`flows/onboarding.ts` → `log.signal(''!onboarding-started'')`'
117
+ E: '`aspects/rate-limiter.ts` → `log.aspect(''~rate-limited'')`'
118
+ correct: D
119
+ explanation: 'Files in the `flows/` directory conventionally correspond to `$` (Flow) symbols and should use `log.flow()`, not `log.signal()`. The correct call would be `log.flow(''$onboarding'').info(...)`. The conventional directory-to-symbol mapping is: middleware/auth/guards → `^` (gate), events/handlers/listeners → `!` (signal), services/lib/components → `#` (component), flows/sagas/workflows → `$` (flow), aspects/rules → `~` (aspect). While teams may adapt these conventions to their needs, this mapping provides a consistent default that helps developers and AI agents reason about the codebase.'
120
+ slot: slot-006
121
+ section: para-101
122
+ variants:
123
+ - id: plsat-006b
124
+ scenario: Your team follows directory-to-symbol conventions. A new file `src/guards/feature-flags.ts` is created to check whether features are enabled before allowing access. A developer uses `log.component('#feature-flags')` in the file.
125
+ question: Is this logger usage correct according to conventional directory mapping?
126
+ choices:
127
+ A: Yes — all code units use `log.component()`
128
+ B: No — `guards/` maps to `^` (gate), so it should be `log.gate('^feature-flags')`
129
+ C: No — feature flags are signals, so it should be `log.signal('!feature-flags')`
130
+ D: No — feature flags are aspects, so it should be `log.aspect('~feature-flags')`
131
+ E: Yes — but the symbol should be `#feature-flags-guard` for clarity
132
+ correct: B
133
+ explanation: The `guards/` directory conventionally maps to `^` (gate) symbols and should use `log.gate()`. Guards, middleware, and auth directories all map to gates because they represent condition checkpoints. Feature flags check conditions before allowing access — that's a gate. The correct call is `log.gate('^feature-flags').info(...)`.
134
+ - id: plsat-007
135
+ scenario: Your team is prototyping a new feature — an experimental search widget that may or may not ship. You've built the component and want to document it in a `.purpose` file. You also want to make it clear to other developers and AI agents that this is an idea in progress, not a committed part of the product.
136
+ question: How should you classify this component to indicate it is experimental?
137
+ choices:
138
+ A: Use a special `?experimental-widget` symbol prefix reserved for ideas
139
+ B: 'Add `status: experimental` to the component definition'
140
+ C: 'Add the `[idea]` tag to the component: `tags: [idea]`'
141
+ D: Create an aspect `~experimental` and apply it to the component
142
+ E: Comment it out in the `.purpose` file with `# EXPERIMENTAL`
143
+ correct: C
144
+ explanation: 'Paradigm''s tag bank includes the `[idea]` tag for exactly this purpose. You''d define the component as `#experimental-widget` with `tags: [idea]`. Tags are the classification layer in Paradigm: the 5 operational symbols (`#`, `$`, `^`, `!`, `~`) provide structure, while tags from the tag bank provide classification metadata like `[idea]`, `[feature]`, `[state]`, `[integration]`, `[critical]`, etc. Choice B (status field) is plausible but `status` is typically for lifecycle states like `active` or `deprecated`, not for ideation. There is no `?` symbol prefix in Paradigm (A). Commenting it out (E) removes it from the symbol graph entirely.'
145
+ slot: slot-007
146
+ section: para-101
147
+ - id: plsat-008
148
+ scenario: 'A `.paradigm/tags.yaml` file has three sections: `core`, `project`, and `suggested`. The `core` section contains tags like `[feature]`, `[integration]`, `[state]`, `[critical]`, and `[security]`.'
149
+ question: What is the `suggested` section for?
150
+ choices:
151
+ A: Tags that Paradigm automatically generates based on code analysis
152
+ B: Tags proposed by AI agents awaiting human approval before use
153
+ C: Tags from the Paradigm community marketplace that can be installed
154
+ D: Tags that are deprecated and will be removed in the next version
155
+ E: Tags that the framework suggests but are optional to implement
156
+ correct: B
157
+ explanation: 'The `suggested` section in `tags.yaml` holds tags proposed by AI agents (via `paradigm_tags_suggest`) that haven''t been approved by a human yet. This is part of Paradigm''s governance model: AI can propose new classifications, but a human must promote them to `project` or `core` before they become official. This prevents tag sprawl and keeps the taxonomy intentional. The `core` tags ship with Paradigm, `project` tags are team-defined, and `suggested` are AI proposals awaiting review.'
158
+ slot: slot-008
159
+ section: para-101
160
+ - id: plsat-009
161
+ scenario: |-
162
+ You're reading a project's `.paradigm/config.yaml` and see:
163
+
164
+ ```yaml
165
+ discipline: fullstack
166
+ conventions:
167
+ naming: kebab-case
168
+ components: PascalCase
169
+ ```
170
+ question: Based on these conventions, which symbol ID is correctly formatted?
171
+ choices:
172
+ A: '`#paymentService` — camelCase for services'
173
+ B: '`#payment-service` — kebab-case for IDs, PascalCase for class-like references'
174
+ C: '`#Payment_Service` — PascalCase with underscores'
175
+ D: '`#PAYMENT_SERVICE` — SCREAMING_SNAKE for services'
176
+ E: '`#payment.service` — dot notation for namespaced components'
177
+ correct: B
178
+ explanation: 'Paradigm conventions specify kebab-case for all symbol IDs. The `components: PascalCase` convention means that when referring to class-like components in prose or code, you use PascalCase (e.g., `#PaymentService`), but the canonical ID in `.purpose` files is kebab-case (`#payment-service`). This dual convention is documented in the CLAUDE.md: ''Use kebab-case for all symbol IDs'' and ''Use PascalCase for class-like components''.'
179
+ slot: slot-009
180
+ section: para-101
181
+ - id: plsat-011
182
+ scenario: 'You''re designing an e-commerce checkout process that involves: (1) validating the cart, (2) checking inventory, (3) processing payment via Stripe, (4) creating the order record, and (5) sending a confirmation email. This spans 5 different components across 3 directories.'
183
+ question: How should this be documented in Paradigm?
184
+ choices:
185
+ A: As a single `#checkout` component with sub-steps in its description
186
+ B: As a `$checkout-flow` with ordered steps referencing each component
187
+ C: As five separate `!` signals chained together
188
+ D: As a `~checkout-required` aspect applied to all five components
189
+ E: As five `^` gates that must be passed sequentially
190
+ correct: B
191
+ explanation: 'A multi-step process spanning 3+ components is the textbook definition of a Flow (`$`). You''d define `$checkout-flow` with ordered steps, each referencing the responsible component and action: `#cart-validator` validates, `#inventory-checker` checks stock, `#stripe-service` processes payment, `#order-service` creates the record, `#email-sender` confirms. Flows document the sequence and make it visible to `paradigm_flows_affected` for impact analysis. Signals (`!`) may be emitted during the flow, but they don''t define the sequence.'
192
+ slot: slot-011
193
+ section: para-201
194
+ variants:
195
+ - id: plsat-011b
196
+ scenario: 'Your team is building a user registration process: (1) validate form input, (2) check if email is taken, (3) hash the password, (4) create the user record, (5) send a welcome email, (6) trigger analytics. This spans components across `validators/`, `services/`, and `integrations/`.'
197
+ question: What is the correct Paradigm symbol for documenting this process?
198
+ choices:
199
+ A: '`#user-registration` — it''s a single feature component'
200
+ B: '`$user-registration` — it''s a multi-step flow spanning multiple components'
201
+ C: '`!user-registered` — it''s an event that triggers side effects'
202
+ D: '`~registration-required` — it''s a cross-cutting concern'
203
+ E: '`^registration-valid` — it''s a validation gate'
204
+ correct: B
205
+ explanation: A multi-step process spanning 3+ components is a Flow (`$`). You'd define `$user-registration` with ordered steps referencing each responsible component. Flows make the sequence visible for impact analysis via `paradigm_flows_affected`. While `!user-registered` might be emitted at the END of the flow, the process itself is the flow.
206
+ - id: plsat-pg1-q1a
207
+ scenario: ''
208
+ question: A new endpoint `DELETE /api/projects/:id` needs to be added. Only project admins should be able to delete projects. What is the correct route entry in portal.yaml?
209
+ choices:
210
+ A: '`"DELETE /api/projects/:id": [^authenticated, ^project-admin]`'
211
+ B: '`"DELETE /api/projects/:id": [^project-admin]`'
212
+ C: '`"DELETE /api/projects/:id": [^authenticated]`'
213
+ D: '`"DELETE /api/projects/:id": [^project-admin, ^authenticated]`'
214
+ E: '`"DELETE /api/projects/:id": [^admin]`'
215
+ correct: B
216
+ explanation: 'Since `^project-admin` already has `requires: ["^authenticated"]`, listing `^authenticated` in the route is redundant. The gate dependency chain means `^project-admin` will automatically enforce `^authenticated` first. The route only needs to specify `[^project-admin]`.'
217
+ slot: pg-portal-q1
218
+ section: para-201
219
+ passageId: passage-portal-review
220
+ passage: |-
221
+ Your team's portal.yaml for a project management app:
222
+
223
+ ```yaml
224
+ version: "1.0"
225
+ gates:
226
+ ^authenticated:
227
+ description: User must be logged in
228
+ check: req.user != null
229
+ ^project-member:
230
+ description: User must be a member of the project
231
+ check: project.members.includes(req.user.id)
232
+ requires: ["^authenticated"]
233
+ ^project-admin:
234
+ description: User must be an admin of the project
235
+ check: project.admins.includes(req.user.id)
236
+ requires: ["^authenticated"]
237
+ ^comment-author:
238
+ description: User must be the comment author
239
+ check: comment.authorId === req.user.id
240
+
241
+ routes:
242
+ "GET /api/projects": [^authenticated]
243
+ "POST /api/projects": [^authenticated]
244
+ "GET /api/projects/:id": [^authenticated, ^project-member]
245
+ "PUT /api/projects/:id": [^project-admin]
246
+ "DELETE /api/projects/:id": [^project-admin]
247
+ "POST /api/projects/:id/comments": [^authenticated, ^project-member]
248
+ ```
249
+ - id: plsat-pg1-q2a
250
+ scenario: ''
251
+ question: 'A colleague adds a new endpoint: `DELETE /api/projects/:id/comments/:commentId`. Only the comment author should be able to delete their own comment. Which gate configuration follows Paradigm best practices?'
252
+ choices:
253
+ A: Add `[^authenticated, ^project-admin]` — admins can delete anything
254
+ B: Add `[^authenticated, ^project-member, ^comment-author]` — must be a member AND the author
255
+ C: Add `[^comment-author]` — authorship implies authentication and membership
256
+ D: Add `[^authenticated, ^comment-author]` — logged in and owns the comment
257
+ E: Don't add it to portal.yaml — handle it in the route handler code only
258
+ correct: B
259
+ explanation: 'The correct approach is `[^authenticated, ^project-member, ^comment-author]`. Being the comment author doesn''t automatically mean you''re authenticated or a project member. Each gate has one responsibility: `^authenticated` checks login, `^project-member` checks project access, `^comment-author` checks ownership. (Strictly speaking, `^project-member` already has `requires: ["^authenticated"]`, so the explicit `^authenticated` entry is redundant but harmless — and no offered choice lists `[^project-member, ^comment-author]` alone.) Choice E violates the cardinal rule: all protected routes MUST be in portal.yaml.'
260
+ slot: pg-portal-q2
261
+ section: para-201
262
+ passageId: passage-portal-review
263
+ passage: |-
264
+ Your team's portal.yaml for a project management app:
265
+
266
+ ```yaml
267
+ version: "1.0"
268
+ gates:
269
+ ^authenticated:
270
+ description: User must be logged in
271
+ check: req.user != null
272
+ ^project-member:
273
+ description: User must be a member of the project
274
+ check: project.members.includes(req.user.id)
275
+ requires: ["^authenticated"]
276
+ ^project-admin:
277
+ description: User must be an admin of the project
278
+ check: project.admins.includes(req.user.id)
279
+ requires: ["^authenticated"]
280
+ ^comment-author:
281
+ description: User must be the comment author
282
+ check: comment.authorId === req.user.id
283
+
284
+ routes:
285
+ "GET /api/projects": [^authenticated]
286
+ "POST /api/projects": [^authenticated]
287
+ "GET /api/projects/:id": [^authenticated, ^project-member]
288
+ "PUT /api/projects/:id": [^project-admin]
289
+ "DELETE /api/projects/:id": [^project-admin]
290
+ "POST /api/projects/:id/comments": [^authenticated, ^project-member]
291
+ ```
292
+ - id: plsat-pg1-q3a
293
+ scenario: ''
294
+ question: Looking at the existing routes, the `PUT /api/projects/:id` route uses `[^project-admin]` without `^authenticated`. Is this a problem?
295
+ choices:
296
+ A: Yes — every route must explicitly list `^authenticated` for security
297
+ B: 'No — `^project-admin` has `requires: ["^authenticated"]`, so authentication is enforced implicitly'
298
+ C: Yes — the `requires` field is only documentation, not enforcement
299
+ D: No — PUT requests don't need authentication because they're idempotent
300
+ E: Yes — portal.yaml should always list gates in order of evaluation
301
+ correct: B
302
+ explanation: 'This is correct portal.yaml usage. The `^project-admin` gate has `requires: ["^authenticated"]`, which means authentication is automatically enforced as a prerequisite. Listing `^authenticated` explicitly would be redundant. The `requires` field creates a dependency chain that gates must satisfy before evaluation.'
303
+ slot: pg-portal-q3
304
+ section: para-201
305
+ passageId: passage-portal-review
306
+ passage: |-
307
+ Your team's portal.yaml for a project management app:
308
+
309
+ ```yaml
310
+ version: "1.0"
311
+ gates:
312
+ ^authenticated:
313
+ description: User must be logged in
314
+ check: req.user != null
315
+ ^project-member:
316
+ description: User must be a member of the project
317
+ check: project.members.includes(req.user.id)
318
+ requires: ["^authenticated"]
319
+ ^project-admin:
320
+ description: User must be an admin of the project
321
+ check: project.admins.includes(req.user.id)
322
+ requires: ["^authenticated"]
323
+ ^comment-author:
324
+ description: User must be the comment author
325
+ check: comment.authorId === req.user.id
326
+
327
+ routes:
328
+ "GET /api/projects": [^authenticated]
329
+ "POST /api/projects": [^authenticated]
330
+ "GET /api/projects/:id": [^authenticated, ^project-member]
331
+ "PUT /api/projects/:id": [^project-admin]
332
+ "DELETE /api/projects/:id": [^project-admin]
333
+ "POST /api/projects/:id/comments": [^authenticated, ^project-member]
334
+ ```
335
+ - id: plsat-pg2-q1a
336
+ scenario: ''
337
+ question: What is the correct way to reference `#email-sender` from another `.purpose` file in a different directory?
338
+ choices:
339
+ A: '`notifications/#email-sender` — use the directory path as a namespace'
340
+ B: '`#email-sender` — symbol IDs are globally unique across the project'
341
+ C: '`&email-sender` — integrations use the `&` prefix when cross-referenced'
342
+ D: '`#notifications.email-sender` — use dot notation for cross-module references'
343
+ E: '`import: #email-sender from notifications` — use import syntax'
344
+ correct: B
345
+ explanation: Symbol IDs are globally unique across the entire Paradigm project. You reference `#email-sender` the same way everywhere — no namespacing, no path prefixes, no import syntax. Paradigm's index tracks where each symbol is defined.
346
+ slot: pg-purpose-q1
347
+ section: para-101
348
+ passageId: passage-purpose-review
349
+ passage: |-
350
+ You're reviewing a `.purpose` file submitted in a pull request for `src/notifications/`:
351
+
352
+ ```yaml
353
+ name: Notifications
354
+ components:
355
+ #email-sender:
356
+ description: Sends transactional emails
357
+ file: email.ts
358
+ tags: [integration, sendgrid]
359
+ #push-notifier:
360
+ description: Sends push notifications
361
+ file: push.ts
362
+ #notification-preferences:
363
+ description: User notification settings
364
+ file: preferences.ts
365
+ tags: [state]
366
+ signals:
367
+ !notification-sent:
368
+ description: Fires after any notification is delivered
369
+ emitters: ["#email-sender", "#push-notifier"]
370
+ !preferences-updated:
371
+ description: User changed notification settings
372
+ emitters: ["#notification-preferences"]
373
+ ```
374
+ - id: plsat-pg2-q2a
375
+ scenario: ''
376
+ question: This `.purpose` file is missing a critical top-level field. What is it?
377
+ choices:
378
+ A: '`version` — every purpose file must specify a schema version'
379
+ B: '`description` — every purpose file should describe what the module does'
380
+ C: '`tags` — top-level tags are required for indexing'
381
+ D: '`gates` — every module must define its authorization requirements'
382
+ E: '`flows` — signals require at least one flow to be meaningful'
383
+ correct: B
384
+ explanation: Every `.purpose` file should have a `description` field explaining what the module/directory does. While `name` identifies it, `description` provides the context that AI agents and developers need to understand the module's role.
385
+ slot: pg-purpose-q2
386
+ section: para-101
387
+ passageId: passage-purpose-review
388
+ passage: |-
389
+ You're reviewing a `.purpose` file submitted in a pull request for `src/notifications/`:
390
+
391
+ ```yaml
392
+ name: Notifications
393
+ components:
394
+ #email-sender:
395
+ description: Sends transactional emails
396
+ file: email.ts
397
+ tags: [integration, sendgrid]
398
+ #push-notifier:
399
+ description: Sends push notifications
400
+ file: push.ts
401
+ #notification-preferences:
402
+ description: User notification settings
403
+ file: preferences.ts
404
+ tags: [state]
405
+ signals:
406
+ !notification-sent:
407
+ description: Fires after any notification is delivered
408
+ emitters: ["#email-sender", "#push-notifier"]
409
+ !preferences-updated:
410
+ description: User changed notification settings
411
+ emitters: ["#notification-preferences"]
412
+ ```
413
+ - id: plsat-pg2-q3a
414
+ scenario: ''
415
+ question: A developer wants to add a new component `#sms-sender` to this module that sends SMS via Twilio. Which definition follows the existing patterns in this file?
416
+ choices:
417
+ A: '`#sms-sender: { description: Sends SMS, file: sms.ts, tags: [integration, twilio] }` and add it to `!notification-sent` emitters'
418
+ B: '`!sms-sent: { description: SMS notification signal }` — SMS is an event'
419
+ C: '`#sms-sender: { description: Sends SMS, file: sms.ts }` with no tags or signal updates'
420
+ D: '`~sms-required: { description: SMS must be available }` — SMS availability is an aspect'
421
+ E: '`$sms-flow: { description: Send SMS process }` — sending SMS is a flow'
422
+ correct: A
423
+ explanation: 'Following the file''s existing pattern: `#email-sender` has `tags: [integration, sendgrid]` and is listed in `!notification-sent` emitters. The new `#sms-sender` should mirror this: use `tags: [integration, twilio]` for classification and add it as an emitter of `!notification-sent`. Consistency with existing patterns is key.'
424
+ slot: pg-purpose-q3
425
+ section: para-101
426
+ passageId: passage-purpose-review
427
+ passage: |-
428
+ You're reviewing a `.purpose` file submitted in a pull request for `src/notifications/`:
429
+
430
+ ```yaml
431
+ name: Notifications
432
+ components:
433
+ #email-sender:
434
+ description: Sends transactional emails
435
+ file: email.ts
436
+ tags: [integration, sendgrid]
437
+ #push-notifier:
438
+ description: Sends push notifications
439
+ file: push.ts
440
+ #notification-preferences:
441
+ description: User notification settings
442
+ file: preferences.ts
443
+ tags: [state]
444
+ signals:
445
+ !notification-sent:
446
+ description: Fires after any notification is delivered
447
+ emitters: ["#email-sender", "#push-notifier"]
448
+ !preferences-updated:
449
+ description: User changed notification settings
450
+ emitters: ["#notification-preferences"]
451
+ ```
452
+ - id: plsat-014
453
+ scenario: 'You''re implementing a webhook handler that receives events from a third-party payment provider. When a payment succeeds, your system needs to: update the order status, send a receipt email, and notify the analytics service. These are independent side effects that don''t need to happen in order.'
454
+ question: What is the MOST appropriate Paradigm modeling for these independent side effects?
455
+ choices:
456
+ A: A `$payment-webhook-flow` with three sequential steps
457
+ B: A `!payment-succeeded` signal with three subscriber components
458
+ C: Three separate `^` gates that the webhook must pass
459
+ D: A `~payment-side-effects` aspect applied to the webhook handler
460
+ E: Three `#` components with no formal connection between them
461
+ correct: B
462
+ explanation: When side effects are independent and don't require ordering, a Signal (`!`) is the right model. You define `!payment-succeeded` and document three subscriber components (`#order-service`, `#email-sender`, `#analytics-tracker`) that react to it. Signals are for events that trigger side effects. A Flow (`$`) would be appropriate if the steps needed to happen in a specific order or if one step depended on the output of another. Gates (`^`) are for condition checks, not business logic.
463
+ slot: slot-014
464
+ section: para-201
465
+ variants:
466
+ - id: plsat-014b
467
+ scenario: 'When a user completes their profile, three independent things should happen: (1) update the user''s completion badge, (2) notify their team admin, (3) log the event to analytics. None of these depend on each other.'
468
+ question: How should these independent reactions be modeled in Paradigm?
469
+ choices:
470
+ A: A `$profile-completion-flow` with three sequential steps
471
+ B: A `!profile-completed` signal with three subscriber components
472
+ C: Three `^` gates that must all pass
473
+ D: A single `#profile-handler` component that does all three things
474
+ E: Three `~` aspects applied to the profile component
475
+ correct: B
476
+ explanation: Independent side effects that don't require ordering are modeled as Signal (`!`) subscribers. Define `!profile-completed` and document three subscriber components that react to it. A Flow (`$`) would be appropriate only if the steps needed specific ordering or depended on each other's output.
477
+ - id: plsat-015
478
+ scenario: 'Your project''s discipline is `fullstack`. You''re adding a new feature: team invitations. The feature involves a UI component for the invite form, an API endpoint, a service for generating invite tokens, and an email sender. You create a new directory `src/features/team-invites/`.'
479
+ question: What should the `.purpose` file in this directory contain at minimum?
480
+ choices:
481
+ A: Just a `name` field — Paradigm will auto-discover the rest
482
+ B: A `name`, `description`, and at least one `#component` entry
483
+ C: A full `$flow` definition with all steps documented
484
+ D: Gate definitions (`^`) for all protected endpoints in this feature
485
+ E: Signal definitions (`!`) for all events this feature emits
486
+ correct: B
487
+ explanation: At minimum, a `.purpose` file needs a `name` and `description` (to orient agents and developers) and at least one `#component` documenting a code unit. Gates go in `portal.yaml` (the authoritative source for route protection), not in purpose files. Flows and signals are important but not required for every feature — they should be added when the feature has multi-step processes or emits events. Paradigm does not auto-discover components; they must be explicitly documented.
488
+ slot: slot-015
489
+ section: para-201
490
+ - id: plsat-017
491
+ scenario: |-
492
+ Your team defines the following flow:
493
+
494
+ ```yaml
495
+ flows:
496
+ $user-onboarding:
497
+ description: New user setup after registration
498
+ steps:
499
+ - component: "#email-verifier"
500
+ action: send-verification-email
501
+ - component: "#profile-wizard"
502
+ action: collect-profile-data
503
+ - component: "#team-assigner"
504
+ action: assign-default-team
505
+ - component: "#welcome-emailer"
506
+ action: send-welcome-email
507
+ signals: ["!user-onboarded"]
508
+ ```
509
+
510
+ You need to modify `#email-verifier` to use a new email provider.
511
+ question: Before making the code change, what should you do FIRST?
512
+ choices:
513
+ A: Run `paradigm_search` to find all references to `#email-verifier`
514
+ B: Read the `#email-verifier` source file to understand the current implementation
515
+ C: Call `paradigm_ripple` on `#email-verifier` to understand the impact
516
+ D: Call `paradigm_orchestrate_inline` to plan the migration
517
+ E: Update the `.purpose` file first to reflect the new provider
518
+ correct: C
519
+ explanation: Before modifying ANY symbol, the first step is always `paradigm_ripple`. This shows you what depends on `#email-verifier` directly and indirectly — in this case, you'd discover it's part of `$user-onboarding` and any other flows or components that reference it. This prevents you from making changes that break downstream dependencies. After ripple analysis, you'd read the source (B), check wisdom/history, and then implement. Orchestration (D) is for complex multi-file tasks, not single-component changes.
520
+ slot: slot-017
521
+ section: para-201
522
+ - id: plsat-018
523
+ scenario: You're defining a gate for a multi-tenant SaaS application. Users can belong to multiple organizations, and each organization has its own resources. You need a gate that checks whether the requesting user is a member of the organization that owns the requested resource.
524
+ question: Which gate definition follows the Paradigm portal pattern?
525
+ choices:
526
+ A: |-
527
+ ```yaml
528
+ ^org-member:
529
+ description: User is a member of the organization AND has read permission
530
+ check: org.members.includes(req.user.id) && req.user.permissions.read
531
+ ```
532
+ B: |-
533
+ ```yaml
534
+ ^org-member:
535
+ description: User is a member of the resource's organization
536
+ check: org.members.includes(req.user.id)
537
+ ```
538
+ C: |-
539
+ ```yaml
540
+ ^org-access:
541
+ description: User can access organization resources with full CRUD
542
+ check: org.members.includes(req.user.id) && req.user.role !== 'viewer'
543
+ ```
544
+ D: |-
545
+ ```yaml
546
+ ^resource-check:
547
+ description: Validates resource exists and user has access
548
+ check: resource != null && org.members.includes(req.user.id)
549
+ ```
550
+ E: |-
551
+ ```yaml
552
+ ^member-or-admin:
553
+ description: User is either a member or an admin of the organization
554
+ check: org.members.includes(req.user.id) || org.admins.includes(req.user.id)
555
+ ```
556
+ correct: B
557
+ explanation: 'Paradigm''s gate philosophy is ''one responsibility per gate.'' Choice B does exactly one thing: checks organization membership. Choice A bundles membership with permissions (two responsibilities). Choice C adds a role check. Choice D validates resource existence (that''s a different concern). Choice E conflates membership and admin roles. If you need permission checks, create `^org-reader`, `^org-writer` as separate gates with `requires: ["^org-member"]`. Keep gates minimal and composable.'
558
+ slot: slot-018
559
+ section: para-201
560
+ - id: plsat-020
561
+ scenario: 'You open `.paradigm/config.yaml` and see `discipline: fullstack`. You''re curious what this means and whether it affects how symbols are mapped.'
562
+ question: What does the `discipline` field control in Paradigm?
563
+ choices:
564
+ A: It determines which programming language the project uses
565
+ B: It configures the symbol-to-directory mapping and recommended patterns for the project type
566
+ C: It restricts which symbols are allowed — e.g., some disciplines don't use flows
567
+ D: It's purely cosmetic — shown in the dashboard but has no functional effect
568
+ E: It determines the deployment pipeline configuration
569
+ correct: B
570
+ explanation: The `discipline` field tells Paradigm what kind of project this is, which influences symbol mappings (e.g., where components vs. gates typically live), suggested patterns, and how AI agents reason about the codebase. A `fullstack` discipline suggests `middleware/` maps to gates, `services/` to components, etc. Different disciplines (e.g., `cli`, `library`, `embedded`) would have different default mappings. Since v2, Paradigm auto-detects the discipline from project structure at init time. It doesn't restrict symbols — all 5 are always available.
571
+ slot: slot-020
572
+ section: para-201
573
+ - id: plsat-021
574
+ scenario: |-
575
+ While reviewing a PR, you notice a developer added a new webhook endpoint:
576
+
577
+ ```typescript
578
+ // src/api/webhooks/stripe.ts
579
+ app.post('/api/webhooks/stripe', async (req, res) => {
580
+ const event = verifyStripeSignature(req);
581
+ if (event.type === 'payment_intent.succeeded') {
582
+ await updateOrderStatus(event.data);
583
+ await sendReceipt(event.data);
584
+ }
585
+ res.json({ received: true });
586
+ });
587
+ ```
588
+
589
+ The developer did not update any Paradigm files.
590
+ question: Which Paradigm files should be updated? Select the MOST complete answer.
591
+ choices:
592
+ A: Only `portal.yaml` — add the webhook route with appropriate gates
593
+ B: Only the nearest `.purpose` file — add `#stripe-webhook-handler` as a component
594
+ C: Both `portal.yaml` and the nearest `.purpose` file
595
+ D: The `.purpose` file, `portal.yaml`, and add a `!payment-succeeded` signal definition
596
+ E: No updates needed — webhooks are external and don't need Paradigm documentation
597
+ correct: D
598
+ explanation: 'The most complete answer includes: (1) The `.purpose` file needs a `#stripe-webhook-handler` component with `tags: [integration, stripe]`. (2) `portal.yaml` needs the route — even webhook endpoints may need gates (e.g., signature verification as `^stripe-signature-valid`). (3) The payment success event should be documented as `!payment-succeeded` since it triggers side effects (order update, receipt). This is the Paradigm principle: if it exists in code, it should exist in the symbol graph.'
599
+ slot: slot-021
600
+ section: para-201
601
+ - id: plsat-022
602
+ scenario: |-
603
+ A developer writes this commit message:
604
+
605
+ ```
606
+ added apple pay button and updated checkout
607
+ ```
608
+ question: What is wrong with this commit message according to Paradigm conventions?
609
+ choices:
610
+ A: It should use past tense ('added' is correct, actually)
611
+ B: It's missing the conventional commit type, primary symbol in parentheses, and the `Symbols:` trailer
612
+ C: It should be in ALL CAPS for visibility
613
+ D: It should reference the Jira ticket number instead of symbols
614
+ E: Nothing is wrong — commit messages are personal preference
615
+ correct: B
616
+ explanation: |-
617
+ Paradigm commit messages follow a strict format: `type(#primary-symbol): description` in the subject, symbol references in the body, and a `Symbols:` trailer for machine parsing. The correct message would be:
618
+
619
+ ```
620
+ feat(#payment-form): add Apple Pay support
621
+
622
+ - Add #apple-pay-button component
623
+ - Update $checkout-flow with new payment step
624
+
625
+ Symbols: #payment-form, #apple-pay-button, $checkout-flow
626
+ ```
627
+
628
+ The `Symbols:` trailer is parsed by the post-commit hook for automatic history capture.
629
+ slot: slot-022
630
+ section: para-201
631
+ - id: plsat-023
632
+ scenario: You're building a real-time notification system. Notifications can arrive via WebSocket, and the user can mark them as read, archive them, or delete them. The system also needs to handle notification preferences (email, push, in-app) and batching for high-volume scenarios.
633
+ question: A team member suggests modeling each notification action (read, archive, delete) as a separate `$` flow. What is the BEST response?
634
+ choices:
635
+ A: Agree — each action is a distinct process that should have its own flow
636
+ B: Disagree — single actions are not flows; use `#` components for each action and a `$notification-lifecycle` flow for the overall process
637
+ C: Disagree — these should all be `!` signals since they're user-triggered events
638
+ D: Agree, but only if each action involves 3+ components
639
+ E: Disagree — model them as `^` gates since they require condition checks
640
+ correct: B
641
+ explanation: 'Flows (`$`) are for multi-step processes spanning 3+ components. A single action like ''mark as read'' is just a component method, not a flow. The correct modeling is: `#notification-reader`, `#notification-archiver`, etc. as components, with `!notification-read`, `!notification-archived` as signals for side effects. If the OVERALL lifecycle (receive → display → interact → archive/delete) is worth documenting, THAT is the flow: `$notification-lifecycle`. Choice D gets close but misses the key insight about modeling the lifecycle.'
642
+ slot: slot-023
643
+ section: para-201
644
+ - id: plsat-024
645
+ scenario: |-
646
+ Your project has the following aspect definition:
647
+
648
+ ```yaml
649
+ ~audit-required:
650
+ description: Financial operations must produce audit logs
651
+ anchors:
652
+ - src/middleware/audit.ts:15-35
653
+ - src/decorators/auditable.ts:1-20
654
+ applies-to: ["#*Service"]
655
+ enforcement: middleware
656
+ ```
657
+
658
+ A colleague refactors `audit.ts` and moves the audit logic from lines 15-35 to lines 50-70.
659
+ question: What happens if the anchors are not updated?
660
+ choices:
661
+ A: Nothing — anchors are just documentation hints and aren't validated
662
+ B: '`paradigm doctor` will report stale anchors, and `paradigm_aspect_check` will flag the mismatch'
663
+ C: The application will crash because the aspect can't find its enforcement code
664
+ D: The audit middleware will stop working because Paradigm controls execution
665
+ E: The CI pipeline will block the merge due to anchor validation
666
+ correct: B
667
+ explanation: Paradigm is a documentation/intelligence layer, not a runtime. Stale anchors won't crash your app or stop middleware from working. However, `paradigm doctor` (the validation command) and `paradigm_aspect_check` (the MCP tool) will detect that the anchored lines no longer match the expected code. This is important because anchors are the mechanism that keeps aspects grounded in real code rather than becoming aspirational documentation. The developer should update anchors to `src/middleware/audit.ts:50-70` as part of the refactor.
668
+ slot: slot-024
669
+ section: para-201
670
+ - id: plsat-025
671
+ scenario: You're adding a new API endpoint `POST /api/billing/invoices` to your project. Before writing any code, you want to follow Paradigm best practices.
672
+ question: What is the recommended sequence of steps?
673
+ choices:
674
+ A: Write the code → Update `.purpose` file → Update `portal.yaml` → Commit
675
+ B: Call `paradigm_gates_for_route` → Update `portal.yaml` → Write the code → Update `.purpose` file → Commit
676
+ C: Update `portal.yaml` → Write the code → Run `paradigm doctor` → Commit
677
+ D: Write the code → Run `paradigm scan` → Let it auto-generate the purpose file → Commit
678
+ E: Call `paradigm_orchestrate_inline` → Spawn agents → Let agents handle everything
679
+ correct: B
680
+ explanation: 'The recommended flow for adding endpoints is: (1) Call `paradigm_gates_for_route` to get gate suggestions for `POST /api/billing/invoices`, (2) Update `portal.yaml` with the route and its gates, (3) Implement the endpoint with proper gate enforcement, (4) Update the nearest `.purpose` file with the new `#` component, signals, etc. (5) Commit with proper Paradigm commit format. This ''portal first'' approach ensures security is designed before implementation. Choice D is wrong because `paradigm scan` discovers existing symbols but doesn''t auto-generate purpose files from code.'
681
+ slot: slot-025
682
+ section: para-201
683
+ - id: plsat-026
684
+ scenario: You're in the middle of a long Claude Code session. You've made 47 tool calls, modified 12 files, and you're about to start a complex refactor of the payment system. You vaguely recall something about context management but can't remember the details.
685
+ question: What should you do before starting the refactor?
686
+ choices:
687
+ A: Just continue — context management is handled automatically
688
+ B: Call `paradigm_session_health` to see if a handoff is recommended
689
+ C: Start a new Claude session immediately to get fresh context
690
+ D: Save all files and run `paradigm scan` to rebuild the index
691
+ E: Call `paradigm_session_stats` and if over 100 tool calls, panic
692
+ correct: B
693
+ explanation: The protocol says to call `paradigm_session_health` periodically (every 10-15 tool calls) during long sessions. At 47 tool calls, you're well overdue for a check. This tool analyzes your context window usage and recommends whether to continue, prepare a handoff, or urgently wrap up. If usage is over 85%, you should prioritize completing your current task and prepare a handoff with `paradigm_handoff_prepare`. Don't just start a new session (C) without preparing a handoff — you'd lose all context about what was done.
694
+ slot: slot-026
695
+ section: para-301
696
+ - id: plsat-027
697
+ scenario: |-
698
+ You run `paradigm doctor` and get the following output:
699
+
700
+ ```
701
+ WARNING: #payment-processor has been modified 7 times in 14 days
702
+ WARNING: #payment-processor has 3 rollbacks in history
703
+ FRAGILITY SCORE: 0.85 (HIGH)
704
+ ```
705
+ question: What does a fragility score of 0.85 indicate, and what should you do?
706
+ choices:
707
+ A: The component has a bug 85% of the time — rewrite it from scratch
708
+ B: 85% of the codebase depends on it — extract it into a separate service
709
+ C: The component is highly unstable due to frequent changes and rollbacks — proceed with extra caution, review wisdom, and consider refactoring
710
+ D: The component needs 85% more test coverage — write tests first
711
+ E: The score is informational only — ignore it and proceed normally
712
+ correct: C
713
+ explanation: 'A fragility score of 0.85 (out of 1.0) means the component is highly unstable. It''s been changed 7 times in 2 weeks with 3 rollbacks — a clear pattern of churn. Before modifying it: (1) Call `paradigm_wisdom_context` to check for antipatterns and `paradigm_decision_search` for prior decisions about it, (2) Call `paradigm_history_context` to understand the recent changes, (3) Consider whether a refactor is needed before adding more changes. The score doesn''t mean ''has bugs 85%'' or ''needs 85% coverage'' — it''s a stability metric based on change frequency and rollback rate.'
714
+ slot: slot-027
715
+ section: para-301
716
+ - id: plsat-028
717
+ scenario: 'After a production incident where the payment system double-charged a customer, the team discovers the root cause: a developer removed a deduplication check while refactoring `#payment-processor`. The team wants to prevent this from happening again.'
718
+ question: What is the MOST appropriate Paradigm response to this incident?
719
+ choices:
720
+ A: 'Add a comment in the code: `// DO NOT REMOVE THIS CHECK`'
721
+ B: 'Record an antipattern in wisdom: ''Never remove deduplication from #payment-processor'' with the alternative approach'
722
+ C: Create a `^deduplication-enforced` gate in portal.yaml
723
+ D: Add `#payment-processor` to a 'do not touch' list in `.paradigm/config.yaml`
724
+ E: Record the incident in Sentinel AND record an antipattern in wisdom
725
+ correct: E
726
+ explanation: 'The most complete response uses both Sentinel and Wisdom. (1) Record the incident with `paradigm_sentinel_record` so it appears in incident tracking with symbolic context (the `#payment-processor` symbol, error details, timeline). (2) Record an antipattern with `paradigm_wisdom_record` so future AI agents and developers are warned before modifying `#payment-processor`. The antipattern would say: ''Never remove deduplication logic'' with alternative: ''If refactoring payment processing, always preserve the deduplication middleware and add tests for it.'' A code comment (A) is easily missed. A gate (C) is for condition checks on routes, not business logic.'
727
+ slot: slot-028
728
+ section: para-301
729
+ - id: plsat-029
730
+ scenario: You need to understand the impact of changing `^authenticated` — the main authentication gate used across your entire application. You want to know what depends on it.
731
+ question: Which MCP tool call gives you the MOST useful dependency information?
732
+ choices:
733
+ A: '`paradigm_search({ query: ''^authenticated'' })` — find all references'
734
+ B: '`paradigm_related({ symbol: ''^authenticated'' })` — show direct relations'
735
+ C: '`paradigm_ripple({ symbol: ''^authenticated'', depth: 3 })` — show direct AND indirect dependencies up to 3 levels'
736
+ D: '`paradigm_navigate({ intent: ''find'', target: ''^authenticated'' })` — locate the gate'
737
+ E: '`paradigm_flows_affected({ symbol: ''^authenticated'' })` — show affected flows'
738
+ correct: C
739
+ explanation: '`paradigm_ripple` with a depth parameter gives you the cascading dependency analysis. For a widely-used gate like `^authenticated`, you need to see not just what directly references it, but what depends on things that depend on it (transitive dependencies). At depth 3, you''d see: Level 1 — all gates with `requires: [^authenticated]`, all routes using it. Level 2 — all components behind those routes. Level 3 — flows involving those components. `paradigm_related` (B) only shows direct connections. `paradigm_search` (A) finds textual references but doesn''t analyze the dependency graph.'
740
+ slot: slot-029
741
+ section: para-301
742
+ - id: plsat-030
743
+ scenario: It's Monday morning. You open Claude Code to continue work on a project. Last Friday, a different Claude session was making changes to the user management module. You have no idea what state things are in.
744
+ question: What is the FIRST thing you should do?
745
+ choices:
746
+ A: Run `git log` to see recent commits
747
+ B: Call `paradigm_session_recover` to load breadcrumbs from the previous session
748
+ C: Call `paradigm_status` for a general project overview
749
+ D: Read the `.paradigm/config.yaml` to understand the project
750
+ E: Start fresh — don't worry about what the previous session did
751
+ correct: B
752
+ explanation: '`paradigm_session_recover` is designed exactly for this scenario. It loads breadcrumbs from previous sessions, showing you what was done, what files were modified, what symbols were touched, and what the next steps were. This is more useful than `paradigm_status` (C) because status gives you general project info, not session-specific context. After recovering the session, THEN you''d call `paradigm_status` and check the relevant files. Starting fresh (E) risks duplicating work or missing important context.'
753
+ slot: slot-030
754
+ section: para-301
755
+ - id: plsat-031
756
+ scenario: Your application is experiencing intermittent 500 errors on the checkout flow. You suspect it's related to the Stripe integration. You want to check if there's a known pattern for this type of failure.
757
+ question: Which sequence of Sentinel tools should you use?
758
+ choices:
759
+ A: '`paradigm_sentinel_triage` to see open incidents, then `paradigm_sentinel_patterns` to find matching failure patterns'
760
+ B: '`paradigm_sentinel_stats` to see overall health, then guess at the root cause'
761
+ C: '`paradigm_sentinel_record` to create a new incident immediately'
762
+ D: '`paradigm_sentinel_suggest_pattern` without any incident context'
763
+ E: '`paradigm_sentinel_resolve` to close any open incidents and hope it goes away'
764
+ correct: A
765
+ explanation: 'The diagnostic workflow is: (1) `paradigm_sentinel_triage` with a filter like `search: ''500''` or `symbol: ''#stripe-service''` to see if there are existing open incidents matching your symptoms. (2) `paradigm_sentinel_patterns` to find known failure patterns (with confidence scores) that match the error. If a pattern matches, it includes resolution steps and code hints. You''d only record a NEW incident (C) if triage shows this is a novel failure. Resolving without investigating (E) is never the right answer. Stats (B) give you health metrics but not diagnostic details.'
766
+ slot: slot-031
767
+ section: para-301
768
+ - id: plsat-032
769
+ scenario: You want to find who on your team has the most expertise with the payment system before making a significant architectural change.
770
+ question: Which Paradigm tool helps you find the right person?
771
+ choices:
772
+ A: '`paradigm_search({ query: ''payments'' })` and look at file authors in git blame'
773
+ B: '`paradigm_wisdom_expert({ area: ''payments'' })` to find recognized experts'
774
+ C: '`paradigm_history_context({ symbols: [''#payment-service''] })` and infer from commit history'
775
+ D: '`paradigm_navigate({ intent: ''explore'', target: ''payments'' })` and read the code to figure out who wrote it'
776
+ E: Ask in Deus / Slack / Teams
777
+ correct: B
778
+ explanation: '`paradigm_wisdom_expert` is purpose-built for finding human experts by area or symbol. It returns people who are recognized as knowledgeable about the payment system, based on recorded wisdom and history. While `paradigm_history_context` (C) can show who recently worked on the code, that doesn''t mean they''re the expert — they might have just fixed a typo. `paradigm_wisdom_expert` tracks deliberate expertise attribution, not just commit frequency.'
779
+ slot: slot-032
780
+ section: para-301
781
+ - id: plsat-033
782
+ scenario: After completing a significant refactor of the authentication module, you want to record what you did for future sessions and team members.
783
+ question: What is the correct way to record this in Paradigm's history system?
784
+ choices:
785
+ A: Write a detailed comment in the `.purpose` file
786
+ B: Call `paradigm_history_record` with type 'refactor', affected symbols, and description
787
+ C: Update `.paradigm/docs/changelog.md` with the changes
788
+ D: Just commit with a good message — git history is sufficient
789
+ E: Call `paradigm_decision_record` with title, rationale, and participants explaining the refactor rationale
790
+ correct: B
791
+ explanation: '`paradigm_history_record` is the right tool for recording implementation events. You''d call it with `type: ''refactor''`, `symbols: [''^authenticated'', ''#auth-middleware'', ...]`, and a description of what was changed. This feeds into the history system that powers `paradigm_history_context` and `paradigm_history_fragility`. A good commit message (D) is important but separate — Paradigm''s history provides symbolic context that git alone doesn''t. If the refactor involved an architectural DECISION, you''d ALSO record it via `paradigm_decision_record` (E), but that''s supplementary, not a replacement — the implementation event itself belongs in the history system. (Note: `paradigm_wisdom_record` no longer accepts `type: ''decision''` in v6.0; decisions live in the dedicated decision store.)'
792
+ slot: slot-033
793
+ section: para-301
794
+ - id: plsat-034
795
+ scenario: You've run tests after implementing a new feature. 15 tests passed, 2 failed, and 1 was skipped. You previously recorded the implementation with `paradigm_history_record` and got back an implementation ID.
796
+ question: How should you record the test results?
797
+ choices:
798
+ A: Update the implementation record by calling `paradigm_history_record` again
799
+ B: Call `paradigm_history_validate` with result 'partial' and the test counts
800
+ C: Call `paradigm_history_validate` with result 'fail' because not all tests passed
801
+ D: Don't record it — fix the failing tests first, then record a 'pass'
802
+ E: Call `paradigm_sentinel_record` to log the test failures as incidents
803
+ correct: B
804
+ explanation: '`paradigm_history_validate` is the validation companion to `paradigm_history_record`. With 15 passed, 2 failed, and 1 skipped, the result is ''partial'' (not full pass, not complete failure). You''d call it with `result: ''partial''` and `tests: { passed: 15, failed: 2, skipped: 1 }`. This creates a validation record linked to the implementation. Recording it as ''fail'' (C) is too harsh — partial acknowledges progress. Waiting to record (D) loses valuable history about the initial state. Test failures aren''t production incidents (E) unless they indicate a production problem.'
805
+ slot: slot-034
806
+ section: para-301
807
+ - id: plsat-035
808
+ scenario: |-
809
+ A context check returns the following:
810
+
811
+ ```
812
+ Context usage: ~82%
813
+ Recommendation: prepare-handoff
814
+ Message: Context getting full. Complete current task and prepare handoff.
815
+ ```
816
+
817
+ You're in the middle of implementing a feature that's about 70% done.
818
+ question: What is the correct course of action?
819
+ choices:
820
+ A: Ignore the warning and finish the feature — 82% means you still have 18% left
821
+ B: Stop immediately and call `paradigm_handoff_prepare` with what you've done so far
822
+ C: Finish the current task as quickly as possible, then call `paradigm_handoff_prepare` with a summary, modified files, and next steps
823
+ D: Delete some earlier context by running `paradigm_session_recover` to free up space
824
+ E: Switch to a different, smaller task that can be completed in the remaining context
825
+ correct: C
826
+ explanation: 'At 82% with a ''prepare-handoff'' recommendation, you should complete your current task (not start new ones) and then hand off. The protocol says: when context usage is high but not yet critical (critical is >85%), prioritize completing the current task, then prepare a handoff. `paradigm_handoff_prepare` takes your summary, modified files, symbols touched, and next steps — giving the next session everything it needs to pick up where you left off. Ignoring it (A) risks running out of context mid-task. Stopping immediately (B) wastes the 70% progress. You can''t delete context (D).'
827
+ slot: slot-035
828
+ section: para-301
829
+ - id: plsat-036
830
+ scenario: You want to understand the complete structure of the authentication module without reading every file. The project has 200+ files across nested directories.
831
+ question: What is the MOST token-efficient way to explore this?
832
+ choices:
833
+ A: Read every file in `src/auth/` one by one
834
+ B: 'Call `paradigm_navigate({ intent: ''explore'', target: ''auth'' })` to browse the area'
835
+ C: 'Run `paradigm_search({ query: ''auth'' })` and read every matching file'
836
+ D: Read `.paradigm/navigator.yaml` and then every file it references
837
+ E: Call `paradigm_status` and hope it includes auth module details
838
+ correct: B
839
+ explanation: '`paradigm_navigate` with intent ''explore'' is designed for exactly this: browsing an area of the codebase without reading individual files. At ~200 tokens per call, it''s vastly more efficient than reading files (~500-2000 tokens each). It returns the structural overview of the auth area — components, gates, flows, signals — from the indexed symbols. If you need specific implementation details AFTER exploring, then you read individual files. The rule is: MCP for discovery, files for implementation.'
840
+ slot: slot-036
841
+ section: para-301
842
+ - id: plsat-037
843
+ scenario: You're debugging an issue where the `$order-fulfillment` flow is failing at the 'ship order' step. The flow has 6 steps spanning 4 components. You suspect the gate `^warehouse-authorized` is rejecting valid requests.
844
+ question: Which combination of tools gives you the MOST diagnostic information?
845
+ choices:
846
+ A: '`paradigm_flow_check({ flowId: ''$order-fulfillment'' })` + `paradigm_sentinel_triage({ symbol: ''^warehouse-authorized'' })`'
847
+ B: '`paradigm_search({ query: ''warehouse'' })` + read all matching files'
848
+ C: '`paradigm_ripple({ symbol: ''$order-fulfillment'' })` only'
849
+ D: '`paradigm_history_context({ symbols: [''$order-fulfillment''] })` only'
850
+ E: '`paradigm_navigate({ intent: ''find'', target: ''^warehouse-authorized'' })` + read the gate code'
851
+ correct: A
852
+ explanation: 'The best combination is: (1) `paradigm_flow_check` checks the flow definition against the codebase — are all steps implemented, do the gates exist, are signals emitted? This could reveal if the flow definition is out of sync with the code. (2) `paradigm_sentinel_triage` filtered by `^warehouse-authorized` shows if there are incidents or known patterns for this gate failing. Together, these give you structural validation AND operational history. Ripple (C) shows dependencies but not failures. History (D) shows changes but not current errors.'
853
+ slot: slot-037
854
+ section: para-301
855
+ - id: plsat-038
856
+ scenario: 'Your team has been using Paradigm for 6 months. A new developer joins and asks: ''How do I know if the Paradigm files are actually accurate? What if the code has drifted from the documentation?'''
857
+ question: What is the correct answer?
858
+ choices:
859
+ A: Trust the Paradigm files — they're always accurate because they're machine-generated
860
+ B: Run `paradigm doctor` to validate consistency between Paradigm files and the codebase, and check `paradigm_aspect_check` for aspect anchor drift
861
+ C: Paradigm files are aspirational — they describe what the code SHOULD be, not what it IS
862
+ D: Run `paradigm scan` to regenerate all Paradigm files from scratch
863
+ E: Check the git blame on `.purpose` files to see when they were last updated
864
+ correct: B
865
+ explanation: '`paradigm doctor` is the validation tool that checks for inconsistencies between Paradigm files and the codebase. It flags missing anchors, undefined symbols referenced in flows, gates referenced in portal.yaml that don''t have implementations, and more. `paradigm_aspect_check` specifically validates that aspect anchors still point to valid code. `paradigm_purpose_validate` checks `.purpose` file structural validity. These tools are how you verify accuracy. Paradigm files are NOT auto-generated (A) or aspirational (C) — they''re maintained documentation that has validation tools.'
866
+ slot: slot-038
867
+ section: para-301
868
+ variants:
869
+ - id: plsat-038b
870
+ scenario: |-
871
+ While writing a `.purpose` file for a new feature, you aren't sure whether the notification service should send emails or push notifications. Rather than guessing, you write:
872
+
873
+ ```yaml
874
+ components:
875
+ notification-dispatcher:
876
+ description: "Routes notifications to users [NEEDS CLARIFICATION: email, push, or both?]"
877
+ ```
878
+
879
+ You then run `paradigm doctor` and `paradigm_purpose_validate`.
880
+ question: 'How do these tools treat the `[NEEDS CLARIFICATION: ...]` marker?'
881
+ choices:
882
+ A: As an error — the `.purpose` file fails validation until the marker is removed
883
+ B: As a warning — it surfaces during checks but does not block validation or break builds
884
+ C: They ignore it — it's just text in a YAML string
885
+ D: As a fatal parse error — the square brackets break YAML syntax
886
+ E: As an info-level message only visible with `--verbose` flag
887
+ correct: B
888
+ explanation: 'Clarification markers (`[NEEDS CLARIFICATION: ...]`) are treated as warnings by both `paradigm doctor` and `paradigm_purpose_validate`. They scan all description fields for this exact pattern and report matches as warnings. This means the marker surfaces during health checks to remind the team of open design questions, but it does not fail validation or block builds. The intent is to make ambiguity visible and trackable rather than silent. Resolve markers before shipping by replacing them with the clarified text.'
889
+ - id: plsat-039
890
+ scenario: 'You need to build a complete user profile feature: a UI component, API endpoint, database service, validation logic, and tests. This will touch at least 6 files across 3 directories.'
891
+ question: Before writing ANY code, what should you do FIRST?
892
+ choices:
893
+ A: Start coding the UI component — start with the frontend and work backwards
894
+ B: 'Call `paradigm_orchestrate_inline({ task: ''Build user profile feature'', mode: ''plan'' })` to get the right agents and plan'
895
+ C: Call `paradigm_search` for existing profile-related symbols
896
+ D: Create the `.purpose` file first to define all the symbols
897
+ E: Call `paradigm_ripple` on every component you plan to create
898
+ correct: B
899
+ explanation: 'When a task affects 3+ files, involves multiple features, or spans security and implementation, the FIRST step is calling `paradigm_orchestrate_inline` with mode=''plan''. This returns: the right agent team (e.g., architect + security + builder + tester), estimated token cost, and an execution plan with stages. This prevents you from wasting tokens on ad-hoc implementation when a structured approach would be more efficient. After the plan, you''d call with mode=''execute'' to get full agent prompts. You can''t ripple (E) symbols that don''t exist yet.'
900
+ slot: slot-039
901
+ section: para-401
902
+ - id: plsat-040
903
+ scenario: |-
904
+ You call `paradigm_orchestrate_inline` with mode='plan' for a task involving JWT authentication. The plan returns four agents: architect, security, builder, tester. The plan shows two stages:
905
+
906
+ ```
907
+ Stage 1: [architect, security] (canRunParallel: true)
908
+ Stage 2: [builder, tester] (canRunParallel: false)
909
+ ```
910
+ question: How should you execute this plan?
911
+ choices:
912
+ A: Run all four agents simultaneously for maximum speed
913
+ B: Run architect and security in parallel, wait for both to complete, then run builder, then tester sequentially
914
+ C: Run architect first, then security, then builder, then tester — always sequential
915
+ D: Skip the architect and security agents — just run builder and tester
916
+ E: Run builder first to get code written, then architect and security for review
917
+ correct: B
918
+ explanation: 'The orchestration plan explicitly marks `canRunParallel: true` for Stage 1 (architect + security), meaning they can run simultaneously. Stage 2 has `canRunParallel: false`, meaning builder must complete before tester starts. The correct execution is: launch architect and security in parallel (Stage 1), wait for both to finish, run builder with handoff context from Stage 1, then run tester after builder completes. Skipping agents (D) defeats the purpose of orchestration. Running builder first (E) ignores architecture and security design.'
919
+ slot: slot-040
920
+ section: para-401
921
+ - id: plsat-041
922
+ scenario: |-
923
+ Your team is evaluating which agent provider to use for orchestration. The environment has:
924
+ - `ANTHROPIC_API_KEY` set
925
+ - Claude Code installed (Max subscription)
926
+ - Cursor IDE open
927
+ - No `.paradigm/config.yaml` provider override
928
+
929
+ The team runs `paradigm team providers`.
930
+ question: Which provider will be used by default based on the cascade?
931
+ choices:
932
+ A: Cursor agent CLI — because Cursor IDE is detected
933
+ B: Claude Code Task tool — because Max subscription is available
934
+ C: Anthropic API (claude) — because it's first in the cascade and the API key is set
935
+ D: Manual file-based handoffs — because no provider is explicitly configured
936
+ E: Claude Code Agent Teams — because it supports parallel execution
937
+ correct: C
938
+ explanation: 'The provider cascade tries providers in order: (1) claude (Anthropic API), (2) claude-code-teams, (3) claude-code, (4) cursor-cli, (5) claude-cli, (6) manual. Since `ANTHROPIC_API_KEY` is set, the first provider (`claude` — Anthropic API) is available and will be used. The cascade stops at the first available provider unless overridden. Even though Cursor and Claude Code are available, they''re lower priority. To override, use `paradigm team providers --set cursor-cli` or set `agent-provider` in config.'
939
+ slot: slot-041
940
+ section: para-401
941
+ - id: plsat-042
942
+ scenario: |-
943
+ You're configuring agent models for your team. The task involves:
944
+ - An architectural review (complex reasoning needed)
945
+ - A security audit (critical, needs thoroughness)
946
+ - Building 3 UI components (straightforward implementation)
947
+ - Writing unit tests (repetitive, pattern-based)
948
+
949
+ Your budget is limited.
950
+ question: Which model assignment follows Paradigm's recommended configuration?
951
+ choices:
952
+ A: All agents use opus for maximum quality
953
+ B: 'Architect: opus, Security: opus, Builder: haiku, Tester: haiku'
954
+ C: All agents use haiku for cost efficiency
955
+ D: 'Architect: sonnet, Security: sonnet, Builder: sonnet, Tester: sonnet'
956
+ E: 'Architect: opus, Security: sonnet, Builder: opus, Tester: haiku'
957
+ correct: B
958
+ explanation: 'Paradigm''s recommended model configuration is: architect and security agents use opus (complex reasoning, critical decisions), builder uses haiku (fast, cost-effective for straightforward implementation), and tester uses haiku (pattern-based, repetitive work). The reviewer role (not in this scenario) uses sonnet (balanced critique). This balances quality where it matters most (architecture, security) with cost efficiency where tasks are more mechanical (building, testing). All-opus (A) blows the budget. All-haiku (C) risks poor architectural decisions.'
959
+ slot: slot-042
960
+ section: para-401
961
+ variants:
962
+ - id: plsat-042b
963
+ scenario: The reviewer agent is reviewing a builder's implementation of a new checkout feature. During Stage 1 (Spec Compliance), the reviewer discovers that the builder created a new `#shipping-calculator` component but did not register it in any `.purpose` file. The code itself looks clean and well-tested.
964
+ question: What should the reviewer do?
965
+ choices:
966
+ A: Proceed to Stage 2 (Code Quality) since the code looks good, and mention the missing `.purpose` entry as a note
967
+ B: Stop at Stage 1, report a blocking finding for the unregistered component, and hand back to the builder without running Stage 2
968
+ C: Register the component in the `.purpose` file on behalf of the builder, then approve
969
+ D: Skip both stages and approve since the code is well-tested
970
+ E: Run Stage 2 first since code quality is more important than metadata
971
+ correct: B
972
+ explanation: The reviewer follows a strict two-stage protocol. Stage 1 (Spec Compliance) is a hard gate — if it fails, the reviewer stops immediately and hands back to the builder. A missing `.purpose` registration is a spec compliance violation (blocking finding). There is no point reviewing code quality of spec-noncompliant code. The reviewer never writes code or modifies files (C is wrong). Stage 2 cannot run before Stage 1 passes.
973
+ - id: plsat-043
974
+ scenario: |-
975
+ You're about to call an MCP tool. Your choices are:
976
+ 1. `paradigm_status` (~100 tokens)
977
+ 2. `paradigm_navigate` (~200 tokens)
978
+ 3. Reading a 400-line TypeScript file (~2000 tokens)
979
+ 4. `paradigm_ripple` (~300 tokens)
980
+
981
+ You need to understand what components exist in the payments area before modifying `#payment-service`.
982
+ question: What is the MOST token-efficient approach?
983
+ choices:
984
+ A: Read the TypeScript file directly — you need to see the actual code
985
+ B: Call `paradigm_navigate` to explore the payments area, then `paradigm_ripple` on `#payment-service`, then read only the specific file you need to change
986
+ C: Call `paradigm_status` first, then read all payment-related files
987
+ D: Call all four tools to be thorough
988
+ E: Skip all tools and just start coding — you'll figure it out
989
+ correct: B
990
+ explanation: 'The optimal approach is: (1) `paradigm_navigate` (~200 tokens) to discover what exists in the payments area without reading files. (2) `paradigm_ripple` (~300 tokens) on `#payment-service` to understand impact before modifying. (3) THEN read the specific file you need to change. Total: ~500 tokens + one targeted file read. This follows the rule: ''MCP for discovery, files for implementation.'' Reading files first (A) costs ~2000 tokens before you even know what you''re looking at. Calling everything (D) wastes `paradigm_status` tokens on info you don''t need for this task.'
991
+ slot: slot-043
992
+ section: para-401
993
+ - id: plsat-044
994
+ scenario: Your team decides that all API responses should include a `requestId` header for tracing. This is an architectural decision that affects every API endpoint in the project.
995
+ question: How should this decision be recorded in Paradigm?
996
+ choices:
997
+ A: Add a comment in every API route file
998
+ B: Call `paradigm_decision_record` with title, rationale, participants, and alternatives_considered — the decision lives in `.paradigm/decisions/` and writes a companion lore `insight` automatically
999
+ C: Create a `~request-id-required` aspect and add it to every component
1000
+ D: Update `.paradigm/config.yaml` with a new convention
1001
+ E: Both B and C — record the decision AND create an aspect with code anchors
1002
+ correct: E
1003
+ explanation: 'The most complete answer is both. (1) Record the architectural decision with `paradigm_decision_record` including title, decision text, rationale (tracing, debugging, support requirements), participants with stances, and alternatives_considered (response body field rejected_because clients prefer headers). The decision is stored in `.paradigm/decisions/` and a companion lore `insight` is auto-written so the timeline shows the moment the choice was made. (2) Create `~request-id-required` aspect with anchors pointing to the middleware that adds the header. The decision documents the WHY, the aspect enforces the WHAT with verifiable code anchors. Just the decision (B) lacks enforcement. Just the aspect (C) lacks rationale.'
1004
+ slot: slot-044
1005
+ section: para-401
1006
+ - id: plsat-045
1007
+ scenario: |-
1008
+ You're writing a commit message for a change that added rate limiting to the payment API, modified the Stripe webhook handler, and added a new `!rate-limit-exceeded` signal.
1009
+
1010
+ The affected symbols are: `#payment-api`, `#stripe-webhook-handler`, `~rate-limited`, `!rate-limit-exceeded`.
1011
+ question: Which commit message follows Paradigm conventions?
1012
+ choices:
1013
+ A: |-
1014
+ ```
1015
+ feat: add rate limiting to payments
1016
+ ```
1017
+ B: |-
1018
+ ```
1019
+ feat(#payment-api): add rate limiting to payment endpoints
1020
+
1021
+ - Apply ~rate-limited aspect to #payment-api
1022
+ - Update #stripe-webhook-handler with rate limit checks
1023
+ - Add !rate-limit-exceeded signal for monitoring
1024
+
1025
+ Symbols: #payment-api, #stripe-webhook-handler, ~rate-limited, !rate-limit-exceeded
1026
+ ```
1027
+ C: |-
1028
+ ```
1029
+ feat(payments): add rate limiting
1030
+
1031
+ Added rate limiting to payment API and webhook handler.
1032
+ ```
1033
+ D: |-
1034
+ ```
1035
+ FEAT(#payment-api): ADD RATE LIMITING
1036
+
1037
+ Symbols: ALL PAYMENT SYMBOLS
1038
+ ```
1039
+ E: |-
1040
+ ```
1041
+ feat(~rate-limited): apply rate limiting aspect
1042
+
1043
+ - Updated payment-api
1044
+ - Updated stripe-webhook-handler
1045
+
1046
+ Symbols: ~rate-limited
1047
+ ```
1048
+ correct: B
1049
+ explanation: 'Choice B follows all Paradigm commit conventions: (1) Subject line: `type(#primary-symbol): description` with the primary affected component. (2) Body: references all affected symbols with their prefixes (#, ~, !). (3) `Symbols:` trailer: machine-readable list of ALL affected symbols for the post-commit hook to parse. Choice A lacks symbols entirely. Choice C uses a generic scope instead of a symbol. Choice D is SCREAMING_CASE (no). Choice E uses the aspect as the primary symbol, but the primary change is to `#payment-api`, and the Symbols trailer is incomplete.'
1050
+ slot: slot-045
1051
+ section: para-401
1052
+ - id: plsat-046
1053
+ scenario: 'A developer on your team proposes a new tag: `[webhook-handler]`. They''ve noticed 5 components across the project that handle incoming webhooks and think a dedicated tag would be useful for classification.'
1054
+ question: What is the correct process for adding this tag?
1055
+ choices:
1056
+ A: Add it directly to the `core` section of `tags.yaml`
1057
+ B: Add it directly to the `project` section of `tags.yaml`
1058
+ C: Call `paradigm_tags_suggest` with the tag name, description, reason, and example symbols — it goes to `suggested` for human approval
1059
+ D: 'Just start using `tags: [webhook-handler]` on components — tags are freeform'
1060
+ E: Create a new aspect `~webhook-handler` instead — tags aren't for this
1061
+ correct: C
1062
+ explanation: 'The proper process is `paradigm_tags_suggest`, which adds the tag to the `suggested` section of `tags.yaml` for human review. This is Paradigm''s governance model: AI can propose tags, but humans must approve them before they become official. Once approved, a human promotes it to `project` (team-specific) or `core` (if it should ship with Paradigm). Adding directly to `core` (A) is only for Paradigm framework maintainers. Freeform usage (D) leads to tag sprawl. An aspect (E) is wrong because webhook handling isn''t a cross-cutting rule requiring code anchors.'
1063
+ slot: slot-046
1064
+ section: para-401
1065
+ - id: plsat-047
1066
+ scenario: |-
1067
+ Your CI/CD pipeline runs `paradigm doctor` as a check. The latest run shows:
1068
+
1069
+ ```
1070
+ ERROR: Gate ^project-owner referenced in portal.yaml but not defined
1071
+ WARNING: Aspect ~cache-invalidation has stale anchors (file moved)
1072
+ ERROR: Flow $signup-flow references undefined component #sms-verifier
1073
+ INFO: 3 suggested tags awaiting human review
1074
+ ```
1075
+ question: Which issues MUST be fixed before merging, and which can wait?
1076
+ choices:
1077
+ A: All four must be fixed — `paradigm doctor` errors should block the merge
1078
+ B: The two ERRORs must be fixed (undefined gate and undefined component). The WARNING and INFO can wait.
1079
+ C: Only the gate error must be fixed (security). Everything else is documentation.
1080
+ D: None need to block the merge — `paradigm doctor` is advisory only
1081
+ E: The errors and warning must be fixed. Only the INFO about suggested tags can wait.
1082
+ correct: B
1083
+ explanation: 'ERRORs indicate broken references that will cause real problems: a gate referenced in `portal.yaml` that doesn''t exist means route protection is undefined, and a flow referencing an undefined component means the flow documentation is actively misleading. These MUST be fixed. The WARNING about stale anchors is important but not merge-blocking — the aspect still works, the documentation just needs updating. The INFO about suggested tags is purely administrative. In practice, many teams also fix WARNINGs before merge, but the ERRORs are the must-fix items.'
1084
+ slot: slot-047
1085
+ section: para-401
1086
+ - id: plsat-048
1087
+ scenario: You're preparing a handoff because your context window is at 87%. You've been working on a migration from REST to GraphQL for the user module. You've completed the schema and resolvers but haven't written tests yet.
1088
+ question: What information should you include in `paradigm_handoff_prepare`?
1089
+ choices:
1090
+ A: Just the summary — the next session can figure out the rest
1091
+ B: Summary, list of modified files, symbols touched, next steps (write tests), and the open question about whether to keep the REST endpoints during migration
1092
+ C: A complete transcript of everything you did in this session
1093
+ D: Only the list of modified files — the next session can read them
1094
+ E: Summary and next steps only — modified files are in git
1095
+ correct: B
1096
+ explanation: '`paradigm_handoff_prepare` accepts: summary (what was done), modifiedFiles (what changed), symbolsTouched (which symbols were affected), nextSteps (what to do next), and openQuestions (unresolved decisions). The MOST useful handoff includes ALL of these. The next session needs to know: (1) what was accomplished (summary), (2) which files to look at (modifiedFiles), (3) which symbols are in play (symbolsTouched), (4) exactly what to do next (write tests), and (5) any decisions that still need to be made (keep REST endpoints?). A transcript (C) is too much. Just files (D) lacks context.'
1097
+ slot: slot-048
1098
+ section: para-401
1099
+ - id: plsat-049
1100
+ scenario: You want to validate that a specific flow `$checkout-flow` is correctly implemented. The flow has 5 steps, involves 3 gates, and emits 2 signals. You want to verify that all steps have implementations, gates exist in portal.yaml, and signals are actually emitted in the code.
1101
+ question: Which MCP tool call gives you this deep implementation check?
1102
+ choices:
1103
+ A: '`paradigm_flow_check({ flowId: ''$checkout-flow'' })` — validates flow definition only'
1104
+ B: '`paradigm_flow_check({ flowId: ''$checkout-flow'', checkImplementation: true })` — deep check against codebase'
1105
+ C: '`paradigm_purpose_validate()` — validates all purpose files including flows'
1106
+ D: '`paradigm_ripple({ symbol: ''$checkout-flow'' })` — shows flow dependencies'
1107
+ E: '`paradigm_related({ symbol: ''$checkout-flow'' })` — shows what''s connected to the flow'
1108
+ correct: B
1109
+ explanation: '`paradigm_flow_check` with `checkImplementation: true` performs a deep validation: it checks that gates exist in portal.yaml, actions are implemented in the codebase, and signals are emitted. Without `checkImplementation`, it only validates the YAML structure (A). `paradigm_purpose_validate` (C) checks structural validity of purpose files but doesn''t do the deep codebase cross-reference. `paradigm_ripple` (D) shows what depends on the flow, not whether it''s correctly implemented. The `checkImplementation` flag is the key to going from structural validation to implementation verification.'
1110
+ slot: slot-049
1111
+ section: para-401
1112
+ - id: plsat-050
1113
+ scenario: |-
1114
+ It's 2 AM. Your production system is down. The error logs show:
1115
+
1116
+ ```
1117
+ ERROR: Cannot read property 'id' of null
1118
+ at PaymentProcessor.processRefund (payment-processor.ts:142)
1119
+ at RefundHandler.handle (refund-handler.ts:67)
1120
+ ```
1121
+
1122
+ You need to investigate and fix this as fast as possible using Paradigm tools. The on-call engineer has no context about the payment system.
1123
+ question: What is the optimal 2 AM sequence of actions using Paradigm?
1124
+ choices:
1125
+ A: Read the source files, find the bug, fix it, deploy
1126
+ B: '`paradigm_sentinel_record` the incident → `paradigm_sentinel_patterns` for known fixes → `paradigm_wisdom_context` for `#payment-processor` antipatterns → Fix → `paradigm_sentinel_resolve`'
1127
+ C: '`paradigm_orchestrate_inline` to spin up an architect and security agent'
1128
+ D: '`paradigm_status` → `paradigm_navigate` → Read every file in payments/ → Eventually find the bug'
1129
+ E: '`paradigm_ripple` on `#payment-processor` → Fix everything that depends on it'
1130
+ correct: B
1131
+ explanation: 'At 2 AM with production down, you want the fastest path to resolution with full traceability: (1) `paradigm_sentinel_record` the incident with the error and stack trace — this starts the clock and provides symbolic context. (2) `paradigm_sentinel_patterns` to check if this is a KNOWN failure pattern with an existing resolution — this could save you hours. (3) `paradigm_wisdom_context` for `#payment-processor` to check if there are recorded antipatterns (e.g., ''null check required before accessing refund.id''). (4) Fix the issue with full context. (5) `paradigm_sentinel_resolve` to close the incident with the fix commit. Orchestration (C) is overkill for an emergency fix. Reading everything (D) burns time you don''t have.'
1132
+ slot: slot-050
1133
+ section: para-401
1134
+ - id: plsat-051
1135
+ scenario: Your team just held a meeting where the lead architect chose to switch from PostgreSQL to CockroachDB for the user service. No code was written yet — this is purely a strategic decision with rationale, alternatives considered, and dissent recorded in meeting notes.
1136
+ question: How should this be recorded in v6.0?
1137
+ choices:
1138
+ A: '`paradigm_lore_record({ type: ''decision'', ... })` — the lore system handles architectural decisions'
1139
+ B: '`paradigm_lore_record({ type: ''milestone'', ... })` — switching databases is a major event'
1140
+ C: '`paradigm_decision_record({ title, decision, rationale, participants, alternatives_considered })` — decisions live in the dedicated decision store at `.paradigm/decisions/`, with a companion lore `insight` auto-written for timeline coverage'
1141
+ D: '`paradigm_wisdom_record({ type: ''decision'', ... })` — wisdom captures team decisions'
1142
+ E: '`paradigm_lore_record({ type: ''human-note'', ... })` — a human made the decision'
1143
+ correct: C
1144
+ explanation: 'In v6.0 architectural decisions moved out of both the lore system (the `decision` lore type was removed) and the wisdom system (`paradigm_wisdom_record` no longer accepts `type: ''decision''`). They live in `.paradigm/decisions/` as `TD-*` entries recorded via `paradigm_decision_record`, which carries the full ADR shape: title, decision, rationale, participants with stances (proposed, supported, dissented, abstained), alternatives_considered (with rejected_because), symbols_affected, and a status lifecycle (active, superseded, deprecated). A companion lore entry of type `insight` is auto-written so the project timeline still surfaces when the decision was made. A and D both reference removed APIs — the storage layer rejects type:''decision'' lore entries with an error pointing at `paradigm_decision_record`.'
1145
+ slot: slot-051
1146
+ section: para-501
1147
+ - id: plsat-052
1148
+ scenario: 'An agent just finished a quick session where it fixed a typo in a README and updated one comment in a source file. Total files modified: 2 (README.md and src/utils/format.ts).'
1149
+ question: Should the agent record a lore entry before ending the session?
1150
+ choices:
1151
+ A: Yes — every session should be recorded regardless of scope
1152
+ B: No — 2 files is below the 3-file significance threshold, and the stop hook will not require it
1153
+ C: Yes — the source file modification triggers the recording requirement
1154
+ D: No — but only because README.md is not a source file
1155
+ E: It depends on whether the typo fix was in a critical component
1156
+ correct: B
1157
+ explanation: The lore recording trigger is 3+ modified source files. With only 2 files modified (and one being a README which is typically excluded from the source file count), this session is below the threshold. The stop hook will not block for a missing lore entry. Agents can still choose to record, but it is not enforced.
1158
+ slot: slot-052
1159
+ section: para-501
1160
+ - id: plsat-053
1161
+ scenario: Sentinel has recorded 5 incidents over the past week. Three involve `#payment-processor` with TypeError, one involves `#payment-processor` with NetworkError, and one involves `#auth-service` with TypeError. Sentinel groups incidents using a 0.6 similarity threshold.
1162
+ question: How many incident groups will Sentinel likely create?
1163
+ choices:
1164
+ A: 1 group — all 5 incidents involve errors in the same general area
1165
+ B: '2 groups — one for the 3 TypeError incidents in #payment-processor, and the auth TypeError stays separate because different component'
1166
+ C: 3 groups — one per unique (component, error type) combination
1167
+ D: 5 groups — each incident is unique
1168
+ E: '2 groups — one for all #payment-processor incidents (4), one for #auth-service (1)'
1169
+ correct: C
1170
+ explanation: Sentinel groups by symbolic similarity with a 0.6 threshold. The three `#payment-processor` + TypeError incidents share both component and error type — high similarity, one group. The `#payment-processor` + NetworkError shares the component but differs in error type — enough divergence for a separate group. The `#auth-service` + TypeError shares error type with group 1 but differs in component — separate group. Three distinct (component, error type) clusters yield 3 groups.
1171
+ slot: slot-053
1172
+ section: para-501
1173
+ - id: plsat-054
1174
+ scenario: A production error just occurred. You want to record it, check for known fixes, resolve it, and then create a new pattern so future occurrences are handled faster.
1175
+ question: What is the correct Sentinel tool sequence?
1176
+ choices:
1177
+ A: '`sentinel_add_pattern` → `sentinel_record` → `sentinel_resolve`'
1178
+ B: '`sentinel_triage` → `sentinel_record` → `sentinel_resolve` → `sentinel_add_pattern`'
1179
+ C: '`sentinel_record` → `sentinel_triage` → fix → `sentinel_resolve` → `sentinel_add_pattern`'
1180
+ D: '`sentinel_record` → `sentinel_add_pattern` → `sentinel_resolve`'
1181
+ E: '`sentinel_patterns` → `sentinel_record` → `sentinel_add_pattern` → `sentinel_resolve`'
1182
+ correct: C
1183
+ explanation: 'The correct lifecycle is: (1) `sentinel_record` — create the incident with error details and symbolic context. (2) `sentinel_triage` — view the incident with matched patterns and suggested resolutions. (3) Fix the issue using the context from triage. (4) `sentinel_resolve` — close the incident with the fix commit. (5) `sentinel_add_pattern` — capture the fix as a reusable pattern. You must record before you can triage, fix before you can resolve, and resolve before creating a pattern from the resolution.'
1184
+ slot: slot-054
1185
+ section: para-501
1186
+ - id: plsat-055
1187
+ scenario: 'You are configuring habits for your project. You want to ensure that agents always call `paradigm_ripple` before modifying symbols, and you want this enforced at the start of every task. The seed habit `ripple-before-modify` exists with `trigger: preflight` and `severity: advisory`.'
1188
+ question: Which trigger ensures the habit is evaluated before implementation begins?
1189
+ choices:
1190
+ A: '`on-stop` — checks compliance at the end of the session'
1191
+ B: '`on-commit` — checks before changes are committed'
1192
+ C: '`preflight` — evaluated before starting implementation'
1193
+ D: '`postflight` — evaluated after completing implementation'
1194
+ E: '`pre-write` — evaluated before each file edit'
1195
+ correct: C
1196
+ explanation: The `preflight` trigger evaluates habits before implementation begins — this is when discovery checks like `paradigm_ripple` should run. `postflight` is too late (implementation already happened), `on-stop` is the end of the session, and `on-commit` is at commit time. There is no `pre-write` trigger — the four triggers are preflight, postflight, on-commit, and on-stop.
1197
+ slot: slot-055
1198
+ section: para-501
1199
+ - id: plsat-056
1200
+ scenario: 'A project overrides the seed habit `verify-before-done` (originally `severity: warn`) to `severity: block` in `.paradigm/habits.yaml`. An agent finishes implementing a feature but does not call `paradigm_pm_postflight`. The stop hook runs.'
1201
+ question: What happens?
1202
+ choices:
1203
+ A: A warning is logged but the session completes — `warn` is the seed severity
1204
+ B: The session is blocked — the project override to `block` means the stop hook treats this as a blocking violation
1205
+ C: Nothing — habit severity only affects habit check output, not the stop hook
1206
+ D: The override is ignored because seed habits cannot be overridden
1207
+ E: The session completes but the next session receives a mandatory reminder
1208
+ correct: B
1209
+ explanation: 'Project overrides in `.paradigm/habits.yaml` take precedence over seed defaults. The three-layer merge (seed → global → project) means the project''s `severity: block` override replaces the seed''s `severity: warn`. When the stop hook evaluates on-stop habits, it finds a blocking violation because `verify-before-done` was not followed and its severity is now `block`. The session cannot complete until the agent runs `paradigm_pm_postflight`.'
1210
+ slot: slot-056
1211
+ section: para-501
1212
+ - id: plsat-057
1213
+ scenario: An agent has finished reading requirements and has a clear plan for implementing a new feature. It has not written any code yet. It wants to save a checkpoint in case the session crashes.
1214
+ question: Which checkpoint phase should it use?
1215
+ choices:
1216
+ A: '`implementing` — the agent is about to start implementing'
1217
+ B: '`planning` — the agent has a plan but has not started coding'
1218
+ C: '`validating` — the agent needs to validate its plan first'
1219
+ D: '`complete` — the planning phase is complete'
1220
+ E: '`ready` — the agent is ready to implement'
1221
+ correct: B
1222
+ explanation: 'The `planning` phase captures the state after requirements are understood and a plan exists but before any code is written. If the session crashes, recovery knows: the plan is set, coding has not started, here are the key decisions. `implementing` should only be used after code changes begin. `complete` is for when the entire task is finished. There is no `ready` phase — the four phases are planning, implementing, validating, and complete.'
1223
+ slot: slot-057
1224
+ section: para-501
1225
+ - id: plsat-058
1226
+ scenario: |-
1227
+ The stop hook has blocked your session with two violations:
1228
+ 1. "Modified source directories missing .purpose coverage: src/services/refund/"
1229
+ 2. "Lore entry expected: 4 source files modified, no lore recorded"
1230
+
1231
+ You need to unblock and complete your session.
1232
+ question: What is the correct remediation sequence?
1233
+ choices:
1234
+ A: Run `paradigm_reindex` to fix both violations automatically
1235
+ B: Create a .purpose file in `src/services/refund/`, record a lore entry with `paradigm_lore_record`, then run `paradigm_reindex`
1236
+ C: Delete the `.paradigm/.pending-review` file to clear the tracking
1237
+ D: Add `src/services/refund/` to the `.paradigm/config.yaml` skip list
1238
+ E: Call `paradigm_pm_postflight` which handles all violations
1239
+ correct: B
1240
+ explanation: 'Each violation requires a specific fix: (1) Create or update a .purpose file in or above `src/services/refund/` to provide coverage for the new directory. (2) Call `paradigm_lore_record` with a summary of the session since 4 files exceeds the 3-file threshold. (3) Run `paradigm_reindex` to rebuild the index with the new .purpose file. Deleting `.pending-review` (C) just hides the tracking — the stop hook would still detect uncovered directories. `paradigm_pm_postflight` (E) reports violations but doesn''t fix them.'
1241
+ slot: slot-058
1242
+ section: para-501
1243
+ - id: plsat-059
1244
+ scenario: |-
1245
+ You run `paradigm flow validate` on your project and receive this output:
1246
+
1247
+ ```
1248
+ ⚠ Circular Dependencies (1)
1249
+
1250
+ $order-flow → $inventory-flow → $order-flow
1251
+ ```
1252
+
1253
+ Both flows reference each other via `relatedFlows`.
1254
+ question: What is the best way to resolve this circular dependency?
1255
+ choices:
1256
+ A: Delete one of the flows — circular flows are always a design error
1257
+ B: Extract the shared logic into a new `$stock-check-flow` that both flows reference, breaking the cycle
1258
+ C: Ignore it — circular dependencies are just warnings and do not affect anything
1259
+ D: Rename the flows so the validator does not detect the cycle
1260
+ E: Move both flows into the same .purpose file to merge them
1261
+ correct: B
1262
+ explanation: The recommended resolution for circular flow dependencies is to extract shared logic into a separate flow. If $order-flow and $inventory-flow both need shared behavior, create a third flow (e.g., $stock-check-flow) that both reference unidirectionally. This eliminates the cycle while preserving the relationships. Deletion (A) loses documentation, ignoring (C) hides architectural coupling, and renaming (D) is a workaround that does not fix the underlying issue.
1263
+ slot: slot-059
1264
+ section: para-201
1265
+ variants:
1266
+ - id: plsat-059b
1267
+ scenario: |-
1268
+ Your project has three flows with these `relatedFlows` references:
1269
+ - `$checkout-flow` → `[$payment-flow]`
1270
+ - `$payment-flow` → `[$receipt-flow]`
1271
+ - `$receipt-flow` → `[$checkout-flow]`
1272
+
1273
+ You run `paradigm_flow_check({})` to validate all flows.
1274
+ question: What will the circular dependency detection report?
1275
+ choices:
1276
+ A: No issues — each flow only references one other flow
1277
+ B: Three separate circular dependencies, one for each flow
1278
+ C: 'One circular dependency: $checkout-flow → $payment-flow → $receipt-flow → $checkout-flow'
1279
+ D: A warning that flows should not have relatedFlows at all
1280
+ E: An error that three-flow cycles are not supported
1281
+ correct: C
1282
+ explanation: 'Paradigm''s circular dependency detector uses depth-first search to trace the full dependency graph. Starting from $checkout-flow, it follows: $checkout-flow → $payment-flow → $receipt-flow → $checkout-flow, detecting a single 3-node cycle. The cycle is normalized (starting from the lexicographically smallest node) and reported once, not three times.'
1283
+ - id: plsat-pg3-q1a
1284
+ scenario: ''
1285
+ question: An agent skips calling `paradigm_ripple` before modifying `#checkout-service` and then tries to end the session. What happens?
1286
+ choices:
1287
+ A: Advisory note is logged — ripple-before-modify defaults to advisory severity
1288
+ B: Warning is shown — the override upgrades it to warn
1289
+ C: 'Session is blocked — the override sets ripple-before-modify to severity: block'
1290
+ D: Nothing — ripple-before-modify only checks during preflight, not on-stop
1291
+ E: The habit is disabled because test-new-components is disabled
1292
+ correct: C
1293
+ explanation: 'The overrides section sets `ripple-before-modify` to `severity: block`. The seed habit''s trigger is `preflight`, but when severity is `block`, violations detected during preflight evaluation carry through to the stop hook as blocking violations. The agent cannot complete the session until it calls `paradigm_ripple` for the modified symbols.'
1294
+ slot: pg-habits-q1
1295
+ section: para-501
1296
+ passageId: passage-habits-review
1297
+ passage: |-
1298
+ Your team's `.paradigm/habits.yaml` for an e-commerce project:
1299
+
1300
+ ```yaml
1301
+ overrides:
1302
+ ripple-before-modify:
1303
+ severity: block
1304
+ explore-before-implement:
1305
+ severity: warn
1306
+ test-new-components:
1307
+ enabled: false
1308
+ record-lore-for-significant:
1309
+ severity: block
1310
+
1311
+ custom:
1312
+ - id: check-price-validation
1313
+ name: Validate Price Calculations
1314
+ description: Ensure price calculation tests exist for any payment-related changes
1315
+ category: testing
1316
+ trigger: postflight
1317
+ severity: warn
1318
+ check:
1319
+ type: tests-exist
1320
+ params:
1321
+ patterns: ["**/price*.test.*", "**/payment*.test.*"]
1322
+ enabled: true
1323
+ ```
1324
+
1325
+ The seed habits that are NOT shown in overrides retain their default values. The project has 14 seed habits plus 1 custom habit.
1326
+ - id: plsat-pg3-q2a
1327
+ scenario: ''
1328
+ question: The team disabled `test-new-components` but added a custom `check-price-validation` habit. What testing discipline does this configuration express?
1329
+ choices:
1330
+ A: No testing discipline — disabling the seed habit removes all test requirements
1331
+ B: Targeted testing — the team doesn't require tests for ALL components, but does require them for price/payment code specifically
1332
+ C: The custom habit replaces the seed habit entirely
1333
+ D: This is a configuration error — you cannot disable a seed habit and add a custom one in the same category
1334
+ E: Full testing — the custom habit covers everything the seed habit did
1335
+ correct: B
1336
+ explanation: 'Disabling `test-new-components` (which checks for test files globally with `**/*.test.*`) removes the blanket test requirement. The custom `check-price-validation` habit adds a targeted requirement: test files must exist specifically for price and payment code (`**/price*.test.*`, `**/payment*.test.*`). This is a deliberate choice: the team decided that testing everything is too strict, but payment/price logic is critical enough to enforce test coverage.'
1337
+ slot: pg-habits-q2
1338
+ section: para-501
1339
+ passageId: passage-habits-review
1340
+ passage: |-
1341
+ Your team's `.paradigm/habits.yaml` for an e-commerce project:
1342
+
1343
+ ```yaml
1344
+ overrides:
1345
+ ripple-before-modify:
1346
+ severity: block
1347
+ explore-before-implement:
1348
+ severity: warn
1349
+ test-new-components:
1350
+ enabled: false
1351
+ record-lore-for-significant:
1352
+ severity: block
1353
+
1354
+ custom:
1355
+ - id: check-price-validation
1356
+ name: Validate Price Calculations
1357
+ description: Ensure price calculation tests exist for any payment-related changes
1358
+ category: testing
1359
+ trigger: postflight
1360
+ severity: warn
1361
+ check:
1362
+ type: tests-exist
1363
+ params:
1364
+ patterns: ["**/price*.test.*", "**/payment*.test.*"]
1365
+ enabled: true
1366
+ ```
1367
+
1368
+ The seed habits that are NOT shown in overrides retain their default values. The project has 14 seed habits plus 1 custom habit.
1369
+ - id: plsat-pg3-q3a
1370
+ scenario: ''
1371
+ question: An agent modifies 5 source files including payment logic but does not record a lore entry. What combination of violations will the stop hook report?
1372
+ choices:
1373
+ A: 'One violation: missing lore entry (block severity)'
1374
+ B: 'Two violations: missing lore entry (block) and missing price validation tests (warn)'
1375
+ C: 'One violation: missing price validation tests only — lore is advisory by default'
1376
+ D: 'Three violations: missing lore, missing tests, and .purpose coverage'
1377
+ E: The lore violation alone blocks the session — other checks are not evaluated after a block
1378
+ correct: B
1379
+ explanation: 'The stop hook evaluates ALL checks independently. `record-lore-for-significant` is overridden to `severity: block` and 5 files exceeds the 3-file threshold — this blocks. `check-price-validation` has `trigger: postflight` and `severity: warn`, and if payment files were modified without matching test files, it warns. Both violations are reported. The stop hook blocks because at least one violation has `severity: block`, but it reports all violations so the agent can fix everything in one pass.'
1380
+ slot: pg-habits-q3
1381
+ section: para-501
1382
+ passageId: passage-habits-review
1383
+ passage: |-
1384
+ Your team's `.paradigm/habits.yaml` for an e-commerce project:
1385
+
1386
+ ```yaml
1387
+ overrides:
1388
+ ripple-before-modify:
1389
+ severity: block
1390
+ explore-before-implement:
1391
+ severity: warn
1392
+ test-new-components:
1393
+ enabled: false
1394
+ record-lore-for-significant:
1395
+ severity: block
1396
+
1397
+ custom:
1398
+ - id: check-price-validation
1399
+ name: Validate Price Calculations
1400
+ description: Ensure price calculation tests exist for any payment-related changes
1401
+ category: testing
1402
+ trigger: postflight
1403
+ severity: warn
1404
+ check:
1405
+ type: tests-exist
1406
+ params:
1407
+ patterns: ["**/price*.test.*", "**/payment*.test.*"]
1408
+ enabled: true
1409
+ ```
1410
+
1411
+ The seed habits that are NOT shown in overrides retain their default values. The project has 14 seed habits plus 1 custom habit.
1412
+ - id: plsat-060
1413
+ scenario: |-
1414
+ A project has these habits enabled:
1415
+ - `commit-message-symbols` (on-commit/advisory) — checks commit messages match `type(#symbol):` format and include a `Symbols:` trailer
1416
+ - `flow-coverage-for-multi-component` (postflight/advisory) — checks that changes spanning 3+ components have a documented $flow
1417
+
1418
+ An agent modifies `#auth-handler`, `#session-store`, `#login-page`, and `#password-reset` but does not create a $flow. The agent then commits with message: `fix: update auth logic`.
1419
+ question: Which habits are violated?
1420
+ choices:
1421
+ A: 'Only commit-message-symbols — the message lacks type(#symbol): format'
1422
+ B: Only flow-coverage — 4 components without a $flow
1423
+ C: 'Both: commit message lacks #symbol in parens and Symbols: trailer, plus 4 components touched without a flow'
1424
+ D: Neither — both are advisory and don't actually check anything
1425
+ E: Only flow-coverage — the commit message format is correct
1426
+ correct: C
1427
+ explanation: 'The commit message `fix: update auth logic` matches the conventional prefix `fix:` but lacks a #symbol in parentheses (should be `fix(#auth-handler):`) and has no `Symbols:` trailer. Additionally, 4 components were modified (>= 3 threshold) without a documented $flow. Both habits are violated — the advisory severity means they log notes rather than blocking.'
1428
+ slot: slot-060
1429
+ section: para-501
1430
+ variants:
1431
+ - id: plsat-060b
1432
+ scenario: |-
1433
+ A project enables `context-session-awareness` (preflight/advisory) and `aspect-anchors-valid` (postflight/advisory).
1434
+
1435
+ An agent starts a session and immediately begins modifying the `~rate-limited` aspect without calling any context or session recovery tools. After modifying the aspect's anchor locations, the agent calls `paradigm_aspect_check` to verify the anchors are valid.
1436
+ question: What do the habit evaluations show?
1437
+ choices:
1438
+ A: Both followed — the agent did check the aspect
1439
+ B: 'context-session-awareness: skipped (no context tools called); aspect-anchors-valid: followed (paradigm_aspect_check was called)'
1440
+ C: Both skipped — advisory habits are always skipped
1441
+ D: 'context-session-awareness: followed (aspect_check counts as context); aspect-anchors-valid: skipped (anchors were modified)'
1442
+ E: Both partial — the agent did some work for each
1443
+ correct: B
1444
+ explanation: context-session-awareness checks if paradigm_session_health, paradigm_session_recover, or paradigm_session_checkpoint was called — paradigm_aspect_check does not count. aspect-anchors-valid checks if paradigm_aspect_check was called for touched aspects, which it was. So the first is skipped and the second is followed.
1445
+ - id: plsat-061
1446
+ scenario: |-
1447
+ Sentinel groups 8 incidents affecting `#payment-service` with these error messages:
1448
+ - 4 incidents: "Stripe API returned 429: rate limited"
1449
+ - 2 incidents: "Payment webhook timeout after 30s"
1450
+ - 2 incidents: "Connection reset by peer during payment callback"
1451
+
1452
+ The pattern suggester infers a resolution strategy from the grouped incidents.
1453
+ question: What strategy will the suggester infer?
1454
+ choices:
1455
+ A: fix-code — the default for any group of incidents
1456
+ B: retry — timeout and network-related errors dominate the group
1457
+ C: scale-up — rate limiting means the service needs more capacity
1458
+ D: rollback — the errors suggest a recent deployment broke something
1459
+ E: config-change — the 429 means the API key needs updating
1460
+ correct: B
1461
+ explanation: 'The strategy inference checks error messages for keywords. ''timeout'' and ''connection reset'' match the retry strategy (timeout, network keywords). While ''429: rate limited'' could suggest scale-up, the ''timeout'' keyword in 2 messages triggers the retry check first in the keyword priority order. The inference returns the first matching strategy, which is retry for timeout/network errors.'
1462
+ slot: slot-061
1463
+ section: para-301
1464
+ - id: plsat-062a
1465
+ scenario: |-
1466
+ A portal.yaml defines a gate `^subscription-required` with two locks:
1467
+
1468
+ ```yaml
1469
+ locks:
1470
+ - id: has-user
1471
+ keys:
1472
+ - expression: "req.user != null"
1473
+ - id: active-sub
1474
+ keys:
1475
+ - expression: "req.user.subscription.status === 'active'"
1476
+ - expression: "req.user.subscription.plan !== 'free'"
1477
+ ```
1478
+
1479
+ You run `paradigm portal test --gate ^subscription-required`.
1480
+ question: How many test cases does the gate lock introspection auto-generate?
1481
+ choices:
1482
+ A: 2 — one passing case and one failing case
1483
+ B: 3 — one passing case, one per-lock failure case for each of the 2 locks, but no empty entity case
1484
+ C: 4 — one passing case, one per-lock failure case for each of the 2 locks, and one empty entity case
1485
+ D: 5 — one case per key expression plus one empty entity case
1486
+ E: 1 — only the passing case with all properties populated
1487
+ correct: C
1488
+ explanation: 'Gate lock introspection generates: (1) a passing case with all properties populated from all key expressions, (2) one failure case per lock (omitting that lock''s required properties), and (3) an empty entity case that should always fail. With 2 locks, that''s 1 + 2 + 1 = 4 test cases.'
1489
+ slot: slot-062
1490
+ section: para-201
1491
+ variants:
1492
+ - id: plsat-062b
1493
+ scenario: You need a machine-readable export of your portal configuration for a CI audit pipeline. Your portal.yaml has 5 gates and 12 routes.
1494
+ question: Which command produces a structured export suitable for programmatic consumption in CI?
1495
+ choices:
1496
+ A: paradigm portal export --format json
1497
+ B: paradigm portal export --format csv
1498
+ C: paradigm portal export --format markdown
1499
+ D: paradigm doctor --json
1500
+ E: paradigm scan --verbose
1501
+ correct: A
1502
+ explanation: paradigm portal export --format json produces a structured JSON output with gates and routes arrays, ideal for CI pipelines. CSV is for spreadsheet analysis, markdown for documentation. paradigm doctor --json reports health checks, not portal config.
1503
+ - id: plsat-063
1504
+ scenario: You join a team working on a large codebase. Many source directories have code but no `.purpose` files documenting their components. You want to quickly generate draft documentation.
1505
+ question: What is the correct approach using Paradigm's lint tooling?
1506
+ choices:
1507
+ A: paradigm lint --fix — automatically creates .purpose files for all undocumented directories
1508
+ B: paradigm lint --auto-populate — scans source directories and suggests .purpose drafts, then paradigm lint --auto-populate --fix to write them
1509
+ C: paradigm scan --fix — rebuilds the index and creates missing .purpose files
1510
+ D: paradigm doctor --fix — finds missing documentation and generates stubs
1511
+ correct: B
1512
+ explanation: paradigm lint --auto-populate scans source directories (max depth 4) for undocumented components — directories containing source files but no .purpose file. Without --fix it reports suggestions; with --fix it writes draft .purpose files. paradigm lint --fix only fixes lint issues in existing .purpose files, it doesn't create new ones. scan and doctor don't generate .purpose files.
1513
+ slot: slot-063
1514
+ section: para-301
1515
+ - id: plsat-064a
1516
+ scenario: A team has both AGENTS.md and llms.txt in their Paradigm project. A new developer asks what each file is for.
1517
+ question: Which statement correctly distinguishes the two files?
1518
+ choices:
1519
+ A: AGENTS.md is for Claude, llms.txt is for all other LLMs
1520
+ B: AGENTS.md contains instructions (how to behave), llms.txt contains facts (what exists)
1521
+ C: llms.txt replaces AGENTS.md in Paradigm v2
1522
+ D: They contain the same information in different formats
1523
+ E: AGENTS.md is auto-generated but llms.txt must be hand-written
1524
+ correct: B
1525
+ explanation: AGENTS.md is prescriptive — it tells agents what tools to use, what conventions to follow, and what workflow to observe. llms.txt is descriptive — it tells agents what symbols exist, what flows are defined, and how the project is structured. Both are auto-generated by Paradigm (sync agents and sync-llms respectively) and serve distinct purposes.
1526
+ slot: slot-064
1527
+ section: para-401
1528
+ variants:
1529
+ - id: plsat-064b
1530
+ scenario: An AI agent spawned in isolation needs to orient itself before working on a task. It has access to AGENTS.md, MCP tools, and the full codebase.
1531
+ question: What is the most token-efficient orientation sequence?
1532
+ choices:
1533
+ A: Read all .purpose files, then read portal.yaml
1534
+ B: Read AGENTS.md → paradigm_session_recover → paradigm_navigate with context intent (~500 tokens total)
1535
+ C: paradigm_search for every symbol type → read matching files
1536
+ D: Read every file in .paradigm/ for full context
1537
+ E: Call paradigm_status repeatedly until context is sufficient
1538
+ correct: B
1539
+ explanation: 'The Fresh Context Principle: AGENTS.md provides instructions and conventions, paradigm_session_recover provides previous session context, and paradigm_navigate with context intent provides task-relevant files. Total cost: ~500 tokens — compared to thousands of tokens for file-reading approaches.'
1540
+ - id: plsat-065
1541
+ scenario: |-
1542
+ You have a `$checkout-flow` with these steps:
1543
+ 1. ^authenticated (gate)
1544
+ 2. #validate-cart (action)
1545
+ 3. #process-payment (action)
1546
+ 4. !order-placed (signal)
1547
+
1548
+ You run `paradigm flow diagram $checkout-flow`.
1549
+ question: In the generated Mermaid diagram, what shapes represent each step type?
1550
+ choices:
1551
+ A: All steps are rectangles with different colors
1552
+ B: Gates are diamonds, actions are rectangles, signals are rounded boxes
1553
+ C: Gates are hexagons, actions are circles, signals are parallelograms
1554
+ D: All steps are circles connected by labeled arrows
1555
+ E: Gates are rounded boxes, actions are diamonds, signals are rectangles
1556
+ correct: B
1557
+ explanation: 'Paradigm''s Mermaid diagram generator uses conventional flowchart shapes: diamond shapes (decision points) for gates, rectangles for actions, and rounded rectangles for signals. Gates also show deny paths when a failResponse or errorSignal is defined. Steps are color-coded: yellow for gates, blue for actions, green for signals.'
1558
+ slot: slot-065
1559
+ section: para-201
1560
+ - id: plsat-066
1561
+ scenario: Your MCP-connected agent calls `paradigm_search` for `#auth` twice within 10 seconds. The project has a ToolCache with a 30-second TTL configured.
1562
+ question: What happens on the second call?
1563
+ choices:
1564
+ A: The search runs again because each MCP call is stateless
1565
+ B: The cached result is returned instantly without re-scanning the index
1566
+ C: The cache is checked but always invalidated because search results may change
1567
+ D: The second call is queued until the first cache entry expires
1568
+ E: An error is returned because duplicate calls are rate-limited
1569
+ correct: B
1570
+ explanation: The ToolCache uses a time-based TTL (default 30 seconds). When the same tool is called with the same arguments within the TTL window, the cached result is returned immediately without re-executing the underlying scan. This saves significant compute for repeated discovery operations like search, status, and navigate.
1571
+ slot: slot-066
1572
+ section: para-401
1573
+ variants:
1574
+ - id: plsat-066b
1575
+ scenario: An agent calls `paradigm_reindex` to rebuild the static index after modifying several .purpose files. The project has ToolCache enabled.
1576
+ question: What happens to the ToolCache when reindex completes?
1577
+ choices:
1578
+ A: Nothing — the cache is independent of the index
1579
+ B: Only search-related cache entries are invalidated
1580
+ C: The entire cache is cleared to ensure fresh results from the rebuilt index
1581
+ D: Cache entries are marked stale but still served until they expire naturally
1582
+ E: The cache TTL is doubled to avoid redundant scans after reindex
1583
+ correct: C
1584
+ explanation: When paradigm_reindex completes successfully, it calls toolCache.clear() to invalidate ALL cached entries. This is critical because the reindex rebuilds the underlying data that search, navigate, and status tools depend on. Serving stale cached results after a reindex would return outdated symbol information.
1585
+ - id: plsat-067
1586
+ scenario: An agent has been working for 45 minutes, modifying 5 source files and touching symbols `#auth-middleware`, `^rate-limited`, and `!login-failed`. The session has 12 breadcrumbs recorded.
1587
+ question: What triggers auto-lore drafting?
1588
+ choices:
1589
+ A: Auto-lore drafts after every file modification regardless of count
1590
+ B: Auto-lore drafts when 3+ files are modified, generating a partial LoreEntry from session breadcrumbs
1591
+ C: Auto-lore drafts only when the agent explicitly calls paradigm_lore_record
1592
+ D: Auto-lore drafts at a fixed time interval (every 30 minutes)
1593
+ E: Auto-lore drafts only during the on-stop habit check
1594
+ correct: B
1595
+ explanation: The draftLoreFromBreadcrumbs() function generates a partial LoreEntry when 3+ files have been modified in a session. It extracts tool usage statistics from breadcrumbs, includes the symbols touched and files modified, and tags the draft with 'auto-draft' for review. The 3-file threshold ensures trivial edits don't generate noise.
1596
+ slot: slot-067
1597
+ section: para-501
1598
+ variants:
1599
+ - id: plsat-067b
1600
+ scenario: After a long coding session, the auto-lore system generates a draft entry. You inspect the draft and notice it has a tag you didn't add.
1601
+ question: What tag does auto-lore always apply to drafted entries?
1602
+ choices:
1603
+ A: '`auto-generated` — marking it as machine-created'
1604
+ B: '`auto-draft` — indicating it needs human review before finalization'
1605
+ C: '`session-log` — categorizing it as a session record'
1606
+ D: '`pending-review` — flagging it for team approval'
1607
+ E: '`unverified` — warning that the content may be incomplete'
1608
+ correct: B
1609
+ explanation: Auto-drafted lore entries are always tagged with 'auto-draft' to distinguish them from manually recorded entries. This tag signals that the entry was machine-generated from session breadcrumbs and should be reviewed for accuracy before being treated as authoritative project history.
1610
+ - id: plsat-068
1611
+ scenario: |-
1612
+ Your project's `.paradigm/config.yaml` contains:
1613
+ ```yaml
1614
+ limits:
1615
+ habitsCacheTtlMs: 60000
1616
+ threadTrailMax: 20
1617
+ breadcrumbsMax: 100
1618
+ ```
1619
+ question: What do these configurable limits control?
1620
+ choices:
1621
+ A: Maximum file sizes for paradigm-managed files
1622
+ B: Rate limits for MCP tool calls per session
1623
+ C: Tunable parameters for habits cache duration, thread trail depth, and breadcrumb history length
1624
+ D: Hard caps on the number of symbols, flows, and gates allowed
1625
+ E: Timeout durations for CLI commands
1626
+ correct: C
1627
+ explanation: 'The LimitsConfig in config.yaml allows projects to tune operational parameters: habitsCacheTtlMs controls how long habit definitions are cached (default 30000ms), threadTrailMax sets the maximum breadcrumbs shown in thread trail output (default 10), and breadcrumbsMax sets the maximum breadcrumbs stored per session. These defaults work for most projects but can be adjusted for larger codebases.'
1628
+ slot: slot-068
1629
+ section: para-501
1630
+ variants:
1631
+ - id: plsat-068b
1632
+ scenario: A large monorepo project finds that paradigm_search is running too frequently, consuming unnecessary compute. They want to increase the cache duration for MCP tool results.
1633
+ question: Which config.yaml field controls MCP tool cache duration?
1634
+ choices:
1635
+ A: '`limits.searchCacheTtlMs` — specific to search operations'
1636
+ B: '`limits.toolCacheTtlMs` — controls the ToolCache TTL for all cached MCP tools'
1637
+ C: '`limits.mcpTimeoutMs` — sets the MCP response timeout'
1638
+ D: '`cache.ttl` — global cache setting for all paradigm operations'
1639
+ E: '`limits.habitsCacheTtlMs` — since habits and tools share the same cache'
1640
+ correct: B
1641
+ explanation: The limits.toolCacheTtlMs field in config.yaml controls the TTL for the ToolCache that wraps paradigm_search, paradigm_status, and paradigm_navigate. The default is 30000ms (30 seconds). Increasing this value reduces redundant computations but may serve slightly stale results. It's separate from habitsCacheTtlMs which controls the habits definition cache.
1642
+ - id: plsat-069
1643
+ scenario: Your team uses Paradigm's Global Brain (`~/.paradigm/`) to share wisdom, lore, and history across projects. After a year, the global directory has grown to contain hundreds of old entries.
1644
+ question: How do you clean up old Global Brain entries?
1645
+ choices:
1646
+ A: Manually delete files from `~/.paradigm/` using `rm -rf`
1647
+ B: '`paradigm global clean --older-than 90d` removes files older than the specified duration'
1648
+ C: '`paradigm scan --prune` removes unused global entries'
1649
+ D: Global Brain entries are automatically pruned on each `paradigm shift`
1650
+ E: '`paradigm doctor --fix` cleans up stale global files'
1651
+ correct: B
1652
+ explanation: The `paradigm global clean` command scans ~/.paradigm/ directories (wisdom, lore, history, cache) for files older than the specified duration. The --older-than flag accepts human-readable durations like 90d, 30d, or 7d. Use --dry-run first to preview what would be deleted. This is safer than manual deletion because it respects directory structure and cleans up empty directories afterward.
1653
+ slot: slot-069
1654
+ section: para-501
1655
+ - id: plsat-070
1656
+ scenario: |-
1657
+ A Claude Code plugin's `hooks.json` includes:
1658
+ ```json
1659
+ {
1660
+ "compatibleVersions": {
1661
+ "min": "3.0.0",
1662
+ "max": "4.0.0"
1663
+ }
1664
+ }
1665
+ ```
1666
+ Your installed Paradigm CLI is version 3.1.6.
1667
+ question: What happens when you run `paradigm hooks install`?
1668
+ choices:
1669
+ A: Installation fails because 3.1.6 is not exactly 3.0.0 or 4.0.0
1670
+ B: Installation proceeds normally — 3.1.6 is within the compatible range
1671
+ C: A warning is shown but installation is blocked until you upgrade
1672
+ D: The plugin is downgraded to match version 3.0.0
1673
+ E: The compatibleVersions field is ignored during installation
1674
+ correct: B
1675
+ explanation: The plugin version compatibility check compares the installed Paradigm version against the min/max range in hooks.json. Since 3.1.6 >= 3.0.0 and 3.1.6 < 4.0.0, installation proceeds normally. If the version were outside the range (e.g., 2.9.0 or 4.1.0), a warning would be displayed advising the user to update their Paradigm version or the plugin.
1676
+ slot: slot-070
1677
+ section: para-401
1678
+ variants:
1679
+ - id: plsat-070b
1680
+ scenario: You're developing a Paradigm plugin and want to ensure it only works with Paradigm versions that support the habits system (introduced in v3.0).
1681
+ question: Where do you declare this version requirement?
1682
+ choices:
1683
+ A: In the plugin's `package.json` under `peerDependencies`
1684
+ B: 'In the plugin''s `hooks.json` under the `compatibleVersions` field with `min: "3.0.0"`'
1685
+ C: In the plugin's `.purpose` file under `dependencies`
1686
+ D: In the project's `.paradigm/config.yaml` under `plugins`
1687
+ E: Version requirements are not enforceable — plugins work with any version
1688
+ correct: B
1689
+ explanation: Plugin version compatibility is declared in the plugin's hooks.json file using the compatibleVersions field. Setting min to '3.0.0' ensures that paradigm hooks install will warn users running older versions that lack habits support. This check runs at the start of hook installation before any hooks are written.
1690
+ - id: plsat-071
1691
+ scenario: You want to record a lore entry that credits both the human developer and the AI agent that collaborated on a feature. The lore system supports co-authorship tracking.
1692
+ question: Which field on a LoreEntry captures AI collaboration?
1693
+ choices:
1694
+ A: '`author` — set to the AI agent''s name'
1695
+ B: '`assistedBy` — with type (''agent'', ''tool'', or ''human''), id, and optional role'
1696
+ C: '`contributors` — an array of all participant names'
1697
+ D: '`metadata.aiModel` — storing the model name used'
1698
+ E: '`tags` — add an ''ai-assisted'' tag'
1699
+ correct: B
1700
+ explanation: The assistedBy field on LoreEntry provides structured co-authorship tracking. It records the type of assistant (agent, tool, or human), their identifier (e.g., 'claude-opus-4', 'copilot'), and an optional role description. The author field remains the human developer, while assistedBy captures the AI collaboration context for project history.
1701
+ slot: slot-071
1702
+ section: para-501
1703
+ variants:
1704
+ - id: plsat-071b
1705
+ scenario: Your team reviews lore entries from the past month and wants to understand how much AI assistance was involved in recent changes.
1706
+ question: How does the `assistedBy` field help with this analysis?
1707
+ choices:
1708
+ A: It tracks token usage per AI interaction
1709
+ B: It records the AI's confidence score for each change
1710
+ C: It provides structured data (type, id, role) showing which AI tools or agents assisted each recorded session
1711
+ D: It measures the percentage of code written by AI vs human
1712
+ E: It links to the AI conversation transcript
1713
+ correct: C
1714
+ explanation: 'The assistedBy field captures three dimensions of AI collaboration: type (was it an agent like Claude, a tool like Copilot, or a human pair-programmer?), id (which specific model or tool?), and role (what was their contribution — implementation, review, planning?). This structured data enables teams to analyze collaboration patterns across their lore timeline.'
1715
+ - id: plsat-072
1716
+ scenario: A project has no `limits` section in `.paradigm/config.yaml`. An agent calls tools that rely on configurable limits — habits cache, thread trail, and ToolCache.
1717
+ question: What values are used when limits are not configured?
1718
+ choices:
1719
+ A: All limits are set to 0 (unlimited)
1720
+ B: An error is thrown requiring explicit configuration
1721
+ C: 'Sensible defaults: habitsCacheTtlMs=30000, threadTrailMax=10, toolCacheTtlMs=30000, breadcrumbsMax=unlimited'
1722
+ D: Limits are inherited from the Global Brain (~/.paradigm/) configuration
1723
+ E: Each tool prompts the user to set a limit on first use
1724
+ correct: C
1725
+ explanation: 'All configurable limits have sensible defaults that match the pre-configuration behavior: habits cache refreshes every 30 seconds, thread trail shows the last 10 breadcrumbs, ToolCache entries expire after 30 seconds. These defaults work well for most projects. The limits section in config.yaml is entirely optional — only override when you have a specific need.'
1726
+ slot: slot-072
1727
+ section: para-501
1728
+ - id: plsat-073
1729
+ scenario: |-
1730
+ An agent working on a complex feature calls these MCP tools in sequence:
1731
+ 1. `paradigm_status` (cached)
1732
+ 2. `paradigm_search` for `#auth` (cached)
1733
+ 3. Edits 3 files, adds a new component
1734
+ 4. `paradigm_reindex`
1735
+ 5. `paradigm_search` for `#auth` again
1736
+ question: Does step 5 return the updated results including the new component?
1737
+ choices:
1738
+ A: No — the search cache still has the old results from step 2
1739
+ B: Yes — reindex at step 4 clears all caches, so step 5 runs a fresh search against the rebuilt index
1740
+ C: Only if 30 seconds have passed since step 2
1741
+ D: Only if the agent explicitly called toolCache.clear()
1742
+ E: The search always bypasses cache after a write operation
1743
+ correct: B
1744
+ explanation: 'The reindex operation at step 4 has two effects: it rebuilds the static index from .purpose files and clears the entire ToolCache. This means step 5 performs a fresh search against the newly rebuilt index, which includes the new component. This cache-invalidation-on-reindex pattern ensures that discovery tools always reflect the current state after structural changes.'
1745
+ slot: slot-073
1746
+ section: para-401
1747
+ - id: plsat-074
1748
+ scenario: 'An auto-lore draft is generated from session breadcrumbs after modifying 6 files. The breadcrumbs show: 4 Edit tool calls, 2 Write tool calls, 8 Read tool calls, 3 paradigm_navigate calls.'
1749
+ question: What information does the auto-lore draft extract from these breadcrumbs?
1750
+ choices:
1751
+ A: Only the file paths that were modified
1752
+ B: A complete diff of all code changes
1753
+ C: Tool usage statistics (edit count, write count, read count) plus modified files and symbols touched
1754
+ D: The full text of every tool call and response
1755
+ E: Only the symbols referenced in paradigm_navigate calls
1756
+ correct: C
1757
+ explanation: The auto-lore drafting function analyzes breadcrumbs to extract tool usage statistics — counting edits, writes, reads, and paradigm tool calls. It combines this with the list of modified files and symbols touched to generate a summary. The draft doesn't include full diffs or response text, keeping the lore entry concise and focused on what happened rather than how.
1758
+ slot: slot-074
1759
+ section: para-501
1760
+ variants:
1761
+ - id: plsat-074b
1762
+ scenario: An agent completes a task that modified only 2 files. The habits system runs the on-stop check.
1763
+ question: Will auto-lore drafting generate an entry?
1764
+ choices:
1765
+ A: Yes — any file modification triggers auto-lore
1766
+ B: No — auto-lore requires 3+ modified files to trigger
1767
+ C: Yes — but only if the session lasted longer than 15 minutes
1768
+ D: No — auto-lore only runs during postflight, not on-stop
1769
+ E: It depends on the project's limits.breadcrumbsMax setting
1770
+ correct: B
1771
+ explanation: 'Auto-lore drafting has a 3-file minimum threshold. Modifying only 2 files does not trigger a draft because such small changes are typically routine fixes that don''t warrant project history entries. This threshold aligns with the lore recording decision tree: ''Did I modify 3+ source files? YES → Record lore.'' The threshold prevents noise in project history.'
1772
+ - id: plsat-075
1773
+ scenario: |-
1774
+ You run `paradigm global clean --older-than 30d --dry-run` and see:
1775
+ ```
1776
+ Would delete 23 files from wisdom/
1777
+ Would delete 45 files from lore/
1778
+ Would delete 12 files from history/
1779
+ Would delete 0 files from cache/
1780
+ ```
1781
+ question: What is the safest next step?
1782
+ choices:
1783
+ A: Run `paradigm global clean --older-than 30d` to delete all 80 files immediately
1784
+ B: Review the specific files listed, then run without --dry-run if the deletions look correct
1785
+ C: Run `paradigm global clean --older-than 7d` to be more aggressive
1786
+ D: Delete the `~/.paradigm/` directory entirely since most files are old
1787
+ E: Skip cleanup — 80 files is too many to safely remove
1788
+ correct: B
1789
+ explanation: 'The --dry-run flag exists specifically to preview destructive operations. The safest workflow is: (1) dry-run to see what would be deleted, (2) review the file list for anything you want to keep, (3) run without --dry-run once satisfied. Global Brain files contain cross-project wisdom and lore that may be valuable — always review before bulk deletion.'
1790
+ slot: slot-075
1791
+ section: para-501
1792
+ - id: plsat-076
1793
+ scenario: Your project uses both the ToolCache (for MCP tool results) and the habits cache (for habit definitions). Both have configurable TTLs.
1794
+ question: Why are these two separate caches rather than one unified cache?
1795
+ choices:
1796
+ A: Historical accident — they were built by different teams
1797
+ B: 'They cache different data types with different invalidation needs: tool results change on reindex, habit definitions change on file edits'
1798
+ C: Performance — two smaller caches are faster than one large cache
1799
+ D: Security — MCP tool results must be isolated from habit definitions
1800
+ E: They are the same cache with different configuration keys
1801
+ correct: B
1802
+ explanation: The ToolCache caches MCP tool results (search, navigate, status) and is invalidated on reindex when the underlying index changes. The habits cache stores parsed habit definitions from habits.yaml and is invalidated when the file's modification time changes. These fundamentally different invalidation strategies require separate cache implementations — flushing all habit definitions because a .purpose file changed would be wasteful, and vice versa.
1803
+ slot: slot-076
1804
+ section: para-401
1805
+ - id: plsat-077
1806
+ scenario: You're configuring a large monorepo with 500+ symbols. Sessions often span 30+ minutes with many breadcrumbs. You want to optimize the Paradigm configuration.
1807
+ question: Which limits configuration would be most appropriate?
1808
+ choices:
1809
+ A: Set all limits to maximum values for the largest possible buffers
1810
+ B: Increase threadTrailMax to 25 and toolCacheTtlMs to 60000 for the larger codebase, keep other defaults
1811
+ C: Decrease all TTLs to 5000ms to ensure data is always fresh
1812
+ D: Remove the limits section entirely and rely on defaults
1813
+ E: Set breadcrumbsMax to 10 to save memory
1814
+ correct: B
1815
+ explanation: For large monorepos, increasing threadTrailMax (from default 10 to 25) provides more session context for complex tasks, and increasing toolCacheTtlMs (from 30s to 60s) reduces redundant index scans across the larger symbol space. Other defaults work well regardless of project size. Setting TTLs too low causes excessive recomputation, while setting breadcrumbsMax too low loses valuable session context.
1816
+ slot: slot-077
1817
+ section: para-501
1818
+ - id: plsat-078
1819
+ scenario: 'Your project has aspects categorized as rules, decisions, constraints, configurations, and invariants. A new aspect states: ''API response payloads must not exceed 5MB.'' A developer is unsure which category to assign.'
1820
+ question: Which aspect category is correct for this aspect?
1821
+ choices:
1822
+ A: '`rule` — because it uses ''must not'', indicating a mandatory pattern'
1823
+ B: '`constraint` — because it defines a quantitative limit (5MB) on system behavior'
1824
+ C: '`configuration` — because the 5MB value could be changed per environment'
1825
+ D: '`invariant` — because it must always hold true'
1826
+ E: '`decision` — because someone decided 5MB was the right limit'
1827
+ correct: B
1828
+ explanation: 'The key indicator is the quantitative limit: ''5MB''. Constraints define measurable boundaries on system behavior — file sizes, rate limits, timeouts, quotas. While ''must not exceed'' sounds like a rule, the category inference system prioritizes ''limit'', ''maximum'', ''cannot exceed'' keywords for `constraint`. A rule would be a pattern without a specific numeric boundary (e.g., ''all responses must include request IDs''). Configuration would apply if the value explicitly varies by environment.'
1829
+ slot: slot-078
1830
+ section: para-501
1831
+ variants:
1832
+ - id: plsat-078b
1833
+ scenario: 'An aspect definition reads: ''The team decided to use PostgreSQL over MongoDB for the user service due to relational query requirements.'' No category field is explicitly set.'
1834
+ question: What category will Paradigm's category inference assign?
1835
+ choices:
1836
+ A: '`rule` — because it implies PostgreSQL must be used'
1837
+ B: '`constraint` — because it limits the database technology'
1838
+ C: '`decision` — because the description contains ''decided'''
1839
+ D: '`configuration` — because the database choice is a deployment setting'
1840
+ E: '`invariant` — because the database choice should never change'
1841
+ correct: C
1842
+ explanation: Category inference uses keyword matching on the description. Words like 'decided', 'chosen', 'selected', 'opted' trigger the `decision` category. The description explicitly says 'The team decided to use PostgreSQL over MongoDB' — this is a textbook architectural decision with rationale.
1843
+ - id: plsat-079
1844
+ scenario: |-
1845
+ Your aspect graph has the following edges:
1846
+ - `~token-expiry-24h` --depends-on--> `~jwt-signing-rs256`
1847
+ - `~jwt-signing-rs256` --enforced-by--> `#auth-middleware`
1848
+ - `~cache-aggressively` --contradicts--> `~always-fresh-data`
1849
+ - `~rate-limit-v2` --supersedes--> `~rate-limit-v1`
1850
+
1851
+ You need to modify `~jwt-signing-rs256` to change the signing algorithm.
1852
+ question: Which aspect will paradigm_ripple surface as impacted through the 'depends-on' edge?
1853
+ choices:
1854
+ A: '`~cache-aggressively` — because it has a contradicts edge in the same graph'
1855
+ B: '`~token-expiry-24h` — because it depends-on the aspect being modified'
1856
+ C: '`~rate-limit-v2` — because it supersedes another aspect'
1857
+ D: '`#auth-middleware` — because it enforces the aspect'
1858
+ E: All four aspects — ripple follows all edge types equally
1859
+ correct: B
1860
+ explanation: Ripple follows dependency edges to discover indirect impacts. `~token-expiry-24h` has a `depends-on` edge to `~jwt-signing-rs256`, meaning changes to the signing algorithm may affect token expiry behavior. `#auth-middleware` has an `enforced-by` edge (reverse direction — it enforces the aspect, but the aspect doesn't depend on it for correctness). The contradicts and supersedes edges involve unrelated aspects.
1861
+ slot: slot-079
1862
+ section: para-501
1863
+ - id: plsat-080
1864
+ scenario: |-
1865
+ You run `paradigm_aspect_search({ query: 'jwt expiry' })` and get three results:
1866
+ - Tier 1 (learned): `~token-expiry-24h` (weight: 3.0)
1867
+ - Tier 2 (FTS5): `~session-timeout-30m` (BM25: 0.7)
1868
+ - Tier 3 (fuzzy): `~jwt-refresh-rotation` (distance: 2)
1869
+
1870
+ The Tier 1 result is exactly what you need.
1871
+ question: What should you do to reinforce this search mapping?
1872
+ choices:
1873
+ A: Nothing — Tier 1 results are already reinforced by being in the search_weights table
1874
+ B: 'Call `paradigm_aspect_confirm({ query: ''jwt expiry'', aspectId: ''token-expiry-24h'' })` to add +1.0 weight and decay the others'
1875
+ C: Manually update the search_weights SQLite table to increase the weight
1876
+ D: 'Call `paradigm_aspect_get({ aspectId: ''token-expiry-24h'' })` to register a direct access'
1877
+ E: Call `paradigm_reindex` to rebuild the learned mappings
1878
+ correct: B
1879
+ explanation: paradigm_aspect_confirm is the feedback mechanism for the learning system. Calling it with the query and selected aspect ID adds +1.0 to the confirmed result's weight (3.0 → 4.0) and multiplies the weight of every other result for that query by 0.95. This reinforces the correct mapping. Reindex rebuilds the graph but does not affect search_weights — those persist across reindexes. Direct access via aspect_get records a heatmap entry but does not affect search learning.
1880
+ slot: slot-080
1881
+ section: para-501
1882
+ - id: plsat-081
1883
+ scenario: The aspect graph materialization pipeline runs during `paradigm_reindex`. It processes aspects from .purpose files through a specific sequence of steps.
1884
+ question: What is the correct order of the five-step materialization pipeline?
1885
+ choices:
1886
+ A: materialize aspects → open graph → materialize lore links → infer lore edges → close graph
1887
+ B: open graph → materialize aspects → materialize lore links → infer lore edges → close graph
1888
+ C: open graph → infer lore edges → materialize aspects → materialize lore links → close graph
1889
+ D: materialize aspects → materialize lore links → open graph → infer lore edges → close graph
1890
+ E: open graph → materialize lore links → materialize aspects → close graph → infer lore edges
1891
+ correct: B
1892
+ explanation: 'The materialization pipeline follows a strict order: (1) openAspectGraph opens or creates the SQLite database and clears all tables. (2) materializeAspects reads .purpose files and writes aspects, anchors, and explicit/inferred edges. (3) materializeLoreLinks creates entries connecting aspects to their referenced lore entries. (4) inferLoreEdges scans for shared lore references between aspects and creates learned edges. (5) closeAspectGraph commits changes, runs ANALYZE, and closes the connection.'
1893
+ slot: slot-081
1894
+ section: para-501
1895
+ - id: plsat-082
1896
+ scenario: |-
1897
+ You define an aspect with an `applies-to` reference to a component:
1898
+
1899
+ ```yaml
1900
+ ~audit-required:
1901
+ description: Financial operations must produce audit logs
1902
+ applies-to: ["#payment-service"]
1903
+ edges:
1904
+ - target: "#audit-middleware"
1905
+ relation: enforced-by
1906
+ ```
1907
+ question: What edges will the materialization pipeline create, and what are their origins and weights?
1908
+ choices:
1909
+ A: 'One edge: `enforced-by` to `#audit-middleware` with origin `explicit` and weight 1.0'
1910
+ B: 'Two edges: `enforced-by` to `#audit-middleware` (origin: explicit, weight: 1.0) and an inferred edge to `#payment-service` (origin: inferred, weight: 0.5)'
1911
+ C: 'Two edges: both with origin `explicit` and weight 1.0'
1912
+ D: 'Three edges: one explicit, one inferred, and one learned'
1913
+ E: 'One edge: `applies-to` is documentation only and does not generate edges'
1914
+ correct: B
1915
+ explanation: The materialization pipeline creates edges from two sources. The explicit `edges` field generates an edge to `#audit-middleware` with origin `explicit` and weight 1.0. The `applies-to` reference generates an inferred edge to `#payment-service` with origin `inferred` and weight 0.5. Inferred edges have lower weight because they represent a weaker relationship than explicitly declared edges.
1916
+ slot: slot-082
1917
+ section: para-501
1918
+ - id: plsat-083
1919
+ scenario: |-
1920
+ An aspect `~session-timeout-30m` was created 3 months ago with an anchor at `src/middleware/session.ts:15-25`. Since then, a developer refactored the file and the session timeout logic is now at lines 40-55. The aspect definition was not updated.
1921
+
1922
+ You run `paradigm_aspect_drift({ aspectId: 'session-timeout-30m' })`.
1923
+ question: What will the drift detection report?
1924
+ choices:
1925
+ A: No drift — the file still exists, so the anchor is valid
1926
+ B: 'Drift detected: the SHA-256 content hash of lines 15-25 no longer matches the stored hash, indicating the code at the anchored location has changed'
1927
+ C: An error — the anchor line range exceeds the current file length
1928
+ D: Partial drift — only some lines within the range changed
1929
+ E: No drift — drift detection only checks whether the file exists, not its contents
1930
+ correct: B
1931
+ explanation: Drift detection computes a SHA-256 hash of the current code at the anchored line range (15-25) and compares it to the hash stored during materialization. Since the timeout logic moved to different lines, the code at lines 15-25 is now different — the hashes will not match, and drift is reported. The fix is to update the anchor to `src/middleware/session.ts:40-55` to point to the new location of the timeout logic.
1932
+ slot: slot-083
1933
+ section: para-501
1934
+ - id: plsat-084
1935
+ scenario: |-
1936
+ You run `paradigm_aspect_suggest_scan({ filePath: 'src/auth/jwt.ts' })` on a file containing:
1937
+
1938
+ ```typescript
1939
+ const TOKEN_EXPIRY = 86400; // 24 hours in seconds
1940
+ const MAX_REFRESH_ATTEMPTS = 3;
1941
+ if (process.env.NODE_ENV === 'production') { ... }
1942
+ const EMAIL_REGEX = /^[a-zA-Z0-9+_.-]+@[a-zA-Z0-9.-]+$/;
1943
+ ```
1944
+ question: Which of the 8 built-in detectors will fire for each pattern?
1945
+ choices:
1946
+ A: All four lines trigger the 'magic numbers' detector only
1947
+ B: 'TOKEN_EXPIRY: time values; MAX_REFRESH_ATTEMPTS: magic numbers; process.env: environment checks; EMAIL_REGEX: regex patterns'
1948
+ C: 'TOKEN_EXPIRY: magic numbers; MAX_REFRESH_ATTEMPTS: rate limits; process.env: feature flags; EMAIL_REGEX: hardcoded strings'
1949
+ D: All four lines trigger the 'hardcoded strings' detector
1950
+ E: 'TOKEN_EXPIRY: configuration; MAX_REFRESH_ATTEMPTS: constraint; process.env: environment checks; EMAIL_REGEX: assertion guards'
1951
+ correct: B
1952
+ explanation: 'Each pattern matches a specific detector: (1) 86400 with a comment mentioning ''24 hours'' matches the time values detector (durations, timeouts, TTLs, expiry). (2) MAX_REFRESH_ATTEMPTS = 3 is a numeric literal that is not 0 or 1, matching the magic numbers detector. (3) process.env.NODE_ENV matches the environment checks detector. (4) The regular expression literal matches the regex patterns detector. The detectors are specialized for these exact pattern types.'
1953
+ slot: slot-084
1954
+ section: para-501
1955
+ - id: plsat-085
1956
+ scenario: |-
1957
+ Two aspects in your project both reference lore entry `L-2026-01-15-003`:
1958
+ - `~token-expiry-24h` has `lore: [L-2026-01-15-003]`
1959
+ - `~refresh-token-rotation` has `lore: [L-2026-01-15-003]`
1960
+
1961
+ Neither aspect has an explicit edge to the other.
1962
+ question: What happens during the `inferLoreEdges` step of materialization?
1963
+ choices:
1964
+ A: Nothing — edges are only created from explicit YAML definitions
1965
+ B: A learned edge is created between the two aspects with origin 'learned' and weight proportional to shared lore references
1966
+ C: Both aspects are merged into a single aspect
1967
+ D: A lore_links entry is created but no edge is generated
1968
+ E: An explicit edge with weight 1.0 is created between them
1969
+ correct: B
1970
+ explanation: The inferLoreEdges step scans the lore_links table for aspects that share lore references. When two aspects both reference the same lore entry, a learned edge is created between them with origin 'learned' and a weight proportional to the number of shared references. This discovers implicit relationships — aspects that were discussed in the same lore context are likely related even without explicit edges.
1971
+ slot: slot-085
1972
+ section: para-501
1973
+ - id: plsat-086
1974
+ scenario: |-
1975
+ Your project has three edge origins in the aspect graph:
1976
+ - Explicit edges (weight 1.0) from YAML `edges` fields
1977
+ - Inferred edges (weight 0.5) from `applies-to` references
1978
+ - Learned edges from shared lore references
1979
+
1980
+ During recursive ripple, the BFS traverses: an explicit edge (1.0) then an inferred edge (0.5) then another inferred edge (0.5).
1981
+ question: What is the cumulative path weight after these three hops, and will it be pruned by the default minWeight threshold?
1982
+ choices:
1983
+ A: 'Weight: 2.0 (additive) — well above the 0.1 threshold, not pruned'
1984
+ B: 'Weight: 0.25 (multiplicative: 1.0 * 0.5 * 0.5) — above 0.1, not pruned'
1985
+ C: 'Weight: 0.5 (only the weakest edge counts) — above 0.1, not pruned'
1986
+ D: 'Weight: 0.0625 (multiplicative: 1.0 * 0.25 * 0.25) — below 0.1, pruned'
1987
+ E: 'Weight: 0.167 (average of all three) — above 0.1, not pruned'
1988
+ correct: B
1989
+ explanation: 'Recursive ripple uses multiplicative decay: the weight at each hop is multiplied by the edge weight. Starting at 1.0, after an explicit edge (1.0): 1.0 * 1.0 = 1.0. After an inferred edge (0.5): 1.0 * 0.5 = 0.5. After another inferred edge (0.5): 0.5 * 0.5 = 0.25. The cumulative weight 0.25 is above the default minWeight threshold of 0.1, so this path is NOT pruned. One more inferred edge would drop it to 0.125, still above threshold. Two more would reach 0.0625, below threshold and pruned.'
1990
+ slot: slot-086
1991
+ section: para-501
1992
+ - id: plsat-087
1993
+ scenario: Your project's aspect graph SQLite database at `.paradigm/aspect-graph.db` has six tables. During a governance review, you want to understand which aspects are discovered most frequently and how they are typically found.
1994
+ question: Which table stores this information, and what are its columns?
1995
+ choices:
1996
+ A: The `aspects` table with an `access_count` column
1997
+ B: The `edges` table with a `traversal_count` column
1998
+ C: 'The `heatmap` table with columns: aspect_id, access_type, count, and last_accessed'
1999
+ D: The `search_weights` table with a `hit_count` column
2000
+ E: The `anchors` table with a `reference_count` column
2001
+ correct: C
2002
+ explanation: 'The `heatmap` table tracks aspect access patterns with four columns: `aspect_id` (which aspect), `access_type` (how it was discovered: search, ripple, navigate, or direct), `count` (frequency), and `last_accessed` (timestamp). This table powers `paradigm_aspect_heatmap` and reveals whether aspects are typically found via search, encountered during ripple analysis, discovered through navigation, or accessed by direct ID lookup.'
2003
+ slot: slot-087
2004
+ section: para-501
2005
+ - id: plsat-088
2006
+ scenario: You want to extend `paradigm_aspect_suggest_scan` to detect SOC2 compliance annotations specific to your project. The built-in 8 detectors do not cover this pattern.
2007
+ question: How do you add a custom detector?
2008
+ choices:
2009
+ A: Edit the Paradigm source code to add a 9th built-in detector
2010
+ B: Define a custom detector in `.paradigm/aspect-detectors.yaml` with regex patterns, language filters, and suggested category/severity
2011
+ C: Create a `.paradigm/plugins/soc2-detector.js` plugin file
2012
+ D: Add a `detectors` section to `.paradigm/config.yaml`
2013
+ E: Custom detectors are not supported — use paradigm_aspect_search instead
2014
+ correct: B
2015
+ explanation: Custom detectors are defined in `.paradigm/aspect-detectors.yaml`. Each detector specifies an id, name, description, regex patterns with language filters, and suggestions for category, severity, and tags. Custom detectors are loaded alongside the built-in 8 during `paradigm_aspect_suggest_scan`, extending the detection system without modifying Paradigm's source code.
2016
+ slot: slot-088
2017
+ section: para-501
2018
+ - id: plsat-089
2019
+ scenario: 'During a quarterly governance review of a project with 150 aspects, the heatmap shows 40 aspects with zero access. The drift audit reveals 12 drifted anchors. The category distribution is: 95 rules, 20 constraints, 15 configurations, 12 decisions, 8 invariants.'
2020
+ question: What does the category distribution suggest about this project's aspect governance?
2021
+ choices:
2022
+ A: The distribution is healthy — rules should always be the majority
2023
+ B: The project may be over-documenting constraints as rules, and under-documenting strategic decisions — review whether some 'rules' are actually constraints or decisions
2024
+ C: The project needs more invariants to balance the distribution
2025
+ D: Configuration aspects should equal rules in a well-governed project
2026
+ E: The 40 zero-access aspects indicate the project should reduce to 110 aspects
2027
+ correct: B
2028
+ explanation: A 63% concentration in the `rule` category (95 out of 150) suggests over-classification. Many numeric limits (which should be constraints) and architectural choices (which should be decisions) may be categorized as rules. The low decision count (12) is a red flag — a project with 150 aspects likely made more than 12 strategic decisions. The governance review should reclassify mistyped aspects and document missing decisions. Zero-access aspects (40) are a separate concern requiring individual evaluation.
2029
+ slot: slot-089
2030
+ section: para-501
2031
+ - id: plsat-090
2032
+ scenario: 'Your team uses Paradigm task management to track work across context windows. An agent creates three tasks on 2026-03-10: a high-priority auth bug, a medium-priority docs update, and a low-priority refactor. Later that day, a fourth task is created. The next morning (2026-03-11), a new agent session starts and calls `paradigm_session_recover`.'
2033
+ question: What task ID is assigned to the fourth task on 2026-03-10, and how are tasks surfaced in the new session?
2034
+ choices:
2035
+ A: T-2026-03-10-004. All four tasks are fully displayed in the recovery payload.
2036
+ B: T-2026-03-10-004. The top 5 open tasks sorted by priority are surfaced in the session recovery, so all four appear (high first, then medium, then low).
2037
+ C: T-004. Only tasks explicitly pinned to the session are recovered.
2038
+ D: T-2026-03-11-001. Task IDs reset per calendar day, so recovery re-numbers them.
2039
+ E: T-2026-03-10-004. Tasks are not surfaced automatically — the agent must call `paradigm_task_list` to see them.
2040
+ correct: B
2041
+ explanation: 'Task IDs follow the format T-YYYY-MM-DD-NNN with per-date sequential numbering, so the fourth task on 2026-03-10 is T-2026-03-10-004. Tasks are designed to survive context windows — on session recovery, the top 5 open tasks (sorted by priority: high > medium > low) are automatically surfaced. Since there are only four open tasks, all four appear. This ensures continuity without requiring the agent to manually query task state.'
2042
+ slot: slot-090
2043
+ section: para-501
2044
+ - id: plsat-091
2045
+ scenario: 'An agent finishes a debugging session and wants to record the root cause and resolution as a lasting insight, grouped under the `arc:auth-hardening` arc. The agent calls `paradigm_lore_record` with `type: "insight"`, `tags: ["arc:auth-hardening", "assessment:insight"]`, a summary, and a body with the detailed analysis. The author is `ascend` and the timestamp is `2026-04-02T16:30:00Z`.'
2046
+ question: Where is the entry stored, and how does the arc grouping work?
2047
+ choices:
2048
+ A: Stored in `.paradigm/assessments/arcs/arc-auth-hardening/entries/` — arcs have their own storage directories.
2049
+ B: Stored in `.paradigm/lore/entries/2026-04-02/` as a `.lore` file — arcs are just tag prefixes, not separate storage.
2050
+ C: Stored in `.paradigm/lore/arcs/auth-hardening/` — arc entries get their own subdirectory.
2051
+ D: Stored in `.paradigm/lore/entries/` root directory — no date partitioning for arc entries.
2052
+ E: Stored in both `.paradigm/lore/` and `.paradigm/assessments/` — the system maintains backward compatibility.
2053
+ correct: B
2054
+ explanation: 'All lore entries are stored in `.paradigm/lore/entries/{date}/` as `.lore` files, regardless of their tags. Arcs are simply tag prefixes (e.g., `arc:auth-hardening`) — they require no separate directory structure or management. To find all entries in an arc, use `paradigm_lore_search` with `tag: "arc:auth-hardening"`. This unified storage eliminates the complexity of a separate assessment system while preserving full arc-based organization through tags.'
2055
+ slot: slot-091
2056
+ section: para-501
2057
+ - id: plsat-092
2058
+ scenario: 'A developer wants to record retrospectives about a failed deployment. They have no prior entries tagged with `arc:platform-stability`. The agent calls `paradigm_lore_record` with `type: "retro"`, `tags: ["arc:platform-stability", "assessment:retro"]`, a title, summary, and body describing the failure and lessons learned.'
2059
+ question: What happens when you use a new arc tag that no prior entries have?
2060
+ choices:
2061
+ A: The call fails — arcs must be explicitly created before tagging entries.
2062
+ B: The entry is recorded normally — arcs are just tag prefixes, no creation step needed. The arc exists as soon as an entry has the tag.
2063
+ C: The system auto-creates a `.paradigm/lore/arcs/platform-stability/` directory to track the arc.
2064
+ D: The entry is recorded but flagged as orphaned until an arc is formally registered.
2065
+ E: The tag is rejected because it does not match an existing arc in the arc registry.
2066
+ correct: B
2067
+ explanation: 'Arcs in the unified lore system are simply tag prefixes — no explicit creation needed. The first entry tagged with `arc:platform-stability` effectively creates that arc. To find all entries in this arc later, use `paradigm_lore_search` with `tag: "arc:platform-stability"`. To close the arc, add `arc-closed` and `arc-status:complete` tags to its entries. This tag-based approach eliminates the overhead of managing separate arc directories and YAML files.'
2068
+ slot: slot-092
2069
+ section: para-501
2070
+ - id: plsat-093
2071
+ scenario: 'Your project has been running for six months. The codebase has 200+ commits in git and 57 lore entries: 45 are `agent-session` type (automatic session records), 8 have `arc:*` tags (retrospectives and insights grouped into thematic arcs), and 4 are `decision` type (architectural decisions). A new team member asks how lore''s different entry types and tags work together.'
2072
+ question: Which statement BEST describes Paradigm's unified lore model?
2073
+ choices:
2074
+ A: Lore entries are all the same — tags are purely cosmetic and do not affect searching or organization.
2075
+ B: Lore is the single project memory system. Entry types classify the nature of knowledge (session, retro, insight, decision), while tags like `arc:*` group related entries into themes — both are filterable via `paradigm_lore_search`.
2076
+ C: Session entries and reflection entries are stored in separate directories, with tags used only for cross-referencing between them.
2077
+ D: The `arc:*` tags are managed by a separate arc subsystem that must be initialized before use.
2078
+ E: Entry types are deprecated — tags alone drive all classification in the new model.
2079
+ correct: B
2080
+ explanation: 'Paradigm''s unified lore model uses one system with two classification axes: entry `type` classifies the nature of the knowledge (agent-session for automated records, retro for retrospectives, insight for patterns, decision for choices, etc.), while tags provide flexible grouping (arc:* for thematic arcs, assessment:* for reflection type, plus arbitrary project tags). Both are searchable via `paradigm_lore_search` — you can filter by type, tag prefix, symbol, author, and date range. All entries live in the same `.paradigm/lore/entries/` directory structure regardless of type or tags.'
2081
+ slot: slot-093
2082
+ section: para-501
2083
+ - id: plsat-094
2084
+ scenario: 'You receive a task: "Add a new Settings page to the application." The project has a `.paradigm/protocols/` directory with several `.protocol` files. You know that previous agents added similar pages (Logs, Events, Dashboard) following the same pattern each time.'
2085
+ question: What should you do FIRST before exploring the codebase?
2086
+ choices:
2087
+ A: Read every existing page component to understand the pattern
2088
+ B: Call `paradigm_protocol_search` with your task description to check for a matching protocol
2089
+ C: Call `paradigm_lore_record` to document that you are starting the task
2090
+ D: Call `paradigm_protocol_record` to create a new protocol for the Settings page
2091
+ E: Read the `.paradigm/protocols/index.yaml` file directly to scan for relevant entries
2092
+ correct: B
2093
+ explanation: paradigm_protocol_search is the agent's first stop before exploring the codebase. It takes a natural language task description and returns matching protocols with steps, exemplar files, and freshness info — typically saving thousands of exploration tokens. Reading existing pages (A) is exactly the expensive exploration that protocols prevent. Recording lore (C) is done after work, not before. Recording a protocol (D) is done after completing repeatable work. Reading index.yaml directly (E) bypasses the fuzzy search that matches task descriptions to protocols.
2094
+ slot: slot-094
2095
+ section: para-301
2096
+ variants:
2097
+ - id: plsat-094b
2098
+ scenario: 'An agent needs to add a new API endpoint to the project. The project has 15 recorded protocols covering common tasks. The agent calls `paradigm_protocol_search({ task: "add a new API endpoint" })` and gets back a protocol with 4 steps: create route file, add handler, register route, verify with build.'
2099
+ question: What should the agent do next?
2100
+ choices:
2101
+ A: Ignore the protocol and explore the codebase to find its own approach
2102
+ B: Call `paradigm_protocol_get` with the protocol ID to get full details, then follow the steps using the exemplar as reference
2103
+ C: Call `paradigm_protocol_record` to save the protocol it just found
2104
+ D: Run `paradigm_reindex` to make sure the protocol is up to date
2105
+ E: Call `paradigm_protocol_validate` to check all protocols before proceeding
2106
+ correct: B
2107
+ explanation: After finding a matching protocol via search, the next step is paradigm_protocol_get to retrieve full details (including the exemplar file path and detailed step notes), then follow the steps. The exemplar is the canonical file to study for the pattern. Ignoring the protocol (A) wastes the lookup. Recording (C) is for new protocols. Reindex (D) and full validation (E) are maintenance operations, not implementation steps.
2108
+ - id: plsat-095
2109
+ scenario: 'An agent just finished adding a new Sentinel event schema — the third such schema added to the project this month. Each time, the agent followed the same steps: create a schema file, register it in the schema index, add a migration, and verify. No protocol exists for this task yet.'
2110
+ question: What should the agent do regarding protocols?
2111
+ choices:
2112
+ A: Nothing — protocols are only created by project maintainers, not agents
2113
+ B: Call `paradigm_protocol_record` to capture the repeatable pattern it just followed
2114
+ C: Edit an existing protocol to add the schema steps as a sub-procedure
2115
+ D: Wait for `paradigm_reindex` to auto-generate a protocol from git history
2116
+ E: File an issue asking a human to write the protocol later
2117
+ correct: B
2118
+ explanation: Protocols are captured AFTER completing work, by the agent that did the work. When an agent completes a repeatable task and no protocol existed, it should call paradigm_protocol_record with the steps it followed, trigger phrases, tags, and an exemplar file. This ensures the next agent that receives a similar task can skip exploration entirely. Reindex (D) validates existing protocols but does not auto-generate new ones from git history.
2119
+ slot: slot-095
2120
+ section: para-301
2121
+ variants:
2122
+ - id: plsat-095b
2123
+ scenario: After recording lore for a session where the agent created two new view components following the existing LogsView pattern, the `paradigm_lore_record` response includes a `protocol_suggestion` field with a draft protocol.
2124
+ question: What triggered this protocol suggestion?
2125
+ choices:
2126
+ A: The agent explicitly asked for protocol suggestions in the lore_record call
2127
+ B: The lore system detected that the session created new files following existing patterns in the same directory
2128
+ C: All lore entries automatically include protocol suggestions
2129
+ D: The protocol suggestion was cached from a previous session
2130
+ E: The lore system runs paradigm_protocol_search on every lore entry
2131
+ correct: B
2132
+ explanation: 'When paradigm_lore_record is called, it runs a detection heuristic: if the session created 2+ new files in a directory that already has similar files, or modified the same ''registration'' files that existing protocols touch, it includes a protocol_suggestion in the response. This nudges agents to capture repeatable patterns without manual intervention. Not all lore entries trigger suggestions (C) — only those with detectable repeatable patterns.'
2133
+ - id: plsat-096
2134
+ scenario: During `paradigm_reindex`, the system validates all protocols. Protocol P-add-view references `ui/src/views/LogsView.tsx` as its exemplar. The file still exists but was significantly refactored two weeks after the protocol was last verified.
2135
+ question: What status will the reindex assign to this protocol?
2136
+ choices:
2137
+ A: '`current` — the file still exists, so the protocol is valid'
2138
+ B: '`stale` — the exemplar has been modified since the protocol was last verified'
2139
+ C: '`broken` — any change to a referenced file invalidates the protocol'
2140
+ D: '`deprecated` — protocols older than 30 days are automatically deprecated'
2141
+ E: '`unknown` — reindex cannot determine status without running the protocol'
2142
+ correct: B
2143
+ explanation: 'During reindex validation, a protocol is marked ''stale'' when its exemplar or referenced files have been modified since last_verified. The file still exists (so it is not ''broken''), but the protocol''s steps might no longer match the current code pattern. A ''broken'' status (C) is reserved for when referenced files are missing entirely. Stale protocols still work but should be reviewed and refreshed after successful use via paradigm_protocol_update with refresh: true.'
2144
+ slot: slot-096
2145
+ section: para-301
2146
+ variants:
2147
+ - id: plsat-096b
2148
+ scenario: 'An agent calls `paradigm_protocol_validate({ id: "P-add-api-route" })`. The protocol''s step 2 references the file `src/routes/index.ts`, but that file was deleted during a recent refactoring.'
2149
+ question: What status will the validation assign?
2150
+ choices:
2151
+ A: '`stale` — a referenced file has changed'
2152
+ B: '`current` — the protocol itself is still syntactically valid'
2153
+ C: '`broken` — a referenced file no longer exists'
2154
+ D: '`warning` — the file might be temporarily missing'
2155
+ E: '`archived` — protocols with missing references are auto-archived'
2156
+ correct: C
2157
+ explanation: A 'broken' status means one or more files referenced by the protocol (targets, exemplars, or template_from) no longer exist. This is more severe than 'stale' (where files exist but have been modified). A broken protocol cannot be reliably followed until its references are updated to point to existing files via paradigm_protocol_update.
2158
+ - id: plsat-097
2159
+ scenario: 'You run `paradigm init` on an existing Next.js project. The output shows `discipline: fullstack` and `stack: nextjs`. Your colleague asks what the difference is between a discipline and a stack preset.'
2160
+ question: Which statement BEST describes the relationship between disciplines and stack presets?
2161
+ choices:
2162
+ A: They are the same thing — 'discipline' is the old name and 'stack preset' is the new name
2163
+ B: Disciplines define domain-level symbol mappings (web, backend, mobile), while stack presets layer framework-specific configuration (Next.js, FastAPI, SwiftUI) on top of the discipline
2164
+ C: Stack presets replace disciplines entirely — once a preset is detected, the discipline is ignored
2165
+ D: Disciplines are for code organization and stack presets are for deployment configuration
2166
+ E: Stack presets are only used for auto-scan patterns, while disciplines control everything else
2167
+ correct: B
2168
+ explanation: 'Disciplines and stack presets are a two-layer system. The discipline (e.g., ''fullstack'') defines broad symbol mappings for the development domain. The stack preset (e.g., ''nextjs'') adds framework-specific refinements: scan hints for Next.js patterns like app/ routes and server components, purpose-required paths, and additional symbol mappings. The preset extends the discipline — it does not replace it.'
2169
+ slot: slot-097
2170
+ section: para-201
2171
+ variants:
2172
+ - id: plsat-097b
2173
+ scenario: 'A team is setting up Paradigm on a Flutter mobile app. They run `paradigm init` and see `discipline: mobile` with `stack: flutter`. They want to see what other stack presets are available for mobile projects.'
2174
+ question: Which command shows available stack presets filtered by discipline?
2175
+ choices:
2176
+ A: '`paradigm disciplines --list`'
2177
+ B: '`paradigm presets --discipline mobile`'
2178
+ C: '`paradigm init --show-stacks`'
2179
+ D: '`paradigm config --list-presets`'
2180
+ E: '`paradigm scan --detect-stack`'
2181
+ correct: B
2182
+ explanation: The `paradigm presets` command lists all available stack presets. The `--discipline` flag filters to show only presets for a specific discipline. For mobile, this would show flutter, swift-ios, kotlin-android, and react-native presets.
2183
+ - id: plsat-098
2184
+ scenario: You join a large existing project with 200+ source files and no Paradigm setup. Running `paradigm init` creates the `.paradigm/` directory, but no `.purpose` files exist yet. A colleague suggests running `paradigm scan auto` to bootstrap the project.
2185
+ question: What does `paradigm scan auto` do?
2186
+ choices:
2187
+ A: It reads all source files line-by-line and creates comprehensive documentation for every function
2188
+ B: It uses regex-based heuristics to detect components, routes, auth patterns, and signals in your codebase, then generates draft `.purpose` files with detected symbols
2189
+ C: It copies `.purpose` templates from a global registry and places them in every directory
2190
+ D: It connects to an AI service to analyze your code and generate documentation
2191
+ E: It deletes all existing `.purpose` files and starts fresh
2192
+ correct: B
2193
+ explanation: paradigm scan auto uses pattern-based detection to find components (exported classes/functions), routes (HTTP method patterns like app.get/router.post), auth patterns (JWT, session checks, middleware), and signals (event emitters). It produces draft .purpose files with detected symbols and confidence levels. The detection is local and regex-based — it does not call external services or read every line. Stack presets enhance the scan with framework-specific patterns.
2194
+ slot: slot-098
2195
+ section: para-301
2196
+ variants:
2197
+ - id: plsat-098b
2198
+ scenario: 'After running `paradigm scan auto` on a FastAPI project, the auto-scan detects several route handlers in `src/routes/users.py` and marks them with `confidence: high`. It also finds some utility functions and marks them `confidence: medium`.'
2199
+ question: What do the confidence levels on auto-detected symbols indicate?
2200
+ choices:
2201
+ A: How important the symbol is to the project — high means critical, medium means optional
2202
+ B: How certain the scanner is that the detected pattern actually represents that symbol type — high means strong pattern match, medium means heuristic match
2203
+ C: How many lines of code the detected symbol contains — more lines means higher confidence
2204
+ D: How recently the file was modified — recently modified files get higher confidence
2205
+ E: How many other symbols reference this one — more references means higher confidence
2206
+ correct: B
2207
+ explanation: Confidence levels reflect the scanner's certainty about the detection. A route handler matching `@app.get('/users')` is a strong, unambiguous pattern match (high confidence). A utility function detected from an exported function that doesn't match any specific pattern is a heuristic match (medium confidence). Low confidence detections are more speculative. Users should review auto-generated .purpose files, especially medium and low confidence entries.
2208
+ - id: plsat-099
2209
+ scenario: A developer is tasked with adding Paradigm to a mature React Native project that has been in development for two years. The project has 150+ components, navigation stacks, Redux state management, and several API integration files. The developer is worried about the setup effort.
2210
+ question: What is the recommended approach for adding Paradigm to a large existing project?
2211
+ choices:
2212
+ A: Document every single component on day one — comprehensive coverage is required before Paradigm is useful
2213
+ B: Start with `paradigm init` (which auto-detects discipline and stack), then create one `.purpose` file for the most critical module, and expand incrementally
2214
+ C: Rewrite the project structure to match Paradigm's expected directory layout
2215
+ D: Wait until starting a new project — Paradigm cannot be added to existing codebases
2216
+ E: Run `paradigm scan auto` and commit all generated files without review
2217
+ correct: B
2218
+ explanation: Paradigm is designed for incremental adoption. Start with `paradigm init` which auto-detects your discipline (mobile) and stack preset (react-native), configuring symbol mappings and scan patterns for your framework. Then create one .purpose file for your most important module. You can optionally run `paradigm scan auto` to bootstrap additional .purpose files, but always review auto-generated content. A common pitfall is trying to document everything on day one — start small and expand as the project grows.
2219
+ slot: slot-099
2220
+ section: para-101
2221
+ variants:
2222
+ - id: plsat-099b
2223
+ scenario: 'A team runs `paradigm init` on their Django project. The init command auto-detects `discipline: fullstack` and `stack: django`. The team then runs `paradigm scan auto` which generates draft `.purpose` files for `views/`, `models/`, and `urls/` directories.'
2224
+ question: Why did the auto-scan know to look in `views/`, `models/`, and `urls/` directories?
2225
+ choices:
2226
+ A: These are hardcoded directories that every Paradigm scan checks regardless of discipline
2227
+ B: The django stack preset provides scan hints with Django-specific component patterns, route patterns, and directory structures
2228
+ C: The scan randomly explores all directories and happened to find code there
2229
+ D: The team manually configured these directories in config.yaml before scanning
2230
+ E: Django is a special case with its own dedicated scanner module in Paradigm
2231
+ correct: B
2232
+ explanation: 'Stack presets include scan hints — framework-specific patterns that tell the auto-scanner where to look and what patterns to match. The django preset knows that views.py files contain route handlers, models.py files contain data models, and urls.py files define URL routing. This is why stack presets solve the cold-start problem: they bring framework knowledge that makes auto-scanning productive for existing projects.'
2233
+ - id: plsat-100
2234
+ scenario: 'Your project has a `#PaymentService` that coordinates payment processing and integrates with Stripe. A new developer asks whether they should use `type: integration` or `tags: [integration]` to capture the Stripe relationship.'
2235
+ question: What is the correct approach?
2236
+ choices:
2237
+ A: '`type: integration` — because the Stripe integration is the most important thing about this component'
2238
+ B: '`type: service` with `tags: [integration]` — type describes structural role, tags describe domain/behavior'
2239
+ C: 'Both `type: integration` and `tags: [integration]` — redundancy is good for search'
2240
+ D: Neither — integration is a v1 concept replaced by `&` prefix in v2
2241
+ E: '`type: stripe` — use the specific integration name as the type'
2242
+ correct: B
2243
+ explanation: 'The `type` field describes a component''s structural role — what the code IS architecturally. PaymentService is a service, so `type: service`. The Stripe integration is a behavioral/domain concern, captured with `tags: [integration]`. Type and tags serve different classification axes: type = architecture, tags = domain.'
2244
+ slot: slot-100
2245
+ section: para-101
2246
+ variants:
2247
+ - id: plsat-100b
2248
+ scenario: 'A developer adds a new `#EmailValidator` utility and sets `type: validator` in the .purpose file. Another developer points out that `validator` is not in the project''s `component_types` glossary in config.yaml.'
2249
+ question: 'Is `type: validator` valid?'
2250
+ choices:
2251
+ A: No — types must exactly match the glossary entries or they are rejected
2252
+ B: Yes — types are open strings and the glossary is descriptive only, not enforced
2253
+ C: No — you must add it to the glossary first before using it
2254
+ D: Yes — but only if the developer also adds a `~validator` aspect
2255
+ E: It depends on the `strict-types` setting in config.yaml
2256
+ correct: B
2257
+ explanation: 'Component types are open strings — any project can invent its own vocabulary. The glossary in config.yaml is descriptive only (it helps agents understand types) but does not enforce or block unknown types. The developer can use `type: validator` freely.'
2258
+ - id: plsat-101
2259
+ scenario: You have a `#GazeRouter` component that maps gaze coordinates to dispatch targets. It is managed by `#InputOrchestrator`. You want to express this hierarchy in the .purpose file.
2260
+ question: How should the parent relationship be declared?
2261
+ choices:
2262
+ A: 'On `#InputOrchestrator` with `children: ["#GazeRouter"]`'
2263
+ B: 'On `#GazeRouter` with `parent: "#InputOrchestrator"`'
2264
+ C: In a separate `relationships` section at the top of the .purpose file
2265
+ D: In portal.yaml alongside route gates
2266
+ E: 'Using `tags: [child-of-InputOrchestrator]` on GazeRouter'
2267
+ correct: B
2268
+ explanation: Parent relationships are declared on the child component using the `parent` field with a `#` symbol reference. This keeps .purpose files decentralized — you don't need to maintain a children roster on the parent. The hierarchy is computed from each child's `parent` declaration; children lists are derived, not maintained by hand on the parent.
2269
+ slot: slot-101
2270
+ section: para-101
2271
+ - id: plsat-102
2272
+ scenario: An AI agent needs to find all router components in a project to understand the dispatch architecture. The project uses component types consistently.
2273
+ question: What is the most efficient MCP tool call?
2274
+ choices:
2275
+ A: '`paradigm_search` with `query: "router"` — search by name'
2276
+ B: '`paradigm_search` with `query: "*"` and `componentType: "router"` — filter by type'
2277
+ C: '`paradigm_navigate` with `intent: "find"` and `target: "router"` — navigate to routers'
2278
+ D: '`paradigm_ripple` with `symbol: "#router"` — check router dependencies'
2279
+ E: 'Read every .purpose file and grep for `type: router`'
2280
+ correct: B
2281
+ explanation: 'The `paradigm_search` tool accepts a `componentType` filter that directly queries the symbol index for components of a specific type. This is the most efficient approach — it uses the index instead of reading files, and returns exactly the components with `type: router`.'
2282
+ slot: slot-102
2283
+ section: para-101
2284
+ - id: plsat-103
2285
+ scenario: 'Your team is evaluating Symphony''s The Score for inter-agent communication. A team member asks: ''Does The Score require a central server or WebSocket connection to route messages between agents on the same machine?'''
2286
+ question: How does The Score route messages between agents on a single machine?
2287
+ choices:
2288
+ A: Through a persistent WebSocket connection managed by Sentinel's event hub
2289
+ B: Through a central message broker that must be started with `paradigm symphony serve`
2290
+ C: Through file-based mailboxes — agents write to JSONL files in `~/.paradigm/score/agents/` and poll for new messages via `/loop`
2291
+ D: Through Conductor's native IPC channel between Swift and Node.js processes
2292
+ E: Through a shared SQLite database at `~/.paradigm/score/messages.db`
2293
+ correct: C
2294
+ explanation: The Score is file-based with zero daemon dependencies. Each agent has a mailbox directory containing inbox.jsonl and outbox.jsonl. Messages are appended as single JSON lines. Agents poll for new messages using `/loop 10s paradigm_symphony_poll`. No WebSocket, no broker, no Conductor required — just filesystem reads and writes.
2295
+ slot: slot-103
2296
+ section: para-501
2297
+ variants:
2298
+ - id: plsat-103b
2299
+ scenario: A developer is comparing Symphony The Score to traditional inter-process communication. They note that The Score uses JSONL files for message passing instead of sockets, pipes, or shared memory.
2300
+ question: Why does The Score use JSONL files instead of a real-time transport like WebSockets?
2301
+ choices:
2302
+ A: JSONL is faster than WebSockets for small messages under 1KB
2303
+ B: File-based messaging requires zero dependencies beyond the Paradigm CLI — no Conductor, no Sentinel, no persistent server process
2304
+ C: Claude Code sessions cannot open network connections, so files are the only option
2305
+ D: JSONL files provide built-in encryption that WebSockets lack
2306
+ E: File-based messaging is required by macOS sandboxing rules for CLI tools
2307
+ correct: B
2308
+ explanation: The Score's design goal is zero-dependency messaging. It works with nothing beyond the Paradigm CLI — no Conductor, no Sentinel, no network configuration. File-based JSONL is append-only (safe for concurrent writes), trivial to parse, and works on every OS without additional runtime dependencies. Later phases (Conductor, Sentinel) add richer transports, but The Score is the foundation that always works.
2309
+ - id: plsat-104
2310
+ scenario: 'You have two Claude Code sessions open: one working on `a-paradigm` in the core library role, and another working on `a-paradigm` in the backend role. After running `paradigm symphony join`, each session gets an identity.'
2311
+ question: How are agent identities determined in The Score?
2312
+ choices:
2313
+ A: Random UUIDs assigned at session start — different every time
2314
+ B: The process ID (PID) of the Claude Code session is used as the identity
2315
+ C: Derived from project directory + role (e.g., `a-paradigm/core`) — deterministic and stable across session restarts for the same project context
2316
+ D: The user's GitHub username combined with a session counter
2317
+ E: 'Assigned sequentially by the mail router: agent-001, agent-002, etc.'
2318
+ correct: C
2319
+ explanation: 'Agent identity in The Score is deterministic: `{project-name}/{role}`. The same project opened in the same context always gets the same identity, even across session restarts. This stability means other agents can reliably address messages to `a-paradigm/backend` knowing it will reach the backend agent regardless of which specific Claude Code session is running. PID maps to identity via identity.json but is not the identity itself.'
2320
+ slot: slot-104
2321
+ section: para-501
2322
+ variants:
2323
+ - id: plsat-104b
2324
+ scenario: 'After running `paradigm symphony whoami`, an agent sees: `agent-abc123 (a-paradigm/backend) — 3 linked peers, 2 active threads`. The agent then crashes and a new Claude Code session is started for the same project directory with the same working context.'
2325
+ question: What happens to the agent's identity after the restart?
2326
+ choices:
2327
+ A: A new random identity is assigned — the old one is permanently lost
2328
+ B: The identity `a-paradigm/backend` is restored because it is derived from the project directory and role, not the PID
2329
+ C: The identity is lost unless the agent calls `paradigm symphony recover` within 5 minutes
2330
+ D: The new session gets a different identity with a `-2` suffix to avoid conflicts
2331
+ E: The old session's mailbox is deleted and a fresh one is created with a new ID
2332
+ correct: B
2333
+ explanation: Agent identity is derived from project directory + role, not from PID or session-specific state. When a new session starts for the same project context, it gets the same deterministic identity. The PID-to-identity mapping in identity.json is updated, but the mailbox (inbox.jsonl, outbox.jsonl) persists. Unread messages from before the crash are still in the inbox waiting for the next poll.
2334
+ - id: plsat-105
2335
+ scenario: An agent investigating a production bug sends a Symphony message to the frontend agent. The agent wants to indicate that it is asking a question and expects information in response, not just acknowledgment.
2336
+ question: Which message intent should the agent use?
2337
+ choices:
2338
+ A: '`context` — because the agent is seeking contextual information'
2339
+ B: '`clarification` — because the agent wants details clarified'
2340
+ C: '`question` — because the agent is asking for information and the intent classifies the message''s purpose for structured processing'
2341
+ D: '`reference` — because the agent wants the frontend agent to reference its recent changes'
2342
+ E: '`alert` — because the production bug is urgent'
2343
+ correct: C
2344
+ explanation: Message intents classify the purpose of a message. `question` signals that the sender is asking for information and expects a substantive response. `context` is for providing background (not requesting it). `clarification` is for asking about something already said. `alert` is specifically for forwarding Sentinel alerts. Intents help the receiving agent understand what kind of response is expected.
2345
+ slot: slot-105
2346
+ section: para-501
2347
+ variants:
2348
+ - id: plsat-105b
2349
+ scenario: 'During a Symphony conversation thread, Agent A proposes a fix: ''Make the currency field optional with a default of USD.'' Agent B agrees and wants to record this as a team decision that will be captured in Lore.'
2350
+ question: What intent should Agent B use when confirming the decision?
2351
+ choices:
2352
+ A: '`approval` — to approve Agent A''s proposal'
2353
+ B: '`decision` — to formally record the choice, triggering automatic Lore entry creation'
2354
+ C: '`action` — to announce it will implement the fix'
2355
+ D: '`verification` — to verify understanding of the proposal'
2356
+ E: '`context` — to provide context that it agrees'
2357
+ correct: B
2358
+ explanation: 'The `decision` intent serves a dual purpose: it communicates agreement within the conversation AND triggers automatic Lore integration. Symphony auto-records messages with `intent: decision` as lore entries of type `decision`, linking them to the thread and referenced symbols. Using `approval` (A) would confirm the proposal but would not trigger the Lore recording side effect.'
2359
+ - id: plsat-106
2360
+ scenario: 'Agent A on your machine needs `src/config/database.ts` from Agent B on a teammate''s machine. Agent A calls `paradigm_symphony_request_file` with the file path and reason. The teammate''s trust.yaml has `neverApprove: [".env*", "**/*.key"]` but no entry for `.ts` files.'
2361
+ question: What happens next in the file transfer pipeline?
2362
+ choices:
2363
+ A: The file is automatically sent because `.ts` files are not in the neverApprove list
2364
+ B: The request is queued and the teammate (human) receives a prompt to approve, deny, or approve with redaction — human approval is required for every file transfer
2365
+ C: Agent B automatically reads and sends the file since both agents are in the same Symphony network
2366
+ D: The request is denied because `database.ts` might contain database credentials
2367
+ E: The file is sent after a 5-minute delay to give the human time to intervene
2368
+ correct: B
2369
+ explanation: 'Every file transfer requires explicit human approval regardless of trust configuration. The neverApprove list adds hard denials (even if the human clicks approve), but not being on the neverApprove list does not mean auto-approval. The teammate sees a prompt with the file path, the requesting agent, and the reason, then chooses: approve, deny, or approve with redaction.'
2370
+ slot: slot-106
2371
+ section: para-501
2372
+ variants:
2373
+ - id: plsat-106b
2374
+ scenario: 'A developer sees a file request in their terminal via `paradigm symphony requests`: Agent C is requesting `.env.production` from their project. The developer''s trust.yaml includes `neverApprove: [".env*"]`. The developer runs `paradigm symphony approve req-xyz` anyway.'
2375
+ question: What happens when the developer tries to approve a file on the neverApprove list?
2376
+ choices:
2377
+ A: The approval succeeds — human override always takes priority over trust configuration
2378
+ B: The approval is rejected by the system — files matching neverApprove patterns are always denied, even if the human explicitly approves
2379
+ C: The file is sent but with all values redacted automatically
2380
+ D: The developer is prompted a second time to confirm the override
2381
+ E: The approval is queued for review by a second team member
2382
+ correct: B
2383
+ explanation: The neverApprove list is enforced absolutely by the system. Files matching patterns like `.env*`, `*.key`, or `*.pem` are always denied regardless of human action. This is a security guardrail — even well-intentioned approvals cannot override it. The developer would need to modify trust.yaml to remove the pattern before the file could be approved.
2384
+ - id: plsat-107
2385
+ scenario: 'A thread about migrating to a new API version has been active for 30 minutes. Four agents and two humans participated. The team agreed on an approach: skip the database migration and hotfix the serializer. Someone runs `paradigm symphony resolve thr-abc`.'
2386
+ question: What does thread resolution produce?
2387
+ choices:
2388
+ A: A git commit with the conversation as the commit message
2389
+ B: A comprehensive lore entry capturing the topic, participants, decisions made, actions taken, and symbols discussed — bridging ephemeral conversation to permanent project memory
2390
+ C: A .purpose file update adding the thread as a new component
2391
+ D: A Sentinel incident record linking the conversation to a production error
2392
+ E: An email summary sent to all team members who were not in the thread
2393
+ correct: B
2394
+ explanation: 'Thread resolution creates a lore entry that captures the entire collaborative context: the conversation topic, all participants (agents and humans), decisions made during the discussion, actions taken, and Paradigm symbols referenced. This bridges the gap between ephemeral real-time conversation and permanent project memory. Future developers can find and learn from the discussion via `paradigm_lore_search`.'
2395
+ slot: slot-107
2396
+ section: para-501
2397
+ variants:
2398
+ - id: plsat-107b
2399
+ scenario: An agent resolves a thread with `paradigm symphony resolve thr-def`. The thread contained 12 messages, 3 decisions, and referenced symbols `#payment-serializer`, `#api-types`, and `$refund-flow`. A week later, a new developer encounters a similar serialization issue.
2400
+ question: How can the new developer find the resolved thread's knowledge?
2401
+ choices:
2402
+ A: Search `~/.paradigm/score/threads/` for the thread JSON file — resolved threads are kept permanently
2403
+ B: 'Call `paradigm_lore_search` with `symbol: ''#payment-serializer''` — the resolved thread became a lore entry linked to the discussed symbols'
2404
+ C: Call `paradigm_symphony_thread` with the old thread ID — resolved threads remain in the Symphony network
2405
+ D: Check git log for the conversation — resolved threads are saved as commit messages
2406
+ E: The knowledge is lost — resolved threads are deleted from the filesystem
2407
+ correct: B
2408
+ explanation: 'When a thread is resolved, it becomes a lore entry tagged with the symbols discussed in the conversation. The new developer searching for `#payment-serializer` via `paradigm_lore_search` will find the entry, which contains the full conversation context, decisions, and actions. This is the core value of thread resolution: converting temporary discussion into searchable, permanent project memory.'
2409
+ - id: plsat-108
2410
+ scenario: 'You have set up The Score with 3 linked agents. Each agent has a mailbox, but none of them are running `/loop`. You send a message via `paradigm symphony send "Check the failing test in #auth-service"`. The message is written to each agent''s inbox.jsonl.'
2411
+ question: When will the agents see and respond to this message?
2412
+ choices:
2413
+ A: Immediately — messages trigger an interrupt in the Claude Code session
2414
+ B: Never — without `/loop`, agents have no mechanism to poll their inbox and will not discover the message
2415
+ C: Within 30 seconds — the MCP tool cache automatically polls inboxes
2416
+ D: On the next `paradigm_status` call — status checks include inbox polling
2417
+ E: When the agent explicitly calls `paradigm symphony read` from its session
2418
+ correct: B
2419
+ explanation: '`/loop` is the agent heartbeat. It runs `paradigm_symphony_poll` on a timer (typically every 10 seconds), which reads inbox.jsonl and presents messages to the agent. Without `/loop`, messages accumulate in the inbox with nobody reading them. The convenience command `paradigm symphony join` sets up both identity registration and the polling loop in one step. This is why the setup instructions always include `/loop 10s paradigm_symphony_poll` in each session.'
2420
+ slot: slot-108
2421
+ section: para-501
2422
+ variants:
2423
+ - id: plsat-108b
2424
+ scenario: 'An agent is running `/loop 10s paradigm_symphony_poll`. During one poll cycle, the tool returns 2 new messages: a question from the frontend agent and a decision message from a human. The agent processes both and sends replies via `paradigm_symphony_send`.'
2425
+ question: Where do the agent's replies go, and how are they delivered to the recipients?
2426
+ choices:
2427
+ A: Directly into each recipient's inbox.jsonl — the send tool writes to all inboxes simultaneously
2428
+ B: To this agent's outbox.jsonl — a mail router (or Conductor) picks up outbox messages and delivers them to the appropriate recipient inboxes
2429
+ C: To a central message queue at `~/.paradigm/score/queue.jsonl` that all agents read from
2430
+ D: Over a WebSocket connection to each recipient's MCP server
2431
+ E: To Sentinel's event hub, which broadcasts to all connected agents
2432
+ correct: B
2433
+ explanation: The agent writes replies to its own outbox.jsonl via `paradigm_symphony_send`. A mail router process (in The Score's Phase 0 implementation) or Conductor (in later phases) reads outbox files and delivers messages to the correct recipient inbox files. This separation of write (outbox) and delivery (router) keeps the protocol simple — agents only ever write to their own outbox and read from their own inbox.
2434
+ - id: plsat-109
2435
+ scenario: A developer is using `paradigm serve` to run the Platform. Their AI agent is helping refactor `#payment-service`. The agent wants to walk the developer through three related components on the Graph canvas, but the developer is currently reading a lore entry.
2436
+ question: What sequence of MCP tool calls should the agent use to present its walkthrough without disrupting the developer?
2437
+ choices:
2438
+ A: Call `paradigm_platform_navigate` three times rapidly for each component — the browser handles queuing
2439
+ B: Call `paradigm_platform_observe` first to check if the user is active, then `paradigm_platform_annotate` with a toast saying 'I'd like to show you something on the Graph', then navigate after the user responds
2440
+ C: Call `paradigm_platform_highlight` on all three symbols simultaneously — highlights work across all sections
2441
+ D: Write to the Symphony mailbox and wait for the developer to read the message
2442
+ E: Call `paradigm_platform_clear` first, then force-navigate to each component
2443
+ correct: B
2444
+ explanation: The agent should first observe the user's state to understand context (are they busy? what section? muted?). Since the user is actively reading lore, the agent should use a toast annotation to signal intent rather than auto-navigating. When the user is active (<5s since last interaction), navigate commands show a prompt rather than auto-executing — but checking observe first lets the agent tailor its approach.
2445
+ slot: slot-109
2446
+ section: para-501
2447
+ variants:
2448
+ - id: plsat-109b
2449
+ scenario: 'An AI agent calls `paradigm_platform_highlight({ symbols: [''#api-gateway'', ''#auth-middleware'', ''#rate-limiter''], label: ''Security surface'', duration: 8000, pulse: true })` on the Platform. The developer sees three nodes glowing on the Graph canvas.'
2450
+ question: What happens to the highlights after 8 seconds?
2451
+ choices:
2452
+ A: They remain until the developer clicks on one of the nodes
2453
+ B: They fade out automatically — the duration parameter sets auto-expiry on both server (UserStateTracker) and browser (agentStore)
2454
+ C: 'They persist until the agent calls `paradigm_platform_clear({ target: ''highlights'' })`'
2455
+ D: They remain but the pulse animation stops
2456
+ E: The Platform server removes them but the browser keeps a static glow
2457
+ correct: B
2458
+ explanation: The duration parameter controls auto-expiry. On the server, UserStateTracker schedules removal after the specified milliseconds. On the browser, agentStore sets a setTimeout that filters out the highlight after duration expires. Both sides independently clean up, so even if a message is lost, the highlight eventually disappears.
2459
+ - id: plsat-110
2460
+ scenario: The Platform server starts with `paradigm serve`. Two browser tabs are open. An MCP tool sends an `agent:annotate` command with type `callout` targeting `#database-pool`.
2461
+ question: How does the annotation reach both browser tabs?
2462
+ choices:
2463
+ A: The MCP tool sends the command directly to each tab via separate HTTP requests
2464
+ B: The Platform server stores the annotation in scan-index.json and both tabs poll for changes
2465
+ C: The MCP tool POSTs to /api/platform/agent-command, the server broadcasts a WebSocket message to all connected clients in the wsClients Set — both tabs receive it
2466
+ D: Only the active tab receives the annotation; the other tab receives it on focus
2467
+ E: The annotation is stored in localStorage, which is shared between tabs
2468
+ correct: C
2469
+ explanation: The Platform server maintains a Set<WebSocket> of all connected browser clients. When the agent command route receives a POST, it broadcasts the typed message (agent:annotate) to every client with readyState === OPEN. Both tabs have independent WebSocket connections to ws://localhost:3850/ws, so both receive the broadcast simultaneously.
2470
+ slot: slot-110
2471
+ section: para-501
2472
+ variants:
2473
+ - id: plsat-110b
2474
+ scenario: A developer opens the Platform at localhost:3850 and navigates to the Graph section. They select `#payment-service`. The Platform server's UserStateTracker records this activity. Ten seconds later, the AI agent calls `paradigm_platform_observe()`.
2475
+ question: What information does the observe tool return?
2476
+ choices:
2477
+ A: 'Only whether the Platform is running — `{ connected: true }`'
2478
+ B: The section, selected symbol, theme, mute state, connected agents, browser client count, and optionally active highlights/annotations
2479
+ C: The full DOM tree of the Platform UI for the agent to parse
2480
+ D: A diff of everything that changed since the agent's last observe call
2481
+ E: Only the section name and selected symbol — no agent or highlight info
2482
+ correct: B
2483
+ explanation: 'paradigm_platform_observe returns the full UI state from the UserStateTracker: connected (boolean), users (client count), agents (array of AgentPresence with agentId, color, timestamps), and state (section, selectedSymbol, theme, muted). With detail: ''full'', it also includes active highlights and annotations. This gives the agent a complete picture of the shared workspace.'
2484
+ - id: plsat-111
2485
+ scenario: An agent connected to the Platform has been idle for 3 minutes. No MCP tool calls have been made. The AgentPresenceManager runs its periodic cleanup.
2486
+ question: What happens to the agent's presence?
2487
+ choices:
2488
+ A: Nothing — agents are only removed when they explicitly disconnect
2489
+ B: The agent is marked as 'idle' but remains in the agents list with a dimmed indicator
2490
+ C: The agent is pruned from the presence list and an `agent:leave` message is broadcast to all browsers, removing the presence dot from the header
2491
+ D: The server sends a ping to the agent and waits for a response before deciding
2492
+ E: The agent's highlights and annotations are cleared but its presence remains
2493
+ correct: C
2494
+ explanation: The AgentPresenceManager runs pruneStale() every 30 seconds. Any agent whose lastActivity timestamp is more than 2 minutes old is removed from the agents Map and an agent:leave message is broadcast to all browser clients. The browser's agentStore filters out the agent, and the header presence dots update. This prevents ghost agents from accumulating.
2495
+ slot: slot-111
2496
+ section: para-501
2497
+ - id: plsat-112
2498
+ scenario: You're building a new Platform section that should respond to agent highlight commands. The existing sections (Graph, Lore, Overview) already work with the agent-driven UI system.
2499
+ question: Which browser-side component is responsible for receiving WebSocket agent messages and updating the Zustand store?
2500
+ choices:
2501
+ A: platformStore.ts — all state flows through the main platform store
2502
+ B: useActivityReporter — it handles all WebSocket communication bidirectionally
2503
+ C: useAgentEffects — it connects WebSocket `agent:*` messages to agentStore.handleAgentMessage, with auto-reconnect on disconnect
2504
+ D: AgentToast — it listens for WebSocket messages and renders toasts
2505
+ E: The Platform server pushes state updates directly into the browser's agentStore via a shared reference
2506
+ correct: C
2507
+ explanation: useAgentEffects is the WebSocket→store bridge. It establishes a WebSocket connection to ws://localhost:{port}/ws, listens for messages whose type starts with 'agent:', and dispatches them to agentStore.handleAgentMessage. It also handles auto-reconnect (3s delay on close). useActivityReporter (B) handles the opposite direction — reporting user actions TO the server. AgentToast (D) only renders; it reads from the store but doesn't handle WebSocket.
2508
+ slot: slot-112
2509
+ section: para-501
2510
+ - id: plsat-113
2511
+ scenario: A builder agent finishes implementing a new REST endpoint. It modified 4 files, resolved 1 blocker, and the tests pass. The agent needs to record this somewhere in the knowledge streams.
2512
+ question: Which knowledge stream is the correct destination for this entry?
2513
+ choices:
2514
+ A: Learning Journal — the agent learned how to build the endpoint
2515
+ B: Team Decision — the team decided to add an endpoint
2516
+ C: Work Log — it records what got done, files modified, outcome, and next steps
2517
+ D: Event Stream — it is an event that happened during work
2518
+ E: Lore — all project history goes into lore entries
2519
+ correct: C
2520
+ explanation: The Work Log stream answers 'what got done.' It captures the agent, summary, files_modified, symbols_touched, outcome (pass/fail/partial/blocked), and next_steps. It is project-scoped and ephemeral — designed for sprint boards and standup summaries. The Learning Journal is for personal insights the agent gains, not task completion. Team Decisions record rationale and alternatives for institutional choices.
2521
+ slot: slot-113
2522
+ section: para-601
2523
+ variants:
2524
+ - id: plsat-113b
2525
+ scenario: An agent spent 45 minutes debugging a flaky test in the CI pipeline. It turned out to be a race condition in the database setup. The agent fixed it, and the test suite passes now. The agent wants to log this work.
2526
+ question: Which knowledge stream should this entry go into?
2527
+ choices:
2528
+ A: Team Decision — the team needs to know about flaky tests
2529
+ B: Work Log — it captures what was done, duration, outcome, and what was fixed
2530
+ C: Learning Journal — the agent discovered a race condition pattern
2531
+ D: Event Stream — file-modified events are emitted automatically
2532
+ E: Both Work Log and Learning Journal equally — there is no distinction
2533
+ correct: B
2534
+ explanation: The Work Log is the correct primary destination — it records what got done (fixed flaky test), the outcome (pass), duration (45 min), and files modified. The agent might also record a Learning Journal entry about the race condition pattern it discovered, but the work itself belongs in the Work Log. These are separate entries in separate streams — the journal entry would link back to the work log via linked_work_log.
2535
+ - id: plsat-114
2536
+ scenario: During a code review, the architect agent points out that the builder's approach to token refresh will cause a race condition under concurrent requests. The builder realizes its mental model of the refresh flow was wrong and adjusts its confidence from 0.8 to 0.5 on `#token-refresh`.
2537
+ question: Which knowledge stream captures this learning moment?
2538
+ choices:
2539
+ A: Work Log — the builder did work (reviewed code)
2540
+ B: Team Decision — the team decided to change the approach
2541
+ C: Learning Journal — the builder received a correction and adjusted its confidence, which is a personal learning event
2542
+ D: Event Stream — the correction is a 'compliance-violation' event
2543
+ E: Work Log with outcome 'fail' — the original approach failed review
2544
+ correct: C
2545
+ explanation: The Learning Journal captures 'what I learned.' The trigger is 'correction_received', the insight is the corrected mental model, confidence_before is 0.8, confidence_after is 0.5, and the entry is agent-private (stored in ~/.paradigm/agents/{id}/journal/). It may or may not be transferable to other projects. The Work Log would separately record the review activity, but the learning itself belongs in the journal.
2546
+ slot: slot-114
2547
+ section: para-601
2548
+ variants:
2549
+ - id: plsat-114b
2550
+ scenario: A security agent reviewed an authentication module and predicted with 0.9 confidence that the session handling was secure. A penetration test later revealed a session fixation vulnerability. The security agent needs to record this calibration miss.
2551
+ question: Which knowledge stream is appropriate for this entry?
2552
+ choices:
2553
+ A: Team Decision — the team needs to decide how to fix the vulnerability
2554
+ B: Work Log — the penetration test results are work output
2555
+ C: Event Stream — emit an 'error-encountered' event
2556
+ D: Learning Journal — the agent had a confidence miss and needs to record the corrected understanding
2557
+ E: Both Work Log and Team Decision — the journal is only for minor insights
2558
+ correct: D
2559
+ explanation: 'The Learning Journal is the right stream for calibration misses. The trigger is ''confidence_miss'', with confidence_before: 0.9 and confidence_after adjusted downward. The journal is agent-private, stored in ~/.paradigm/agents/{id}/journal/, and travels with the agent across projects if marked transferable. A separate Team Decision might record the fix approach, but the personal calibration adjustment belongs in the journal.'
2560
+ - id: plsat-115
2561
+ scenario: The team holds a design discussion about whether to use WebSockets or Server-Sent Events for real-time updates. The architect proposes WebSockets, the builder supports it, and the reviewer dissents (preferring SSE for simplicity). They go with WebSockets. The rationale and alternatives need to be preserved.
2562
+ question: Which knowledge stream captures this?
2563
+ choices:
2564
+ A: Work Log — a design discussion is work
2565
+ B: Learning Journal — everyone learned something from the debate
2566
+ C: Team Decision — it records the choice, rationale, participants with stances, and alternatives considered
2567
+ D: Event Stream — emit a 'decision-made' event and the stream captures it
2568
+ E: Lore entry of type 'human-note' — a human-authored design choice belongs in the timeline
2569
+ correct: C
2570
+ explanation: 'Team Decisions capture institutional memory: the title, decision text, rationale, participants (with stances like ''proposed'', ''supported'', ''dissented''), alternatives_considered (with rejected_because), and affected symbols. They are project-scoped and long-lived (status: active/superseded/deprecated). The event stream might emit a ''decision-made'' event, but that is a notification — the decision record itself lives in the Team Decisions stream at .paradigm/decisions/. Note that "Team Decision" here refers to the *decisions stream* — distinct from the v5-era `decision` lore type, which was removed in v6.0. The decisions stream is fed by `paradigm_decision_record` and emits a companion lore `insight` entry to the journal stream for timeline coverage.'
2571
+ slot: slot-115
2572
+ section: para-601
2573
+ variants:
2574
+ - id: plsat-115b
2575
+ scenario: After a production incident, the team decides to add rate limiting to all public API endpoints. The security agent proposed it, the architect supported it, and the builder abstained. They considered IP-based limiting vs token-bucket and chose token-bucket. This needs to be recorded for future reference.
2576
+ question: Which knowledge stream should hold this record?
2577
+ choices:
2578
+ A: Work Log — an incident response is work that was done
2579
+ B: Team Decision — it preserves the choice, who participated, their stances, and why token-bucket was chosen over IP-based
2580
+ C: Learning Journal — the team learned from the incident
2581
+ D: All three streams equally — incidents generate entries everywhere
2582
+ E: Event Stream — the decision is an event type 'decision-made'
2583
+ correct: B
2584
+ explanation: 'Team Decisions are the institutional record for choices like this. The entry includes participants (security: proposed, architect: supported, builder: abstained), alternatives_considered (IP-based: rejected because it fails behind proxies), symbols_affected (#rate-limiter, #api-gateway), and status: active. The incident itself might generate work log entries and journal entries, but the decision about the approach belongs in the Team Decisions stream.'
2585
+ - id: plsat-116
2586
+ scenario: 'A security agent has attention patterns configured as: symbols: [''^*'', ''#*-auth'', ''#*-middleware''], paths: [''auth/**'', ''middleware/**''], concepts: [''permission'', ''JWT'', ''RBAC''], signals: [{ type: ''gate-added'' }, { type: ''route-created'' }], threshold: 0.4. A new event arrives: type: ''file-modified'', path: ''src/utils/date-formatter.ts'', symbols: [''#date-formatter''], keywords: [''formatting'', ''locale''].'
2587
+ question: Will the security agent self-nominate for this event?
2588
+ choices:
2589
+ A: Yes — the agent's threshold is low (0.4), so it nominates for most events
2590
+ B: No — the symbol '#date-formatter' does not match '^*', '#*-auth', or '#*-middleware'; the path 'src/utils/' does not match 'auth/**' or 'middleware/**'; the keywords have no concept overlap; and no signal matches. All four scores are 0, which is below 0.4
2591
+ C: Yes — '#date-formatter' matches '#*' because * is a wildcard for any suffix
2592
+ D: No — the agent only responds to signals, not file modifications
2593
+ E: Yes — every agent nominates for every event to ensure nothing is missed
2594
+ correct: B
2595
+ explanation: 'Attention scoring evaluates four dimensions: symbolMatch (does ''#date-formatter'' match ''^*'', ''#*-auth'', or ''#*-middleware''? No — it would need to end with ''-auth'' or ''-middleware''), pathMatch (''src/utils/date-formatter.ts'' doesn''t match ''auth/**'' or ''middleware/**''), conceptMatch (no overlap between [''formatting'', ''locale''] and [''permission'', ''JWT'', ''RBAC'']), and signalMatch (event type ''file-modified'' is not ''gate-added'' or ''route-created''). The final score is max(0, 0, 0, 0) = 0, which is below the 0.4 threshold.'
2596
+ slot: slot-116
2597
+ section: para-601
2598
+ variants:
2599
+ - id: plsat-116b
2600
+ scenario: 'A tester agent has attention patterns: paths: [''**/*.test.*'', ''**/*.spec.*''], concepts: [''test'', ''coverage'', ''assertion''], signals: [{ type: ''error-encountered'' }], threshold: 0.5. An event arrives: type: ''file-modified'', path: ''src/services/payment.ts'', symbols: [''#payment-service''], keywords: [''refactor'', ''extract method''].'
2601
+ question: Will the tester agent self-nominate?
2602
+ choices:
2603
+ A: Yes — any file modification could break tests, so the tester is always relevant
2604
+ B: Yes — the threshold of 0.5 is met because 'refactor' is similar to 'test'
2605
+ C: No — the path does not match '**/*.test.*' or '**/*.spec.*', no concept overlap ('refactor' and 'extract method' do not match 'test', 'coverage', or 'assertion'), and the event type 'file-modified' is not 'error-encountered'. Score is 0
2606
+ D: No — tester agents can only observe test files, not source files
2607
+ E: Yes — '#payment-service' triggers the tester because payments need testing
2608
+ correct: C
2609
+ explanation: 'The tester''s attention has no symbol patterns, so symbolMatch is 0. The path ''src/services/payment.ts'' doesn''t match ''**/*.test.*'' or ''**/*.spec.*'', so pathMatch is 0. The keywords [''refactor'', ''extract method''] have no overlap with [''test'', ''coverage'', ''assertion''], so conceptMatch is 0. The event type ''file-modified'' is not ''error-encountered'', so signalMatch is 0. Final score: max(0, 0, 0, 0) = 0, below the 0.5 threshold.'
2610
+ - id: plsat-117
2611
+ scenario: 'An architect agent has attention: symbols: [''$*'', ''#*''], concepts: [''architecture'', ''design'', ''pattern''], threshold: 0.5. An event arrives: type: ''flow-modified'', symbols: [''$checkout-flow'', ''#cart-service''], keywords: [''added step'', ''validation''], context: ''New payment validation step added to checkout flow''.'
2612
+ question: Will the architect self-nominate, and what is the approximate attention score?
2613
+ choices:
2614
+ A: No — the architect only cares about design documents, not flow changes
2615
+ B: Yes — '$checkout-flow' matches '$*' giving symbolMatch=1.0, and the score exceeds the 0.5 threshold
2616
+ C: Yes — but only because the keyword 'validation' triggers a concept match
2617
+ D: No — the threshold of 0.5 requires at least two dimensions to match
2618
+ E: Yes — every event with symbols triggers every agent that has symbol patterns
2619
+ correct: B
2620
+ explanation: The architect's symbol pattern '$*' matches '$checkout-flow' (any symbol starting with $), giving symbolMatch=1.0. The pattern '#*' also matches '#cart-service', but the symbolMatch dimension is already at its 1.0 cap, so this second match adds nothing further. The final score is max(symbolMatch, pathMatch, conceptMatch, signalMatch) = max(1.0, 0, partial, 0) = 1.0, well above the 0.5 threshold. The agent will self-nominate. Note that conceptMatch might also contribute ('pattern' could partial-match against context), but symbolMatch alone is sufficient.
2621
+ slot: slot-117
2622
+ section: para-601
2623
+ variants:
2624
+ - id: plsat-117b
2625
+ scenario: 'A reviewer agent has attention: concepts: [''code quality'', ''bug'', ''smell'', ''convention''], threshold: 0.6. An event arrives: type: ''error-encountered'', keywords: [''null pointer'', ''uncaught exception'', ''bug''], context: ''TypeError: Cannot read properties of undefined in auth middleware'', severity: ''error''.'
2626
+ question: Will the reviewer self-nominate?
2627
+ choices:
2628
+ A: No — the reviewer has no symbol or path patterns, so it cannot score above 0
2629
+ B: No — 'error-encountered' events are only for tester agents
2630
+ C: Yes — the keyword 'bug' matches the concept 'bug', giving conceptMatch of at least 0.25 (1 of 4 concepts). Since max score uses the highest dimension, this gives 0.25 which is below 0.6, so actually no
2631
+ D: Yes — 'bug' matches concept 'bug' (1/4 = 0.25 conceptMatch), but the score is max(0, 0, 0.25, 0) = 0.25, which is below the 0.6 threshold, so the agent stays quiet
2632
+ E: Yes — the severity 'error' automatically forces all agents to nominate
2633
+ correct: D
2634
+ explanation: The reviewer has no symbol patterns (symbolMatch=0), no path patterns (pathMatch=0), and no signal patterns (signalMatch=0). For concepts, 'bug' matches 1 of 4 concepts, giving conceptMatch = 1/4 = 0.25. The final score is max(0, 0, 0.25, 0) = 0.25, which is below the 0.6 threshold. The agent's quietReason would be 'below-threshold'. Severity does not override the attention threshold — agents only speak when their specific attention patterns fire strongly enough.
2635
+ - id: plsat-118
2636
+ scenario: 'A learning journal entry contains: insight: ''When JWT tokens use RS256 with the PAYMENTS_SIGNING_KEY, rotation requires coordinating across 3 services.'' The data policy has learning_journal ring: ''user-scoped'' with redaction patterns: [{ pattern: ''\\b[A-Z_]{2,}_KEY\\b'' }, { pattern: ''password|secret|token'' }].'
2637
+ question: Which trust ring does the learning journal belong to, and what happens to the content?
2638
+ choices:
2639
+ A: Ring 1 (project-locked) — journals never leave the project
2640
+ B: Ring 2 (user-scoped) — the journal travels across the user's projects, but 'PAYMENTS_SIGNING_KEY' is redacted by the [A-Z_]{2,}_KEY pattern and 'token' is redacted by the password|secret|token pattern
2641
+ C: Ring 3 (creator-upstream) — journal insights flow to agent creators anonymized
2642
+ D: Ring 2 (user-scoped) — but no redaction occurs because journals are already private
2643
+ E: Ring 4 (network-public) — aggregated learning patterns are shared publicly
2644
+ correct: B
2645
+ explanation: The learning journal's ring is 'user-scoped' (Ring 2), meaning it travels across the user's own projects but never beyond. The data policy's redaction patterns are applied at the 'journal-recording' enforcement boundary. The regex '\b[A-Z_]{2,}_KEY\b' matches 'PAYMENTS_SIGNING_KEY', and 'password|secret|token' matches 'token' in 'JWT tokens'. Both are redacted before storage. Trust rings are concentric — data classified as Ring 2 can be read in Ring 1 (project) and Ring 2 (user) contexts, but never reaches Ring 3 (upstream) or Ring 4 (network).
2646
+ slot: slot-118
2647
+ section: para-601
2648
+ variants:
2649
+ - id: plsat-118b
2650
+ scenario: 'A work log entry records: summary: ''Fixed database connection pooling for the POSTGRES_SECRET_URL endpoint, updated #db-pool configuration.'' The data policy has work_log ring: ''project-locked'' with deny_content: [''code_snippets'', ''file_contents'', ''diff_content''] and no redaction patterns.'
2651
+ question: What trust ring contains this work log, and is the content filtered?
2652
+ choices:
2653
+ A: Ring 1 (project-locked) — the content stays in the project; deny_content blocks code snippets but the summary text is allowed because 'file_paths' and 'symbol_names' are in allow_content
2654
+ B: Ring 2 (user-scoped) — work logs travel with the user across projects
2655
+ C: Ring 1 (project-locked) — but 'POSTGRES_SECRET_URL' should be redacted
2656
+ D: Ring 1 (project-locked) — the entire entry is blocked because it mentions a secret URL
2657
+ E: Ring 3 (creator-upstream) — work logs are feedback for agent creators
2658
+ correct: A
2659
+ explanation: Work logs default to Ring 1 (project-locked) — they never leave the project. The allow_content list includes 'file_paths', 'symbol_names', and 'outcome', so the summary mentioning '#db-pool' and describing the fix is fine. The deny_content blocks 'code_snippets', 'file_contents', and 'diff_content', but a summary paragraph is not a code snippet. Note that 'POSTGRES_SECRET_URL' is not redacted because the work_log stream has no redaction patterns in the default policy — it relies on project-locked ring containment instead.
2660
+ - id: plsat-119
2661
+ scenario: 'A project''s CLAUDE.md has grown to 850 lines. It includes: the symbol table (5 symbols), commit conventions, agent onboarding steps, a 200-line logging guide with directory mapping tables, a 150-line portal protocol specification, and 100 lines of MCP workflow details. The team wants to reduce base context cost.'
2662
+ question: Using Paradigm's guidance resource model, what should stay inline in CLAUDE.md and what should move?
2663
+ choices:
2664
+ A: Everything stays in CLAUDE.md — agents need all context upfront to avoid errors
2665
+ B: Move everything to guidance resources — CLAUDE.md should only have the project name
2666
+ C: The symbol table, commit conventions, and agent onboarding steps stay inline (~150 lines). The logging guide, portal protocol, and MCP workflow move to on-demand guidance resources (paradigm://guidance/logging, paradigm://guidance/portal, paradigm://guidance/mcp-workflow)
2667
+ D: Only the symbol table stays inline. Everything else, including commit conventions, moves to guidance resources
2668
+ E: Keep the logging guide inline (agents need it every session) and move everything else to guidance
2669
+ correct: C
2670
+ explanation: 'Paradigm''s guidance resource model splits CLAUDE.md into two tiers: (1) always-loaded base context (~150 lines) containing the symbol system, conventions, and onboarding steps that every session needs, and (2) on-demand guidance resources loaded via MCP resource URIs only when relevant. The logging guide, portal protocol, and MCP workflow are reference material — an agent building a React component doesn''t need the portal spec. Moving them to paradigm://guidance/* reduces base context from ~850 to ~150 lines while keeping all guidance one tool call away.'
2671
+ slot: slot-119
2672
+ section: para-601
2673
+ variants:
2674
+ - id: plsat-119b
2675
+ scenario: A developer notices that their CLAUDE.md includes a detailed multi-agent orchestration section (120 lines), a flow-first development guide (80 lines), a workspace configuration reference (60 lines), and the core symbol system with conventions (100 lines). They want to follow Paradigm's context efficiency pattern.
2676
+ question: What is the correct split between inline CLAUDE.md and on-demand guidance?
2677
+ choices:
2678
+ A: Keep everything — 360 lines is acceptable for CLAUDE.md
2679
+ B: The core symbol system and conventions stay inline. Orchestration, flow-first, and workspace references become guidance resources loaded via paradigm://guidance/orchestration, paradigm://guidance/flows, and paradigm://guidance/workspaces
2680
+ C: Move everything to guidance resources and leave CLAUDE.md empty
2681
+ D: Keep orchestration inline because multi-agent work is common; move only workspaces
2682
+ E: Split each section in half — keep summaries inline, details in guidance
2683
+ correct: B
2684
+ explanation: 'The core symbol system (# $ ^ ! ~) and project conventions are needed in every session — they stay inline. Orchestration, flow-first development, and workspace configuration are situational: you only need orchestration when running multi-agent tasks, flows when documenting a complex process, and workspaces when working across projects. These become on-demand resources. The CLAUDE.md even includes a resource table mapping topics to URIs so agents know what is available without loading the full content.'
2685
+ - id: plsat-120
2686
+ scenario: 'A builder agent modifies `src/auth/jwt-verify.ts` and adds a new gate `^token-valid`. The event stream emits: type: ''gate-added'', source: ''post-write-hook'', symbols: [''^token-valid'', ''#jwt-verify''], path: ''src/auth/jwt-verify.ts'', keywords: [''JWT'', ''validation'', ''gate'', ''authentication'']. A security agent has attention: symbols: [''^*'', ''#*-auth'', ''#*-middleware''], concepts: [''permission'', ''JWT'', ''RBAC''], signals: [{ type: ''gate-added'' }], threshold: 0.4.'
2687
+ question: What urgency level should the security agent's nomination use?
2688
+ choices:
2689
+ A: Low — gate additions are routine maintenance
2690
+ B: Medium — any security-related change warrants moderate urgency
2691
+ C: High or Critical — a new gate in the auth layer is a security-sensitive change; the agent's nomination.speak_when.urgency likely includes 'gate_missing' and 'security_risk', and the signal match on 'gate-added' directly triggers the security review pattern
2692
+ D: The urgency is always 'medium' unless the event severity is 'critical'
2693
+ E: No nomination — the security agent only reviews when asked directly
2694
+ correct: C
2695
+ explanation: 'The security agent''s attention fires on three dimensions simultaneously: symbolMatch (''^token-valid'' matches ''^*''), conceptMatch (''JWT'' and ''authentication'' match concepts), and signalMatch (''gate-added'' matches a signal). With a threshold of 0.4 and a score of 1.0, the agent clearly self-nominates. For urgency, gate additions in authentication code are security-sensitive changes — the nomination.speak_when.urgency array typically includes ''gate_missing'' and ''security_risk''. A new gate needs review to verify it is correctly implemented and not bypassed.'
2696
+ slot: slot-120
2697
+ section: para-601
2698
+ variants:
2699
+ - id: plsat-120b
2700
+ scenario: 'A builder agent updates a CSS file: type: ''file-modified'', path: ''src/styles/button.css'', symbols: [''#button-styles''], keywords: [''padding'', ''border-radius'', ''color'']. A security agent has attention: symbols: [''^*'', ''#*-auth''], paths: [''auth/**'', ''middleware/**''], concepts: [''permission'', ''JWT''], signals: [{ type: ''gate-added'' }], threshold: 0.4.'
2701
+ question: What happens with the security agent's nomination for this event?
2702
+ choices:
2703
+ A: The security agent nominates with urgency 'low' — all changes get reviewed
2704
+ B: The security agent does not nominate — score is 0 across all dimensions (no symbol match, path is not auth/middleware, no concept overlap, no signal match), and quietReason is 'below-threshold'
2705
+ C: The security agent nominates because the low threshold (0.4) catches most events
2706
+ D: The security agent nominates with urgency 'medium' — CSS could affect security UI
2707
+ E: The security agent defers — it waits to see if other agents nominate first
2708
+ correct: B
2709
+ explanation: 'The security agent''s attention patterns produce zero matches: ''#button-styles'' does not match ''^*'' or ''#*-auth''; ''src/styles/button.css'' does not match ''auth/**'' or ''middleware/**''; [''padding'', ''border-radius'', ''color''] have no overlap with [''permission'', ''JWT'']; and event type ''file-modified'' is not ''gate-added''. The score is max(0, 0, 0, 0) = 0, far below the 0.4 threshold. The agent stays quiet with quietReason: ''below-threshold''. A low threshold does not mean the agent nominates for everything — it means the agent is more sensitive to weak matches in its attention domain.'
2710
+ - id: plsat-121
2711
+ scenario: A security agent nominates with urgency 'critical' after detecting a missing gate on a payment endpoint. At the same time, a reviewer agent nominates with urgency 'low' about a variable naming convention issue in the same file. Both nominations target overlapping symbols.
2712
+ question: How should the urgency levels affect surfacing to the human?
2713
+ choices:
2714
+ A: Both are shown equally — urgency is just metadata with no behavioral effect
2715
+ B: Only the critical nomination is shown — low urgency nominations are always suppressed
2716
+ C: The critical nomination is surfaced immediately; the low urgency nomination may be batched or deferred based on the SurfacingConfig's min_urgency setting for the reviewer agent
2717
+ D: They are merged into a single nomination with urgency 'high' (the average)
2718
+ E: The reviewer's nomination is upgraded to 'critical' because it touches the same symbols
2719
+ correct: C
2720
+ explanation: 'SurfacingConfig controls how nominations reach the human. Each agent can have a min_urgency setting — if the reviewer''s SurfacingPreference has min_urgency: ''medium'', the ''low'' urgency naming convention issue would be batched or suppressed until the human is less busy. The security agent''s ''critical'' nomination exceeds any reasonable min_urgency threshold and is surfaced immediately. Nominations are not merged or averaged — they are independent contributions that may be grouped as a ''complementary'' Debate if they touch overlapping symbols.'
2721
+ slot: slot-121
2722
+ section: para-601
2723
+ variants:
2724
+ - id: plsat-121b
2725
+ scenario: 'An event fires: type: ''route-created'' for a new `/api/admin/users` endpoint. The security agent nominates with urgency ''high'' (missing gate review). The architect agent nominates with urgency ''medium'' (suggesting a different URL structure). The builder agent scores below threshold and stays quiet.'
2726
+ question: What nomination urgency behavior is correct here?
2727
+ choices:
2728
+ A: The architect's nomination is suppressed because the security agent already nominated
2729
+ B: 'Both nominations surface: security at ''high'' urgency (shows first) and architect at ''medium'' urgency. They may be grouped into a Debate because they target the same route symbol. The builder''s quietReason is ''below-threshold'''
2730
+ C: All three agents must nominate — staying quiet is not an option
2731
+ D: The urgency levels are recalculated based on the number of agents that nominate
2732
+ E: Only the highest urgency nomination surfaces; others are queued for later
2733
+ correct: B
2734
+ explanation: 'Each agent independently scores the event and decides whether to nominate. The security and architect agents exceeded their thresholds and nominated with different urgency levels. Both are surfaced (high shows before medium in priority order). Because they target the same route/symbols, the system may group them as a Debate (type: ''complementary'' since they address different concerns). The builder agent scored below its threshold and stays quiet with quietReason: ''below-threshold'' — this is correct behavior, not a failure.'
2735
+ - id: plsat-122
2736
+ scenario: 'A project''s data policy has: upstream ring: ''creator-upstream'', allowed: [''task_type'', ''outcome'', ''helpfulness'', ''duration_bucket'', ''error_category''], denied: [''code_of_any_kind'', ''file_paths'', ''symbol_names'', ''conversation_content'', ''user_identity'']. An agent creator''s analytics dashboard requests feedback data.'
2737
+ question: Which data reaches the agent creator?
2738
+ choices:
2739
+ A: Everything the agent produced — the creator needs full visibility to improve the agent
2740
+ B: 'Only the allowed fields: task_type (''feature implementation''), outcome (''pass''), helpfulness (''high''), duration_bucket (''30-60min''), error_category (null). No code, file paths, symbol names, conversation content, or user identity is transmitted'
2741
+ C: A summary of the agent's work log entries
2742
+ D: Nothing — the 'denied' list overrides the 'allowed' list entirely
2743
+ E: Only 'outcome' and 'helpfulness' — those are the minimum required fields
2744
+ correct: B
2745
+ explanation: 'The upstream rules at Ring 3 (creator-upstream) define exactly what flows to agent creators. The ''allowed'' list enumerates the specific fields that can be transmitted: task_type, outcome, helpfulness, duration_bucket, and error_category. The ''denied'' list explicitly blocks code_of_any_kind, file_paths, symbol_names, conversation_content, and user_identity. These two lists work together — only allowed fields pass, and denied fields are hard-blocked even if they would otherwise be inferred. The creator sees anonymized quality metrics, never the user''s actual code or identity.'
2746
+ slot: slot-122
2747
+ section: para-601
2748
+ variants:
2749
+ - id: plsat-122b
2750
+ scenario: 'A team decision entry contains: decision: ''Use PostgreSQL with pgvector for embeddings'', rationale: ''Lower latency than Pinecone for our dataset size'', symbols_affected: [''#embedding-store'', ''#search-service'']. The data policy has team_decisions ring: ''project-locked'', deny_content: [''implementation_details'']. An agent from another user''s project requests this data.'
2751
+ question: What happens when the cross-project request arrives?
2752
+ choices:
2753
+ A: The decision is shared — team decisions are public knowledge
2754
+ B: Only the rationale is shared — the decision text contains implementation details
2755
+ C: The request is blocked entirely — team decisions are in Ring 1 (project-locked), which means they never leave the project regardless of what the requesting agent needs
2756
+ D: The decision title is shared but the rationale is redacted
2757
+ E: The data is shared if the other project is in the same workspace
2758
+ correct: C
2759
+ explanation: Team decisions default to Ring 1 (project-locked). The trust ring system is the primary enforcement boundary — data classified in Ring 1 never leaves the project, period. The 'cross-project-transfer' enforcement boundary checks the ring before any content filtering. Even though the deny_content only blocks 'implementation_details', the ring restriction prevents the entire entry from being transmitted. Workspaces do not override ring restrictions — they enable symbol awareness across projects, not data sharing.
2760
+ - id: plsat-123
2761
+ scenario: 'The data policy''s default ring is ''project-locked''. The observation rules are: allow: [''src/**'', ''.paradigm/**'', ''portal.yaml''], deny: [''.env*'', ''**/*.key'', ''**/*.pem'', ''**/secrets/**'']. A builder agent tries to read ''.env.production'' to check a database URL.'
2762
+ question: What happens at the observation enforcement boundary?
2763
+ choices:
2764
+ A: The read succeeds — '.env.production' matches 'src/**' because it starts with a dot
2765
+ B: The read is blocked — '.env.production' matches the deny pattern '.env*', and deny overrides allow. The agent is prevented from observing the file contents
2766
+ C: The read succeeds but the content is redacted — only the file name is returned
2767
+ D: The read is blocked only if the file contains actual secrets; the policy checks content
2768
+ E: The read succeeds because the default ring is 'project-locked', which means all project files are accessible
2769
+ correct: B
2770
+ explanation: The observation rules control what agents can see. The deny list takes precedence over the allow list — '.env.production' matches the deny glob '.env*', so the read is blocked at the 'observation' enforcement boundary. This is a hard deny, not a content-aware filter. The default ring being 'project-locked' means data stays within the project, but observation deny patterns prevent agents from accessing sensitive files regardless of ring level. The agent would need the deny pattern removed from the data policy to read this file.
2771
+ slot: slot-123
2772
+ section: para-601
2773
+ variants:
2774
+ - id: plsat-123b
2775
+ scenario: 'The observation rules allow: [''src/**'', ''.paradigm/**''], deny: [''**/*.key'', ''**/*.pem'', ''**/secrets/**'']. A security agent wants to audit the file ''src/config/secrets/api-keys.json'' to check for hardcoded credentials.'
2776
+ question: What does the data policy enforce?
2777
+ choices:
2778
+ A: The read is allowed — 'src/**' matches and the security agent has special override privileges
2779
+ B: The read is allowed — 'src/config/secrets/api-keys.json' matches 'src/**' in allow
2780
+ C: The read is blocked — 'src/config/secrets/api-keys.json' matches '**/secrets/**' in deny, which overrides the 'src/**' allow pattern
2781
+ D: The read is partially allowed — the file name is visible but contents are redacted
2782
+ E: The read depends on the agent's permissions.paths.read setting, not the data policy
2783
+ correct: C
2784
+ explanation: Deny patterns override allow patterns in observation rules. While 'src/config/secrets/api-keys.json' does match 'src/**' in the allow list, it also matches '**/secrets/**' in the deny list. The deny takes precedence. This is a deliberate security design — even well-intentioned audit access to secrets directories is blocked. If the security agent needs to verify no hardcoded secrets exist, it would need a different approach (e.g., a human-run audit) or a per-agent override in agent_overrides that explicitly allows that path.
2785
+ - id: plsat-124
2786
+ scenario: An event is emitted by the post-write hook after a file save. You need to understand the anatomy of the StreamEvent object.
2787
+ question: Which fields are ALWAYS present on every StreamEvent, regardless of the event type or source?
2788
+ choices:
2789
+ A: id, type, source, timestamp, path, symbols, agent
2790
+ B: id, type, source, timestamp — these four are required. Fields like path, symbols, keywords, context, agent, tool, severity, and data are all optional and depend on the event type
2791
+ C: id, timestamp, and agent — the agent is always set because events come from agents
2792
+ D: All fields are always present — optional fields default to empty arrays or null
2793
+ E: type and timestamp only — the id is generated lazily when the event is queried
2794
+ correct: B
2795
+ explanation: 'A StreamEvent has four required fields: id (auto-generated like ''ev-1711000000000-0042''), type (an EventType like ''file-modified'', ''gate-added'', ''error-encountered''), source (an EventSource like ''post-write-hook'', ''mcp-tool-call'', ''conversation''), and timestamp (ISO 8601). All other fields are optional: path is present for file events, symbols for Paradigm-aware events, agent for agent-originated events, tool for MCP tool calls, severity for compliance/error events, and data for arbitrary structured metadata.'
2796
+ slot: slot-124
2797
+ section: para-601
2798
+ variants:
2799
+ - id: plsat-124b
2800
+ scenario: 'You are debugging an event that arrived in the stream: { id: ''ev-1711036800000-0317'', type: ''gate-checked'', source: ''mcp-tool-call'', timestamp: ''2026-03-21T12:00:00Z'', symbols: [''^admin-only''], tool: ''paradigm_gates_for_route'', context: ''Checking gates for /api/admin/config'' }.'
2801
+ question: Which fields on this event are the optional ones (not present on every event)?
2802
+ choices:
2803
+ A: All fields shown are required — this is the minimum event structure
2804
+ B: Only 'context' is optional — everything else is always present
2805
+ C: symbols, tool, and context are the optional fields. id, type, source, and timestamp are the four required fields present on every event
2806
+ D: id is optional — some events are anonymous
2807
+ E: source and tool are both optional — they are redundant
2808
+ correct: C
2809
+ explanation: The four required fields are id, type, source, and timestamp. In this event, symbols (['^admin-only']), tool ('paradigm_gates_for_route'), and context ('Checking gates for /api/admin/config') are all optional fields that happen to be populated. Other events might lack these — for example, a 'file-modified' event from 'post-write-hook' would have path instead of tool, and might not have symbols at all if the file is not covered by a .purpose file.
2810
+ - id: plsat-125
2811
+ scenario: 'A builder agent starts a new session on project ''acme-api''. You call `paradigm_context_compose` with agent: ''builder'', symbols: [''#api-gateway'', ''#rate-limiter''], include_nominations: true, include_decisions: true, include_journal: true.'
2812
+ question: What sections does the composed context include?
2813
+ choices:
2814
+ A: Only the agent's profile — context compose just returns the .agent file
2815
+ B: The full CLAUDE.md, all .purpose files, and the complete event stream
2816
+ C: Profile enrichment (personality, relevant expertise for the given symbols, transferable patterns), recent active team decisions, transferable journal entries for this agent, and pending nominations — four distinct sections composed into a markdown block
2817
+ D: Only nominations and decisions — the profile is loaded separately
2818
+ E: The agent's attention patterns and learning configuration — context compose is about ambient setup
2819
+ correct: C
2820
+ explanation: 'paradigm_context_compose builds a session context from four sources: (1) Profile enrichment — the agent''s personality, expertise entries matching the given symbols, and transferable patterns; (2) Recent team decisions with status ''active'' (up to max_decisions, default 5); (3) Transferable journal entries for this agent (insights that apply across projects, up to max_journal, default 5); (4) Pending nominations that haven''t been surfaced yet. Each section can be toggled via include_* flags. The result is a markdown string for prompt injection, not raw data.'
2821
+ slot: slot-125
2822
+ section: para-601
2823
+ variants:
2824
+ - id: plsat-125b
2825
+ scenario: 'A security agent calls `paradigm_context_compose` with agent: ''security'', include_decisions: true, include_journal: true, include_nominations: false, max_decisions: 3, max_journal: 10.'
2826
+ question: What does the composed context contain?
2827
+ choices:
2828
+ A: All 4 sections are always included — the include_* flags are just suggestions
2829
+ B: Profile enrichment for the security agent, up to 3 recent active team decisions, up to 10 transferable journal entries, and NO nominations section (because include_nominations is false)
2830
+ C: Only the journal entries — security agents do not get profile enrichment
2831
+ D: The full event stream filtered to security-relevant events
2832
+ E: Profile, decisions, and journal — but max_journal caps at 5 regardless of the parameter
2833
+ correct: B
2834
+ explanation: 'The include_* flags control which sections appear in the composed context. With include_nominations: false, that section is skipped entirely. Profile enrichment is always included (it is the base). max_decisions: 3 limits team decisions to the 3 most recent active ones (instead of the default 5). max_journal: 10 allows up to 10 transferable journal entries (overriding the default 5). The result is a markdown context block with three sections: profile enrichment, decisions, and journal — ready for prompt injection.'
2835
+ - id: plsat-126
2836
+ scenario: 'The data policy network rules are: ring: ''network-public'', opt_in: false, if_opted_in: [''aggregated_task_success_rates'', ''anonymized_pattern_frequency'']. A network aggregation service requests data from this project.'
2837
+ question: What data flows to the network?
2838
+ choices:
2839
+ A: aggregated_task_success_rates and anonymized_pattern_frequency — the if_opted_in list defines what is shared
2840
+ B: 'Nothing — opt_in is false, so no data reaches Ring 4 (network-public) regardless of the if_opted_in list. The user must explicitly set opt_in: true before any data flows'
2841
+ C: Only anonymized_pattern_frequency — success rates could identify the project
2842
+ D: Aggregated statistics are always shared — opt_in only controls detailed data
2843
+ E: The data is shared but with a 30-day delay for privacy
2844
+ correct: B
2845
+ explanation: 'The network rules require explicit opt-in. With opt_in: false, the ''network-aggregation'' enforcement boundary blocks ALL data from reaching Ring 4 (network-public). The if_opted_in list only takes effect when opt_in is true — it defines which specific metrics would be shared if the user chooses to participate. This is a deliberate design: no data flows to the network by default. The user must consciously enable it, and even then, only the listed metric types (aggregated task success rates and anonymized pattern frequency) are transmitted.'
2846
+ slot: slot-126
2847
+ section: para-601
2848
+ variants:
2849
+ - id: plsat-126b
2850
+ scenario: 'An upstream rule has: ring: ''creator-upstream'', allowed: [''task_type'', ''outcome''], denied: [''code_of_any_kind'', ''file_paths'', ''symbol_names'', ''conversation_content'', ''user_identity'']. The agent generates a work log entry that mentions file paths and symbol names in the summary. This entry is allowed at Ring 1. Now the upstream boundary is checked.'
2851
+ question: What reaches the agent creator?
2852
+ choices:
2853
+ A: The full work log entry — it was allowed at Ring 1 so it passes all rings
2854
+ B: Only the task_type and outcome fields. Even though the summary mentions file paths and symbols, the upstream boundary enforces the denied list. File paths, symbol names, and any code are stripped. Only the enumerated allowed fields pass through to Ring 3
2855
+ C: The summary with file paths and symbol names redacted inline
2856
+ D: Nothing — the denied list cancels out the allowed list
2857
+ E: The work log ID and timestamp only — those are always transmitted
2858
+ correct: B
2859
+ explanation: 'The upstream enforcement boundary operates on specific fields, not on the text content of entries. The ''allowed'' list names exactly which fields pass: task_type and outcome. The ''denied'' list provides an additional hard block on categories like file_paths, symbol_names, and code_of_any_kind. Data that was allowed at Ring 1 (project-locked) does not automatically flow to Ring 3 (creator-upstream) — each ring''s enforcement boundary re-evaluates what passes. The creator receives structured, enumerated fields only — never free-text summaries that might leak sensitive information.'
2860
+ - id: plsat-127
2861
+ scenario: A new project is set up with Paradigm Ambient. The CLAUDE.md mentions a 'learning loop' where agents improve over time. A junior developer asks about the sequence of steps in the ambient learning loop.
2862
+ question: What is the correct order of the ambient learning loop?
2863
+ choices:
2864
+ A: Observe → Learn → Act → Record
2865
+ B: Act → Record → Learn → Apply
2866
+ C: Event emitted → Attention scoring → Self-nomination → Surfacing → Human engagement → Feedback → Journal recording → Confidence adjustment → Pattern extraction → Context enrichment (next session)
2867
+ D: Record everything → Filter later → Show on demand
2868
+ E: Human assigns → Agent acts → Agent reports → Human reviews
2869
+ correct: C
2870
+ explanation: 'The ambient learning loop is a 10-step cycle: (1) An event is emitted (file save, tool call, error). (2) Each agent scores it against their attention patterns. (3) Agents that exceed their threshold self-nominate with a type and urgency. (4) Nominations are surfaced to the human based on SurfacingConfig. (5) The human engages (accepts, dismisses, defers). (6) The response becomes feedback. (7) The agent records insights in its Learning Journal. (8) Confidence scores are adjusted via exponential moving average. (9) Transferable patterns are extracted for cross-project use. (10) Next session, paradigm_context_compose injects the updated patterns and insights.'
2871
+ slot: slot-127
2872
+ section: para-601
2873
+ variants:
2874
+ - id: plsat-127b
2875
+ scenario: A team is evaluating whether Paradigm Ambient's learning loop actually improves agent performance over time. They want to understand the mechanism.
2876
+ question: Which sequence correctly describes how an agent's future behavior improves from a single learning event?
2877
+ choices:
2878
+ A: The agent's code is retrained on the new data point
2879
+ B: The correction is stored but only used if the exact same scenario recurs
2880
+ C: Feedback → Journal entry (trigger + insight + confidence adjustment) → Pattern extraction (if transferable) → Notebook promotion (if auto-promote enabled) → Context compose injects the pattern in future sessions → Agent's expertise scores are updated
2881
+ D: The human writes a rule in the data policy and the agent follows it
2882
+ E: The agent's threshold is lowered so it speaks up more often
2883
+ correct: C
2884
+ explanation: 'Agent improvement follows a structured path: (1) Feedback from human engagement triggers a journal entry with the specific trigger type (correction_received, confidence_miss, etc.), the insight, and confidence adjustment (before/after). (2) If the learning is transferable, a pattern is extracted with applies_when and correct_approach. (3) If notebook_auto_promote is enabled in the agent''s learning config, the pattern promotes to the agent''s notebook. (4) In future sessions, paradigm_context_compose loads transferable journal entries and patterns into the agent''s context. (5) The agent''s expertise scores (EMA) are updated, shifting future attention scoring. The agent is not retrained — it improves through richer context injection.'
2885
+ - id: plsat-128
2886
+ scenario: 'An architect agent has attention: symbols: [''$*'', ''#*''], concepts: [''architecture'', ''design'', ''pattern'', ''refactor''], threshold: 0.5. An event arrives: type: ''concept-mentioned'', source: ''conversation'', keywords: [''refactor'', ''extract'', ''design pattern'', ''strategy pattern''], context: ''Discussing whether to refactor the notification system using the strategy pattern.'''
2887
+ question: What is the architect's conceptMatch score, and does it self-nominate?
2888
+ choices:
2889
+ A: conceptMatch = 0.25 (1 of 4 concepts), score = 0.25, does not nominate (below 0.5)
2890
+ B: conceptMatch = 1.0 — any concept match gives full score
2891
+ C: conceptMatch = 0.75 — 'architecture' does not appear but 'design', 'pattern', and 'refactor' all match, giving 3 of 4 concepts matched. Score = max(0, 0, 0.75, 0) = 0.75, which exceeds 0.5, so the agent self-nominates
2892
+ D: conceptMatch = 0.5 — only exact keyword matches count, and only 'refactor' and 'pattern' are exact matches (2 of 4)
2893
+ E: The architect does not nominate because 'concept-mentioned' is not in its signals list
2894
+ correct: C
2895
+ explanation: 'Concept matching joins the event''s context, keywords, and type into a single lowercase string, then checks how many of the agent''s concepts appear. The combined text includes ''refactor'', ''extract'', ''design pattern'', ''strategy pattern'', ''concept-mentioned''. Against the agent''s concepts [''architecture'', ''design'', ''pattern'', ''refactor'']: ''design'' matches, ''pattern'' matches, ''refactor'' matches. ''architecture'' does not appear. That is 3 out of 4 = 0.75. The final score is max(symbolMatch, pathMatch, conceptMatch, signalMatch). With no symbols or path on this event, symbolMatch and pathMatch are 0. signalMatch is 0 (no signal patterns configured). Score = 0.75, above the 0.5 threshold. The architect self-nominates.'
2896
+ slot: slot-128
2897
+ section: para-601
2898
+ variants:
2899
+ - id: plsat-128b
2900
+ scenario: 'A security agent has attention: symbols: [''^*'', ''#*-auth'', ''#*-middleware''], paths: [''auth/**'', ''middleware/**''], concepts: [''permission'', ''JWT'', ''RBAC'', ''XSS'', ''injection''], signals: [{ type: ''gate-added'' }, { type: ''route-created'' }], threshold: 0.4. An event arrives: type: ''route-created'', source: ''post-write-hook'', path: ''src/routes/admin.ts'', symbols: [''#admin-routes''], keywords: [''new endpoint'', ''admin panel'', ''user management''].'
2901
+ question: What is the security agent's overall score and should it nominate?
2902
+ choices:
2903
+ A: Score = 0 — '#admin-routes' does not match any symbol pattern
2904
+ B: Score = 0.4 — only the path partially matches, barely meeting threshold
2905
+ C: Score = 1.0 — signalMatch is 1.0 because 'route-created' matches signals. symbolMatch is 0 ('#admin-routes' does not match '^*', '#*-auth', or '#*-middleware'). pathMatch is 0 ('src/routes/admin.ts' does not match 'auth/**' or 'middleware/**'). The final score is max(0, 0, 0, 1.0) = 1.0, well above the 0.4 threshold
2906
+ D: Score = 0.2 — partial matches across dimensions are averaged
2907
+ E: Score = 1.0 — all four dimensions fire because admin is security-related
2908
+ correct: C
2909
+ explanation: 'The scoring uses max() across four dimensions, not an average. symbolMatch: ''#admin-routes'' does not end with ''-auth'' or ''-middleware'' and does not start with ''^'', so 0. pathMatch: ''src/routes/admin.ts'' does not match ''auth/**'' or ''middleware/**'', so 0. conceptMatch: keywords [''new endpoint'', ''admin panel'', ''user management''] do not contain ''permission'', ''JWT'', ''RBAC'', ''XSS'', or ''injection'', so 0. signalMatch: the event type ''route-created'' matches the signal { type: ''route-created'' }, so 1.0. Final score: max(0, 0, 0, 1.0) = 1.0. The security agent self-nominates — new route creation is exactly the kind of event that warrants security review.'