@a-company/paradigm 5.38.0 → 6.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (328) hide show
  1. package/dist/{accept-orchestration-OATWIRHP.js → accept-orchestration-QQISPINV.js} +1 -1
  2. package/dist/add-UOR4INIV.js +8 -0
  3. package/dist/{agent-loader-RIVI6QPP.js → agent-loader-2WJHD46U.js} +1 -1
  4. package/dist/{agent-loader-RJRVO5GQ.js → agent-loader-YKS2PQWO.js} +1 -1
  5. package/dist/{ambient-76YMUA5Q.js → ambient-BE3SQXNN.js} +1 -1
  6. package/dist/{ambient-WTLYUAQM.js → ambient-NVKQCW2A.js} +12 -12
  7. package/dist/{assess-UFPYEJKP.js → assess-63WXHWJV.js} +1 -1
  8. package/dist/{calibration-OLJYB5HN.js → calibration-BDHGYJOK.js} +1 -1
  9. package/dist/{chunk-5QOCKWK5.js → chunk-4PSD5R7N.js} +2 -2
  10. package/dist/{chunk-HOBHJPTL.js → chunk-6SKSV5B2.js} +1 -1
  11. package/dist/{chunk-4L7665QV.js → chunk-FEYOQMZ5.js} +1 -1
  12. package/dist/{chunk-NEJ4ZLCY.js → chunk-GAFKOFAV.js} +1 -1
  13. package/dist/chunk-GRZQIKST.js +2 -0
  14. package/dist/{chunk-RLCH7DXQ.js → chunk-K7X3Z3GL.js} +1 -1
  15. package/dist/{chunk-4VKSEOXZ.js → chunk-LPBCQM5Y.js} +3 -3
  16. package/dist/{chunk-74SGKSRQ.js → chunk-M2HKWR25.js} +1 -1
  17. package/dist/{chunk-BOYQAMGC.js → chunk-M3PPXJU4.js} +1 -1
  18. package/dist/chunk-PHEX6LU4.js +111 -0
  19. package/dist/chunk-Q527BPUF.js +2 -0
  20. package/dist/chunk-R5ECMBIV.js +11 -0
  21. package/dist/{chunk-X3U3IGYT.js → chunk-TBWWFRL5.js} +1 -1
  22. package/dist/{chunk-MQIG6SMF.js → chunk-TNVWGPCE.js} +1 -1
  23. package/dist/chunk-TZDYIPVU.js +521 -0
  24. package/dist/{chunk-3XGNXXCT.js → chunk-UZ5H7K6Q.js} +1 -1
  25. package/dist/chunk-VIG5LSGZ.js +2 -0
  26. package/dist/chunk-VNIX5KBT.js +3 -0
  27. package/dist/{chunk-AGFPVSX5.js → chunk-VXIIVMTM.js} +1 -1
  28. package/dist/{chunk-ORDKEGII.js → chunk-WESTEMIM.js} +1 -1
  29. package/dist/{chunk-DOCDDDTD.js → chunk-YNDPSWOE.js} +5 -5
  30. package/dist/chunk-Z5QW6USC.js +2 -0
  31. package/dist/{compliance-D7GD6ZYC.js → compliance-BNFWQPKM.js} +1 -1
  32. package/dist/config-schema-FLHRVZMI.js +2 -0
  33. package/dist/{context-audit-XRPT3OU2.js → context-audit-JVCA6GSV.js} +1 -1
  34. package/dist/{cursorrules-U5O4G5T4.js → cursorrules-ZXPXPZ3P.js} +1 -1
  35. package/dist/decision-loader-HELL2AMX.js +2 -0
  36. package/dist/{delete-P5VULXR4.js → delete-2C6ALLYY.js} +1 -1
  37. package/dist/{diff-YGHBIJY5.js → diff-MF55KQZH.js} +1 -1
  38. package/dist/{dist-KGRCLBJP-2QAPFYNF.js → dist-GQ42YS5N-4HIJZVBB.js} +10 -10
  39. package/dist/{docs-USDAF26F.js → docs-O37YLLRN.js} +1 -1
  40. package/dist/doctor-IG5XM4C4.js +2 -0
  41. package/dist/{edit-GUU3HBVW.js → edit-P3MDAZLU.js} +1 -1
  42. package/dist/{flow-FVZR3YJ4.js → flow-BGXOVE2V.js} +1 -1
  43. package/dist/index.js +6 -6
  44. package/dist/init-M44SO65G.js +2 -0
  45. package/dist/{init-XYB62Q3X.js → init-V4KSEKPK.js} +1 -1
  46. package/dist/{list-YKIQNKGB.js → list-2XIWUEMA.js} +1 -1
  47. package/dist/list-CFHINXIS.js +12 -0
  48. package/dist/lore-loader-D2ISOASW.js +2 -0
  49. package/dist/lore-loader-PXFKMKAN.js +2 -0
  50. package/dist/mcp.js +4 -4
  51. package/dist/metrics-UESGUHTA.js +2 -0
  52. package/dist/migrate-assessments-YSITX7KM.js +4 -0
  53. package/dist/migrate-decisions-NPLQOEEH.js +6 -0
  54. package/dist/migrate-plsat-EM2ACIQ3.js +6 -0
  55. package/dist/{nomination-engine-EALA5MGI.js → nomination-engine-QPZJH6XO.js} +1 -1
  56. package/dist/{notebook-loader-PXNRBBXD.js → notebook-loader-3J2OFMS3.js} +1 -1
  57. package/dist/{orchestrate-M5PBZBJQ.js → orchestrate-RID7HHHH.js} +1 -1
  58. package/dist/{platform-server-DNAMH4YI.js → platform-server-UD45NTGV.js} +1 -1
  59. package/dist/{portal-check-ZMLVBIGW.js → portal-check-DV2VSJ5E.js} +1 -1
  60. package/dist/portal-compliance-JONQ4SOP.js +2 -0
  61. package/dist/{probe-3FTG6LYO.js → probe-5HAXULAD.js} +1 -1
  62. package/dist/{providers-AWA7WLLM.js → providers-4PXMWA7V.js} +1 -1
  63. package/dist/quiz-WYIZJG5K.js +10 -0
  64. package/dist/{record-YXPB34MY.js → record-N3VNYYKJ.js} +1 -1
  65. package/dist/reindex-FWPD2VGM.js +2 -0
  66. package/dist/{retag-N5XF3KXP.js → retag-72R2OSZV.js} +1 -1
  67. package/dist/{review-77QI6VOC.js → review-2INNWLTW.js} +1 -1
  68. package/dist/{sentinel-HYAZ3CO5.js → sentinel-EFPEX246.js} +1 -1
  69. package/dist/{sentinel-bridge-VR357PKL.js → sentinel-bridge-UR2MKARY.js} +1 -1
  70. package/dist/{serve-U47GULB6.js → serve-MO35XIZE.js} +1 -1
  71. package/dist/serve-OQYUO7CR.js +12 -0
  72. package/dist/{server-4YNUIK4W.js → server-4D77LCST.js} +1 -1
  73. package/dist/server-FGUL2FWQ.js +7 -0
  74. package/dist/session-tracker-KGORN6B5.js +2 -0
  75. package/dist/{session-work-log-PAKXOFGL.js → session-work-log-4IEVE4KK.js} +1 -1
  76. package/dist/{session-work-log-ZP45TREI.js → session-work-log-EE4UIZ33.js} +1 -1
  77. package/dist/{setup-FEWSYS3Y.js → setup-ZSEC72BS.js} +1 -1
  78. package/dist/{shift-PC6C7NUX.js → shift-TVNY2CQF.js} +6 -6
  79. package/dist/{show-PJ5LFLIL.js → show-JH7LJ5MT.js} +1 -1
  80. package/dist/show-WVHAL4VU.js +7 -0
  81. package/dist/{spawn-M5BAV252.js → spawn-UH5RENSE.js} +1 -1
  82. package/dist/status-S7Z5FVIE.js +6 -0
  83. package/dist/{summary-PYTEIJ4U.js → summary-WLI3NF4G.js} +2 -2
  84. package/dist/{sweep-HU74OPVW.js → sweep-7TZFN5NS.js} +1 -1
  85. package/dist/sync-55U6QPIA.js +2 -0
  86. package/dist/{sync-llms-7CAI74QL.js → sync-llms-GF7DDQDI.js} +1 -1
  87. package/dist/{team-PDK64JXI.js → team-MGT66HZQ.js} +1 -1
  88. package/dist/{timeline-K3ZFKJ3R.js → timeline-RK7O2SCM.js} +1 -1
  89. package/dist/tools-QJHAVYI6.js +2 -0
  90. package/dist/university-content/notes/N-para-001-build-something.md +126 -0
  91. package/dist/university-content/notes/N-para-001-meet-the-team.md +85 -0
  92. package/dist/university-content/notes/N-para-001-shift-setup.md +74 -0
  93. package/dist/university-content/notes/N-para-101-component-types.md +99 -0
  94. package/dist/university-content/notes/N-para-101-first-steps.md +134 -0
  95. package/dist/university-content/notes/N-para-101-five-symbols.md +128 -0
  96. package/dist/university-content/notes/N-para-101-paradigm-logger.md +89 -0
  97. package/dist/university-content/notes/N-para-101-portal-yaml.md +112 -0
  98. package/dist/university-content/notes/N-para-101-project-structure.md +143 -0
  99. package/dist/university-content/notes/N-para-101-purpose-files.md +121 -0
  100. package/dist/university-content/notes/N-para-101-tags-and-classification.md +93 -0
  101. package/dist/university-content/notes/N-para-101-welcome.md +51 -0
  102. package/dist/university-content/notes/N-para-201-architecture-review.md +175 -0
  103. package/dist/university-content/notes/N-para-201-aspect-graph.md +79 -0
  104. package/dist/university-content/notes/N-para-201-aspects-and-anchors.md +112 -0
  105. package/dist/university-content/notes/N-para-201-component-patterns.md +138 -0
  106. package/dist/university-content/notes/N-para-201-cross-cutting-concerns.md +145 -0
  107. package/dist/university-content/notes/N-para-201-disciplines.md +187 -0
  108. package/dist/university-content/notes/N-para-201-flows-deep-dive.md +119 -0
  109. package/dist/university-content/notes/N-para-201-gates-deep-dive.md +165 -0
  110. package/dist/university-content/notes/N-para-201-portal-protocol.md +133 -0
  111. package/dist/university-content/notes/N-para-201-signal-patterns.md +159 -0
  112. package/dist/university-content/notes/N-para-201-symbol-naming.md +149 -0
  113. package/dist/university-content/notes/N-para-301-context-management.md +53 -0
  114. package/dist/university-content/notes/N-para-301-decisions.md +99 -0
  115. package/dist/university-content/notes/N-para-301-doctor-and-validation.md +70 -0
  116. package/dist/university-content/notes/N-para-301-enforcement-levels.md +102 -0
  117. package/dist/university-content/notes/N-para-301-fragility-tracking.md +50 -0
  118. package/dist/university-content/notes/N-para-301-history-system.md +42 -0
  119. package/dist/university-content/notes/N-para-301-navigation-system.md +55 -0
  120. package/dist/university-content/notes/N-para-301-operations-review.md +55 -0
  121. package/dist/university-content/notes/N-para-301-paradigm-shift.md +93 -0
  122. package/dist/university-content/notes/N-para-301-protocols.md +113 -0
  123. package/dist/university-content/notes/N-para-301-ripple-analysis.md +53 -0
  124. package/dist/university-content/notes/N-para-301-sentinel-observability.md +87 -0
  125. package/dist/university-content/notes/N-para-301-sync-and-maintenance.md +57 -0
  126. package/dist/university-content/notes/N-para-301-wisdom-system.md +89 -0
  127. package/dist/university-content/notes/N-para-401-agent-identity.md +99 -0
  128. package/dist/university-content/notes/N-para-401-agent-interop.md +87 -0
  129. package/dist/university-content/notes/N-para-401-agent-roles.md +107 -0
  130. package/dist/university-content/notes/N-para-401-commit-conventions.md +82 -0
  131. package/dist/university-content/notes/N-para-401-mastery-review.md +71 -0
  132. package/dist/university-content/notes/N-para-401-mcp-tools-overview.md +102 -0
  133. package/dist/university-content/notes/N-para-401-multi-agent-coordination.md +80 -0
  134. package/dist/university-content/notes/N-para-401-notebooks-permissions.md +66 -0
  135. package/dist/university-content/notes/N-para-401-orchestration-workflow.md +101 -0
  136. package/dist/university-content/notes/N-para-401-pm-governance.md +71 -0
  137. package/dist/university-content/notes/N-para-401-provider-cascade.md +75 -0
  138. package/dist/university-content/notes/N-para-401-quick-check.md +95 -0
  139. package/dist/university-content/notes/N-para-501-advanced-workflows.md +122 -0
  140. package/dist/university-content/notes/N-para-501-aspect-graph-advanced.md +195 -0
  141. package/dist/university-content/notes/N-para-501-aspect-graph-internals.md +97 -0
  142. package/dist/university-content/notes/N-para-501-assessment-loops.md +116 -0
  143. package/dist/university-content/notes/N-para-501-conductor-workspace.md +77 -0
  144. package/dist/university-content/notes/N-para-501-habits-practice.md +164 -0
  145. package/dist/university-content/notes/N-para-501-hook-enforcement.md +100 -0
  146. package/dist/university-content/notes/N-para-501-lore-system.md +155 -0
  147. package/dist/university-content/notes/N-para-501-platform-agent-ui.md +108 -0
  148. package/dist/university-content/notes/N-para-501-review-compliance.md +72 -0
  149. package/dist/university-content/notes/N-para-501-sentinel-deep-dive.md +173 -0
  150. package/dist/university-content/notes/N-para-501-session-intelligence.md +104 -0
  151. package/dist/university-content/notes/N-para-501-symphony-a-mail.md +120 -0
  152. package/dist/university-content/notes/N-para-501-symphony-networking.md +119 -0
  153. package/dist/university-content/notes/N-para-501-task-management.md +100 -0
  154. package/dist/university-content/notes/N-para-601-agent-renaissance.md +121 -0
  155. package/dist/university-content/notes/N-para-601-attention-scoring.md +129 -0
  156. package/dist/university-content/notes/N-para-601-context-composition.md +146 -0
  157. package/dist/university-content/notes/N-para-601-data-sovereignty.md +140 -0
  158. package/dist/university-content/notes/N-para-601-event-stream.md +126 -0
  159. package/dist/university-content/notes/N-para-601-knowledge-streams.md +144 -0
  160. package/dist/university-content/notes/N-para-601-learning-loop.md +68 -0
  161. package/dist/university-content/notes/N-para-601-maestro-team-collab.md +136 -0
  162. package/dist/university-content/notes/N-para-601-nominations-debates.md +115 -0
  163. package/dist/university-content/notes/N-para-701-agent-notebooks.md +131 -0
  164. package/dist/university-content/notes/N-para-701-agent-pods-nevrland.md +182 -0
  165. package/dist/university-content/notes/N-para-701-agent-profiles.md +197 -0
  166. package/dist/university-content/notes/N-para-701-agent-roster.md +82 -0
  167. package/dist/university-content/notes/N-para-701-agent-state.md +180 -0
  168. package/dist/university-content/notes/N-para-701-learning-feedback-loop.md +188 -0
  169. package/dist/university-content/notes/N-para-701-model-tier-resolution.md +204 -0
  170. package/dist/university-content/notes/N-para-701-orchestration-enforcement.md +169 -0
  171. package/dist/university-content/notes/N-para-701-per-project-rosters.md +198 -0
  172. package/dist/university-content/notes/N-para-701-symphony-visibility.md +142 -0
  173. package/dist/university-content/paths/LP-para-001.yaml +29 -0
  174. package/dist/university-content/paths/LP-para-101.yaml +59 -0
  175. package/dist/university-content/paths/LP-para-201.yaml +69 -0
  176. package/dist/university-content/paths/LP-para-301.yaml +84 -0
  177. package/dist/university-content/paths/LP-para-401.yaml +74 -0
  178. package/dist/university-content/paths/LP-para-501.yaml +89 -0
  179. package/dist/university-content/paths/LP-para-601.yaml +59 -0
  180. package/dist/university-content/paths/LP-para-701.yaml +64 -0
  181. package/dist/university-content/quizzes/Q-para-001-build-something.yaml +46 -0
  182. package/dist/university-content/quizzes/Q-para-001-meet-the-team.yaml +46 -0
  183. package/dist/university-content/quizzes/Q-para-001-shift-setup.yaml +46 -0
  184. package/dist/university-content/quizzes/Q-para-101-component-types.yaml +46 -0
  185. package/dist/university-content/quizzes/Q-para-101-first-steps.yaml +56 -0
  186. package/dist/university-content/quizzes/Q-para-101-five-symbols.yaml +66 -0
  187. package/dist/university-content/quizzes/Q-para-101-paradigm-logger.yaml +56 -0
  188. package/dist/university-content/quizzes/Q-para-101-portal-yaml.yaml +56 -0
  189. package/dist/university-content/quizzes/Q-para-101-project-structure.yaml +66 -0
  190. package/dist/university-content/quizzes/Q-para-101-purpose-files.yaml +56 -0
  191. package/dist/university-content/quizzes/Q-para-101-tags-and-classification.yaml +56 -0
  192. package/dist/university-content/quizzes/Q-para-101-welcome.yaml +56 -0
  193. package/dist/university-content/quizzes/Q-para-201-architecture-review.yaml +66 -0
  194. package/dist/university-content/quizzes/Q-para-201-aspect-graph.yaml +46 -0
  195. package/dist/university-content/quizzes/Q-para-201-aspects-and-anchors.yaml +56 -0
  196. package/dist/university-content/quizzes/Q-para-201-component-patterns.yaml +56 -0
  197. package/dist/university-content/quizzes/Q-para-201-cross-cutting-concerns.yaml +56 -0
  198. package/dist/university-content/quizzes/Q-para-201-disciplines.yaml +66 -0
  199. package/dist/university-content/quizzes/Q-para-201-flows-deep-dive.yaml +66 -0
  200. package/dist/university-content/quizzes/Q-para-201-gates-deep-dive.yaml +66 -0
  201. package/dist/university-content/quizzes/Q-para-201-portal-protocol.yaml +56 -0
  202. package/dist/university-content/quizzes/Q-para-201-signal-patterns.yaml +56 -0
  203. package/dist/university-content/quizzes/Q-para-201-symbol-naming.yaml +66 -0
  204. package/dist/university-content/quizzes/Q-para-301-context-management.yaml +56 -0
  205. package/dist/university-content/quizzes/Q-para-301-decisions.yaml +76 -0
  206. package/dist/university-content/quizzes/Q-para-301-doctor-and-validation.yaml +66 -0
  207. package/dist/university-content/quizzes/Q-para-301-enforcement-levels.yaml +46 -0
  208. package/dist/university-content/quizzes/Q-para-301-fragility-tracking.yaml +46 -0
  209. package/dist/university-content/quizzes/Q-para-301-history-system.yaml +56 -0
  210. package/dist/university-content/quizzes/Q-para-301-navigation-system.yaml +56 -0
  211. package/dist/university-content/quizzes/Q-para-301-operations-review.yaml +66 -0
  212. package/dist/university-content/quizzes/Q-para-301-paradigm-shift.yaml +46 -0
  213. package/dist/university-content/quizzes/Q-para-301-protocols.yaml +56 -0
  214. package/dist/university-content/quizzes/Q-para-301-ripple-analysis.yaml +56 -0
  215. package/dist/university-content/quizzes/Q-para-301-sentinel-observability.yaml +46 -0
  216. package/dist/university-content/quizzes/Q-para-301-sync-and-maintenance.yaml +46 -0
  217. package/dist/university-content/quizzes/Q-para-301-wisdom-system.yaml +56 -0
  218. package/dist/university-content/quizzes/Q-para-401-agent-identity.yaml +66 -0
  219. package/dist/university-content/quizzes/Q-para-401-agent-interop.yaml +46 -0
  220. package/dist/university-content/quizzes/Q-para-401-agent-roles.yaml +56 -0
  221. package/dist/university-content/quizzes/Q-para-401-commit-conventions.yaml +56 -0
  222. package/dist/university-content/quizzes/Q-para-401-mastery-review.yaml +66 -0
  223. package/dist/university-content/quizzes/Q-para-401-mcp-tools-overview.yaml +66 -0
  224. package/dist/university-content/quizzes/Q-para-401-multi-agent-coordination.yaml +76 -0
  225. package/dist/university-content/quizzes/Q-para-401-notebooks-permissions.yaml +61 -0
  226. package/dist/university-content/quizzes/Q-para-401-orchestration-workflow.yaml +66 -0
  227. package/dist/university-content/quizzes/Q-para-401-pm-governance.yaml +66 -0
  228. package/dist/university-content/quizzes/Q-para-401-provider-cascade.yaml +56 -0
  229. package/dist/university-content/quizzes/Q-para-401-quick-check.yaml +46 -0
  230. package/dist/university-content/quizzes/Q-para-501-advanced-workflows.yaml +66 -0
  231. package/dist/university-content/quizzes/Q-para-501-aspect-graph-advanced.yaml +66 -0
  232. package/dist/university-content/quizzes/Q-para-501-aspect-graph-internals.yaml +66 -0
  233. package/dist/university-content/quizzes/Q-para-501-assessment-loops.yaml +46 -0
  234. package/dist/university-content/quizzes/Q-para-501-conductor-workspace.yaml +46 -0
  235. package/dist/university-content/quizzes/Q-para-501-habits-practice.yaml +56 -0
  236. package/dist/university-content/quizzes/Q-para-501-hook-enforcement.yaml +66 -0
  237. package/dist/university-content/quizzes/Q-para-501-lore-system.yaml +66 -0
  238. package/dist/university-content/quizzes/Q-para-501-platform-agent-ui.yaml +66 -0
  239. package/dist/university-content/quizzes/Q-para-501-review-compliance.yaml +61 -0
  240. package/dist/university-content/quizzes/Q-para-501-sentinel-deep-dive.yaml +86 -0
  241. package/dist/university-content/quizzes/Q-para-501-session-intelligence.yaml +66 -0
  242. package/dist/university-content/quizzes/Q-para-501-symphony-a-mail.yaml +66 -0
  243. package/dist/university-content/quizzes/Q-para-501-symphony-networking.yaml +66 -0
  244. package/dist/university-content/quizzes/Q-para-501-task-management.yaml +46 -0
  245. package/dist/university-content/quizzes/Q-para-601-agent-renaissance.yaml +66 -0
  246. package/dist/university-content/quizzes/Q-para-601-attention-scoring.yaml +56 -0
  247. package/dist/university-content/quizzes/Q-para-601-context-composition.yaml +66 -0
  248. package/dist/university-content/quizzes/Q-para-601-data-sovereignty.yaml +56 -0
  249. package/dist/university-content/quizzes/Q-para-601-event-stream.yaml +66 -0
  250. package/dist/university-content/quizzes/Q-para-601-knowledge-streams.yaml +66 -0
  251. package/dist/university-content/quizzes/Q-para-601-learning-loop.yaml +56 -0
  252. package/dist/university-content/quizzes/Q-para-601-maestro-team-collab.yaml +86 -0
  253. package/dist/university-content/quizzes/Q-para-601-nominations-debates.yaml +66 -0
  254. package/dist/university-content/quizzes/Q-para-701-agent-notebooks.yaml +66 -0
  255. package/dist/university-content/quizzes/Q-para-701-agent-pods-nevrland.yaml +66 -0
  256. package/dist/university-content/quizzes/Q-para-701-agent-profiles.yaml +66 -0
  257. package/dist/university-content/quizzes/Q-para-701-agent-roster.yaml +66 -0
  258. package/dist/university-content/quizzes/Q-para-701-agent-state.yaml +66 -0
  259. package/dist/university-content/quizzes/Q-para-701-learning-feedback-loop.yaml +66 -0
  260. package/dist/university-content/quizzes/Q-para-701-model-tier-resolution.yaml +66 -0
  261. package/dist/university-content/quizzes/Q-para-701-orchestration-enforcement.yaml +66 -0
  262. package/dist/university-content/quizzes/Q-para-701-per-project-rosters.yaml +66 -0
  263. package/dist/university-content/quizzes/Q-para-701-symphony-visibility.yaml +66 -0
  264. package/dist/university-content/quizzes/Q-plsat-v2.yaml +904 -0
  265. package/dist/university-content/quizzes/Q-plsat-v3.yaml +2909 -0
  266. package/dist/university-content/reference.json +2 -2
  267. package/dist/university-ui/assets/{index-CecQrfSn.js → index-nNgzO1il.js} +2 -2
  268. package/dist/university-ui/assets/{index-CecQrfSn.js.map → index-nNgzO1il.js.map} +1 -1
  269. package/dist/university-ui/index.html +1 -1
  270. package/dist/{upgrade-GX56QE3C.js → upgrade-NKN63VTY.js} +2 -2
  271. package/dist/validate-XUQZTF3H.js +9 -0
  272. package/dist/{watch-YCODNIET.js → watch-25GJHQYT.js} +1 -1
  273. package/lore-ui/dist/assets/{index-Bk-K0qgN.js → index-DKhNxgtW.js} +10 -10
  274. package/lore-ui/dist/index.html +1 -1
  275. package/package.json +2 -2
  276. package/platform-ui/dist/assets/{AmbientSection-BYjt75R1.js → AmbientSection-CwatqcBD.js} +1 -1
  277. package/platform-ui/dist/assets/{CanvasSection-rKvA_vZj.js → CanvasSection-dFAthehN.js} +1 -1
  278. package/platform-ui/dist/assets/{DocsSection-CI9K73M-.js → DocsSection-BZ2SFJBZ.js} +1 -1
  279. package/platform-ui/dist/assets/{GitSection-DSGj_c6S.js → GitSection-MNNYU1tO.js} +1 -1
  280. package/platform-ui/dist/assets/{GraphSection-CawN7pC5.js → GraphSection-COYjb4Pt.js} +1 -1
  281. package/platform-ui/dist/assets/LoreSection-B0hUbfsJ.js +1 -0
  282. package/platform-ui/dist/assets/{SentinelSection-DNgoYMH0.js → SentinelSection-BCxW1DCp.js} +1 -1
  283. package/platform-ui/dist/assets/{SymphonySection-C0zfcqv3.js → SymphonySection-BsucZRqy.js} +1 -1
  284. package/platform-ui/dist/assets/{TeamSection-Bzd3Dt9Q.js → TeamSection-C0QNTudW.js} +1 -1
  285. package/platform-ui/dist/assets/{UniversitySection-tBr62R0S.js → UniversitySection-DN1-g9pw.js} +1 -1
  286. package/platform-ui/dist/assets/{index-BaOmyn11.js → index-DwUT8pju.js} +2 -2
  287. package/platform-ui/dist/index.html +1 -1
  288. package/dist/add-P76GEMGF.js +0 -8
  289. package/dist/chunk-JQKKVAAN.js +0 -2
  290. package/dist/chunk-NQ47TA6C.js +0 -111
  291. package/dist/chunk-ODVKPZZ4.js +0 -2
  292. package/dist/chunk-Q2J542ST.js +0 -2
  293. package/dist/chunk-RBLK34IA.js +0 -11
  294. package/dist/chunk-RN4VE6P3.js +0 -521
  295. package/dist/chunk-WS2N27RX.js +0 -3
  296. package/dist/config-schema-GUQY2QN7.js +0 -2
  297. package/dist/decision-loader-2XPZE4EZ.js +0 -2
  298. package/dist/doctor-WMVULMQD.js +0 -2
  299. package/dist/list-5IUGP3ZB.js +0 -7
  300. package/dist/lore-loader-RVQI5GXL.js +0 -2
  301. package/dist/lore-loader-XY5MZRR2.js +0 -2
  302. package/dist/migrate-assessments-GEI5WMI2.js +0 -4
  303. package/dist/portal-compliance-6YR27IQU.js +0 -2
  304. package/dist/quiz-FE5UGAY2.js +0 -10
  305. package/dist/reindex-I6LPAKCC.js +0 -2
  306. package/dist/serve-OY6XYL7F.js +0 -12
  307. package/dist/server-2MNROHF6.js +0 -7
  308. package/dist/session-tracker-MWJAJA6Z.js +0 -2
  309. package/dist/show-BOAVWZPZ.js +0 -7
  310. package/dist/status-A37ECYNJ.js +0 -6
  311. package/dist/sync-DLUBV5HQ.js +0 -2
  312. package/dist/tools-5ITPEPSV.js +0 -2
  313. package/dist/university-content/courses/.purpose +0 -492
  314. package/dist/university-content/courses/para-001.json +0 -166
  315. package/dist/university-content/courses/para-101.json +0 -615
  316. package/dist/university-content/courses/para-201.json +0 -794
  317. package/dist/university-content/courses/para-301.json +0 -830
  318. package/dist/university-content/courses/para-401.json +0 -868
  319. package/dist/university-content/courses/para-501.json +0 -1166
  320. package/dist/university-content/courses/para-601.json +0 -719
  321. package/dist/university-content/courses/para-701.json +0 -807
  322. package/dist/university-content/plsat/.purpose +0 -162
  323. package/dist/university-content/plsat/v2.0.json +0 -760
  324. package/dist/university-content/plsat/v3.0.json +0 -3453
  325. package/dist/validate-C6SMKGYD.js +0 -9
  326. package/platform-ui/dist/assets/LoreSection-oO5dCe6O.js +0 -1
  327. /package/dist/{chunk-BV5PRPLB.js → chunk-IZSBGW6E.js} +0 -0
  328. /package/templates/paradigm/specs/{scan.md → probe.md} +0 -0
@@ -1,3453 +0,0 @@
1
- {
2
- "version": "3.0",
3
- "frameworkVersion": "2.0",
4
- "timeLimit": 5400,
5
- "totalSlots": 128,
6
- "passThreshold": 0.9,
7
- "title": "The PLSAT \u2014 Paradigm Licensure Standardized Assessment Test",
8
- "description": "99 questions. 90 minutes. 90% to pass. Good luck, scholar.",
9
- "items": [
10
- {
11
- "type": "standalone",
12
- "slot": "slot-001",
13
- "course": "para-101",
14
- "variants": [
15
- {
16
- "id": "plsat-001",
17
- "scenario": "You've just joined a team that uses Paradigm. You open the project and see directories like `.paradigm/`, several `.purpose` files, and a `portal.yaml` at the root. A colleague asks you to document a new utility function they wrote in `src/lib/format-currency.ts`.",
18
- "question": "Which symbol prefix should you use to document this utility?",
19
- "choices": {
20
- "A": "`$format-currency` \u2014 because it describes a process (formatting)",
21
- "B": "`!format-currency` \u2014 because it signals a transformation event",
22
- "C": "`#format-currency` \u2014 because it is a documented code unit",
23
- "D": "`~format-currency` \u2014 because it applies a rule (formatting rules)",
24
- "E": "`^format-currency` \u2014 because it gates what format is allowed"
25
- },
26
- "correct": "C",
27
- "explanation": "In Paradigm, every documented code unit uses the `#` (Component) symbol. There are only 5 operational symbols, and `#` is the universal prefix for any code unit \u2014 utilities, services, handlers, components, hooks, you name it. `$` is for multi-step flows, `!` for signals/events, `~` for aspects with code anchors, and `^` for condition gates. A simple utility function is a component."
28
- },
29
- {
30
- "id": "plsat-001b",
31
- "scenario": "A teammate wrote a helper function in `src/utils/validate-email.ts` that checks email format. They want to document it in Paradigm but aren't sure which symbol to use.",
32
- "question": "Which symbol prefix is correct for documenting this utility?",
33
- "choices": {
34
- "A": "`$validate-email` \u2014 it describes a validation process",
35
- "B": "`!validate-email` \u2014 it signals whether an email is valid",
36
- "C": "`#validate-email` \u2014 it is a documented code unit",
37
- "D": "`~validate-email` \u2014 it enforces a validation rule",
38
- "E": "`^validate-email` \u2014 it gates what emails are accepted"
39
- },
40
- "correct": "C",
41
- "explanation": "Every documented code unit in Paradigm uses the `#` (Component) symbol. A utility function is a code unit \u2014 it gets `#`. The other symbols have specific meanings: `$` for multi-step flows, `!` for signals/events, `~` for aspects with code anchors, and `^` for condition gates."
42
- }
43
- ]
44
- },
45
- {
46
- "type": "standalone",
47
- "slot": "slot-002",
48
- "course": "para-101",
49
- "variants": [
50
- {
51
- "id": "plsat-002",
52
- "scenario": "A component manages user authentication state \u2014 tracking the current user, login status, and session tokens. You need to document it in a `.purpose` file.",
53
- "question": "How should this component be documented in Paradigm?",
54
- "choices": {
55
- "A": "`#auth-state` with no tags \u2014 the component name is descriptive enough",
56
- "B": "`#auth-state` with `tags: [state]` \u2014 the `[state]` tag classifies its role",
57
- "C": "`!auth-state` \u2014 authentication state changes should be modeled as signals",
58
- "D": "`$auth-state` \u2014 state management is a multi-step flow",
59
- "E": "`~auth-state` \u2014 authentication state is a cross-cutting concern requiring an aspect"
60
- },
61
- "correct": "B",
62
- "explanation": "State management components use the `#` (Component) symbol with a `[state]` tag from the tag bank. Paradigm uses only 5 operational symbols for structure, and a tag bank for classification. The `[state]` tag tells humans and AI agents that this component's primary role is managing state, while `#` identifies it as a documented code unit. Signals (`!`) are for events, flows (`$`) are for multi-step processes, and aspects (`~`) are for enforced rules with code anchors."
63
- }
64
- ]
65
- },
66
- {
67
- "type": "standalone",
68
- "slot": "slot-003",
69
- "course": "para-101",
70
- "variants": [
71
- {
72
- "id": "plsat-003",
73
- "scenario": "Your team is debating logging practices. One developer argues that using `console.log` everywhere is fine because logs are just for debugging. Another insists on using structured, symbol-aware logging with Paradigm's logger (e.g., `log.component('#checkout-service').info('Processing payment', { amount })`).",
74
- "question": "What is the STRONGEST argument for using Paradigm's structured logger over raw `console.log`?",
75
- "choices": {
76
- "A": "Structured logging is faster at runtime than `console.log`",
77
- "B": "Paradigm's logger automatically fixes bugs when it detects errors in the logs",
78
- "C": "Symbol-aware logging connects runtime behavior to the documented architecture, making it possible to trace issues back to specific components, flows, and gates",
79
- "D": "Using `console.log` will cause Paradigm's CI checks to fail",
80
- "E": "Structured logging is required by law for production applications"
81
- },
82
- "correct": "C",
83
- "explanation": "The core value of Paradigm's structured logger is traceability: every log line is tagged with a symbol (`#component`, `^gate`, `!signal`, etc.), which means you can correlate runtime behavior with the architectural documentation. When something goes wrong, you can trace from the log back to the component in the `.purpose` file, understand its flows, check its gates, and review its history. Raw `console.log` loses this connection. Performance (A) is not the primary benefit. Paradigm doesn't auto-fix bugs (B) or enforce logging by law (E). CI checks (D) depend on team configuration, not a universal rule."
84
- },
85
- {
86
- "id": "plsat-003b",
87
- "scenario": "During a code review, you notice a developer replaced all `log.component('#user-service').info(...)` calls with plain `console.log(...)` to 'simplify things.' The PR has 15 files changed.",
88
- "question": "What is the MOST important reason to reject this change?",
89
- "choices": {
90
- "A": "`console.log` is slower than Paradigm's structured logger",
91
- "B": "Paradigm's logger automatically prevents bugs in production",
92
- "C": "Removing symbol-aware logging breaks the connection between runtime behavior and documented architecture, making it impossible to trace issues to specific components",
93
- "D": "The CI pipeline requires Paradigm logger calls to pass",
94
- "E": "It violates JavaScript best practices to use `console.log`"
95
- },
96
- "correct": "C",
97
- "explanation": "The fundamental value of Paradigm's structured logger is traceability. Each log line tagged with a symbol (#, ^, !, etc.) creates a bridge between runtime behavior and the architectural documentation. Removing this breaks the ability to trace from a log entry back to a component, its flows, gates, and history in .purpose files."
98
- }
99
- ]
100
- },
101
- {
102
- "type": "standalone",
103
- "slot": "slot-004",
104
- "course": "para-101",
105
- "variants": [
106
- {
107
- "id": "plsat-004",
108
- "scenario": "You're setting up a brand new project with Paradigm. You run `paradigm shift` and it creates the `.paradigm/` directory structure. Your project will have a REST API with several endpoints that require condition checks \u2014 such as authentication, feature flags, and rate limiting.",
109
- "question": "Which file MUST you create at the project root?",
110
- "choices": {
111
- "A": "`auth.yaml` \u2014 Paradigm's dedicated authentication config",
112
- "B": "`gates.yaml` \u2014 where all `^gate` definitions live",
113
- "C": "`portal.yaml` \u2014 where gates and protected routes are defined",
114
- "D": "`.paradigm/security.yaml` \u2014 security config goes in the paradigm directory",
115
- "E": "No file needed \u2014 gates are defined inline in `.purpose` files only"
116
- },
117
- "correct": "C",
118
- "explanation": "`portal.yaml` is REQUIRED at the project root whenever your project has protected routes. It defines gates (`^` symbols) with their check expressions, and maps routes to the gates that protect them. Gates can represent any condition checkpoint \u2014 authentication, authorization, feature flags, rate limits, or custom business rules. Gates can also appear in `.purpose` files for documentation, but `portal.yaml` is the authoritative source for route protection."
119
- }
120
- ]
121
- },
122
- {
123
- "type": "standalone",
124
- "slot": "slot-006",
125
- "course": "para-101",
126
- "variants": [
127
- {
128
- "id": "plsat-006",
129
- "scenario": "Your team uses the following directory structure:\n\n```\nsrc/\n middleware/auth.ts\n events/payment-events.ts\n services/billing.ts\n flows/onboarding.ts\n aspects/rate-limiter.ts\n```\n\nA new developer asks which Paradigm logger method to use in each file. Your team follows the conventional directory-to-symbol mapping as a guideline.",
130
- "question": "Which file-to-logger mapping is INCORRECT?",
131
- "choices": {
132
- "A": "`middleware/auth.ts` \u2192 `log.gate('^auth-check')`",
133
- "B": "`events/payment-events.ts` \u2192 `log.signal('!payment-received')`",
134
- "C": "`services/billing.ts` \u2192 `log.component('#billing-service')`",
135
- "D": "`flows/onboarding.ts` \u2192 `log.signal('!onboarding-started')`",
136
- "E": "`aspects/rate-limiter.ts` \u2192 `log.aspect('~rate-limited')`"
137
- },
138
- "correct": "D",
139
- "explanation": "Files in the `flows/` directory conventionally correspond to `$` (Flow) symbols and should use `log.flow()`, not `log.signal()`. The correct call would be `log.flow('$onboarding').info(...)`. The conventional directory-to-symbol mapping is: middleware/auth/guards \u2192 `^` (gate), events/handlers/listeners \u2192 `!` (signal), services/lib/components \u2192 `#` (component), flows/sagas/workflows \u2192 `$` (flow), aspects/rules \u2192 `~` (aspect). While teams may adapt these conventions to their needs, this mapping provides a consistent default that helps developers and AI agents reason about the codebase."
140
- },
141
- {
142
- "id": "plsat-006b",
143
- "scenario": "Your team follows directory-to-symbol conventions. A new file `src/guards/feature-flags.ts` is created to check whether features are enabled before allowing access. A developer uses `log.component('#feature-flags')` in the file.",
144
- "question": "Is this logger usage correct according to conventional directory mapping?",
145
- "choices": {
146
- "A": "Yes \u2014 all code units use `log.component()`",
147
- "B": "No \u2014 `guards/` maps to `^` (gate), so it should be `log.gate('^feature-flags')`",
148
- "C": "No \u2014 feature flags are signals, so it should be `log.signal('!feature-flags')`",
149
- "D": "No \u2014 feature flags are aspects, so it should be `log.aspect('~feature-flags')`",
150
- "E": "Yes \u2014 but the symbol should be `#feature-flags-guard` for clarity"
151
- },
152
- "correct": "B",
153
- "explanation": "The `guards/` directory conventionally maps to `^` (gate) symbols and should use `log.gate()`. Guards, middleware, and auth directories all map to gates because they represent condition checkpoints. Feature flags check conditions before allowing access \u2014 that's a gate. The correct call is `log.gate('^feature-flags').info(...)`."
154
- }
155
- ]
156
- },
157
- {
158
- "type": "standalone",
159
- "slot": "slot-007",
160
- "course": "para-101",
161
- "variants": [
162
- {
163
- "id": "plsat-007",
164
- "scenario": "Your team is prototyping a new feature \u2014 an experimental search widget that may or may not ship. You've built the component and want to document it in a `.purpose` file. You also want to make it clear to other developers and AI agents that this is an idea in progress, not a committed part of the product.",
165
- "question": "How should you classify this component to indicate it is experimental?",
166
- "choices": {
167
- "A": "Use a special `?experimental-widget` symbol prefix reserved for ideas",
168
- "B": "Add `status: experimental` to the component definition",
169
- "C": "Add the `[idea]` tag to the component: `tags: [idea]`",
170
- "D": "Create an aspect `~experimental` and apply it to the component",
171
- "E": "Comment it out in the `.purpose` file with `# EXPERIMENTAL`"
172
- },
173
- "correct": "C",
174
- "explanation": "Paradigm's tag bank includes the `[idea]` tag for exactly this purpose. You'd define the component as `#experimental-widget` with `tags: [idea]`. Tags are the classification layer in Paradigm: the 5 operational symbols (`#`, `$`, `^`, `!`, `~`) provide structure, while tags from the tag bank provide classification metadata like `[idea]`, `[feature]`, `[state]`, `[integration]`, `[critical]`, etc. Choice B (status field) is plausible but `status` is typically for lifecycle states like `active` or `deprecated`, not for ideation. There is no `?` symbol prefix in Paradigm (A). Commenting it out (E) removes it from the symbol graph entirely."
175
- }
176
- ]
177
- },
178
- {
179
- "type": "standalone",
180
- "slot": "slot-008",
181
- "course": "para-101",
182
- "variants": [
183
- {
184
- "id": "plsat-008",
185
- "scenario": "A `.paradigm/tags.yaml` file has three sections: `core`, `project`, and `suggested`. The `core` section contains tags like `[feature]`, `[integration]`, `[state]`, `[critical]`, and `[security]`.",
186
- "question": "What is the `suggested` section for?",
187
- "choices": {
188
- "A": "Tags that Paradigm automatically generates based on code analysis",
189
- "B": "Tags proposed by AI agents awaiting human approval before use",
190
- "C": "Tags from the Paradigm community marketplace that can be installed",
191
- "D": "Tags that are deprecated and will be removed in the next version",
192
- "E": "Tags that the framework suggests but are optional to implement"
193
- },
194
- "correct": "B",
195
- "explanation": "The `suggested` section in `tags.yaml` holds tags proposed by AI agents (via `paradigm_tags_suggest`) that haven't been approved by a human yet. This is part of Paradigm's governance model: AI can propose new classifications, but a human must promote them to `project` or `core` before they become official. This prevents tag sprawl and keeps the taxonomy intentional. The `core` tags ship with Paradigm, `project` tags are team-defined, and `suggested` are AI proposals awaiting review."
196
- }
197
- ]
198
- },
199
- {
200
- "type": "standalone",
201
- "slot": "slot-009",
202
- "course": "para-101",
203
- "variants": [
204
- {
205
- "id": "plsat-009",
206
- "scenario": "You're reading a project's `.paradigm/config.yaml` and see:\n\n```yaml\ndiscipline: fullstack\nconventions:\n naming: kebab-case\n components: PascalCase\n```",
207
- "question": "Based on these conventions, which symbol ID is correctly formatted?",
208
- "choices": {
209
- "A": "`#paymentService` \u2014 camelCase for services",
210
- "B": "`#payment-service` \u2014 kebab-case for IDs, PascalCase for class-like references",
211
- "C": "`#Payment_Service` \u2014 PascalCase with underscores",
212
- "D": "`#PAYMENT_SERVICE` \u2014 SCREAMING_SNAKE for services",
213
- "E": "`#payment.service` \u2014 dot notation for namespaced components"
214
- },
215
- "correct": "B",
216
- "explanation": "Paradigm conventions specify kebab-case for all symbol IDs. The `components: PascalCase` convention means that when referring to class-like components in prose or code, you use PascalCase (e.g., `#PaymentService`), but the canonical ID in `.purpose` files is kebab-case (`#payment-service`). This dual convention is documented in the CLAUDE.md: 'Use kebab-case for all symbol IDs' and 'Use PascalCase for class-like components'."
217
- }
218
- ]
219
- },
220
- {
221
- "type": "standalone",
222
- "slot": "slot-011",
223
- "course": "para-201",
224
- "variants": [
225
- {
226
- "id": "plsat-011",
227
- "scenario": "You're designing an e-commerce checkout process that involves: (1) validating the cart, (2) checking inventory, (3) processing payment via Stripe, (4) creating the order record, and (5) sending a confirmation email. This spans 5 different components across 3 directories.",
228
- "question": "How should this be documented in Paradigm?",
229
- "choices": {
230
- "A": "As a single `#checkout` component with sub-steps in its description",
231
- "B": "As a `$checkout-flow` with ordered steps referencing each component",
232
- "C": "As five separate `!` signals chained together",
233
- "D": "As a `~checkout-required` aspect applied to all five components",
234
- "E": "As five `^` gates that must be passed sequentially"
235
- },
236
- "correct": "B",
237
- "explanation": "A multi-step process spanning 3+ components is the textbook definition of a Flow (`$`). You'd define `$checkout-flow` with ordered steps, each referencing the responsible component and action: `#cart-validator` validates, `#inventory-checker` checks stock, `#stripe-service` processes payment, `#order-service` creates the record, `#email-sender` confirms. Flows document the sequence and make it visible to `paradigm_flows_affected` for impact analysis. Signals (`!`) may be emitted during the flow, but they don't define the sequence."
238
- },
239
- {
240
- "id": "plsat-011b",
241
- "scenario": "Your team is building a user registration process: (1) validate form input, (2) check if email is taken, (3) hash the password, (4) create the user record, (5) send a welcome email, (6) trigger analytics. This spans components across `validators/`, `services/`, and `integrations/`.",
242
- "question": "What is the correct Paradigm symbol for documenting this process?",
243
- "choices": {
244
- "A": "`#user-registration` \u2014 it's a single feature component",
245
- "B": "`$user-registration` \u2014 it's a multi-step flow spanning multiple components",
246
- "C": "`!user-registered` \u2014 it's an event that triggers side effects",
247
- "D": "`~registration-required` \u2014 it's a cross-cutting concern",
248
- "E": "`^registration-valid` \u2014 it's a validation gate"
249
- },
250
- "correct": "B",
251
- "explanation": "A multi-step process spanning 3+ components is a Flow (`$`). You'd define `$user-registration` with ordered steps referencing each responsible component. Flows make the sequence visible for impact analysis via `paradigm_flows_affected`. While `!user-registered` might be emitted at the END of the flow, the process itself is the flow."
252
- }
253
- ]
254
- },
255
- {
256
- "type": "passage",
257
- "slot": "passage-portal-review",
258
- "course": "para-201",
259
- "passage": "Your team's portal.yaml for a project management app:\n\n```yaml\nversion: \"1.0\"\ngates:\n ^authenticated:\n description: User must be logged in\n check: req.user != null\n ^project-member:\n description: User must be a member of the project\n check: project.members.includes(req.user.id)\n requires: [\"^authenticated\"]\n ^project-admin:\n description: User must be an admin of the project\n check: project.admins.includes(req.user.id)\n requires: [\"^authenticated\"]\n ^comment-author:\n description: User must be the comment author\n check: comment.authorId === req.user.id\n\nroutes:\n \"GET /api/projects\": [^authenticated]\n \"POST /api/projects\": [^authenticated]\n \"GET /api/projects/:id\": [^authenticated, ^project-member]\n \"PUT /api/projects/:id\": [^project-admin]\n \"DELETE /api/projects/:id\": [^project-admin]\n \"POST /api/projects/:id/comments\": [^authenticated, ^project-member]\n```",
260
- "questions": [
261
- {
262
- "slot": "pg-portal-q1",
263
- "variants": [
264
- {
265
- "id": "plsat-pg1-q1a",
266
- "scenario": "",
267
- "question": "A new endpoint `DELETE /api/projects/:id` needs to be added. Only project admins should be able to delete projects. What is the correct route entry in portal.yaml?",
268
- "choices": {
269
- "A": "`\"DELETE /api/projects/:id\": [^authenticated, ^project-admin]`",
270
- "B": "`\"DELETE /api/projects/:id\": [^project-admin]`",
271
- "C": "`\"DELETE /api/projects/:id\": [^authenticated]`",
272
- "D": "`\"DELETE /api/projects/:id\": [^project-admin, ^authenticated]`",
273
- "E": "`\"DELETE /api/projects/:id\": [^admin]`"
274
- },
275
- "correct": "B",
276
- "explanation": "Since `^project-admin` already has `requires: [\"^authenticated\"]`, listing `^authenticated` in the route is redundant. The gate dependency chain means `^project-admin` will automatically enforce `^authenticated` first. The route only needs to specify `[^project-admin]`."
277
- }
278
- ]
279
- },
280
- {
281
- "slot": "pg-portal-q2",
282
- "variants": [
283
- {
284
- "id": "plsat-pg1-q2a",
285
- "scenario": "",
286
- "question": "A colleague adds a new endpoint: `DELETE /api/projects/:id/comments/:commentId`. Only the comment author should be able to delete their own comment. Which gate configuration follows Paradigm best practices?",
287
- "choices": {
288
- "A": "Add `[^authenticated, ^project-admin]` \u2014 admins can delete anything",
289
- "B": "Add `[^authenticated, ^project-member, ^comment-author]` \u2014 must be a member AND the author",
290
- "C": "Add `[^comment-author]` \u2014 authorship implies authentication and membership",
291
- "D": "Add `[^authenticated, ^comment-author]` \u2014 logged in and owns the comment",
292
- "E": "Don't add it to portal.yaml \u2014 handle it in the route handler code only"
293
- },
294
- "correct": "B",
295
- "explanation": "The correct approach is `[^authenticated, ^project-member, ^comment-author]`. Being the comment author doesn't automatically mean you're authenticated or a project member. Each gate has one responsibility: `^authenticated` checks login, `^project-member` checks project access, `^comment-author` checks ownership. Choice E violates the cardinal rule: all protected routes MUST be in portal.yaml."
296
- }
297
- ]
298
- },
299
- {
300
- "slot": "pg-portal-q3",
301
- "variants": [
302
- {
303
- "id": "plsat-pg1-q3a",
304
- "scenario": "",
305
- "question": "Looking at the existing routes, the `PUT /api/projects/:id` route uses `[^project-admin]` without `^authenticated`. Is this a problem?",
306
- "choices": {
307
- "A": "Yes \u2014 every route must explicitly list `^authenticated` for security",
308
- "B": "No \u2014 `^project-admin` has `requires: [\"^authenticated\"]`, so authentication is enforced implicitly",
309
- "C": "Yes \u2014 the `requires` field is only documentation, not enforcement",
310
- "D": "No \u2014 PUT requests don't need authentication because they're idempotent",
311
- "E": "Yes \u2014 portal.yaml should always list gates in order of evaluation"
312
- },
313
- "correct": "B",
314
- "explanation": "This is correct portal.yaml usage. The `^project-admin` gate has `requires: [\"^authenticated\"]`, which means authentication is automatically enforced as a prerequisite. Listing `^authenticated` explicitly would be redundant. The `requires` field creates a dependency chain that gates must satisfy before evaluation."
315
- }
316
- ]
317
- }
318
- ]
319
- },
320
- {
321
- "type": "passage",
322
- "slot": "passage-purpose-review",
323
- "course": "para-101",
324
- "passage": "You're reviewing a `.purpose` file submitted in a pull request for `src/notifications/`:\n\n```yaml\nname: Notifications\ncomponents:\n #email-sender:\n description: Sends transactional emails\n file: email.ts\n tags: [integration, sendgrid]\n #push-notifier:\n description: Sends push notifications\n file: push.ts\n #notification-preferences:\n description: User notification settings\n file: preferences.ts\n tags: [state]\nsignals:\n !notification-sent:\n description: Fires after any notification is delivered\n emitters: [\"#email-sender\", \"#push-notifier\"]\n !preferences-updated:\n description: User changed notification settings\n emitters: [\"#notification-preferences\"]\n```",
325
- "questions": [
326
- {
327
- "slot": "pg-purpose-q1",
328
- "variants": [
329
- {
330
- "id": "plsat-pg2-q1a",
331
- "scenario": "",
332
- "question": "What is the correct way to reference `#email-sender` from another `.purpose` file in a different directory?",
333
- "choices": {
334
- "A": "`notifications/#email-sender` \u2014 use the directory path as a namespace",
335
- "B": "`#email-sender` \u2014 symbol IDs are globally unique across the project",
336
- "C": "`&email-sender` \u2014 integrations use the `&` prefix when cross-referenced",
337
- "D": "`#notifications.email-sender` \u2014 use dot notation for cross-module references",
338
- "E": "`import: #email-sender from notifications` \u2014 use import syntax"
339
- },
340
- "correct": "B",
341
- "explanation": "Symbol IDs are globally unique across the entire Paradigm project. You reference `#email-sender` the same way everywhere \u2014 no namespacing, no path prefixes, no import syntax. Paradigm's index tracks where each symbol is defined."
342
- }
343
- ]
344
- },
345
- {
346
- "slot": "pg-purpose-q2",
347
- "variants": [
348
- {
349
- "id": "plsat-pg2-q2a",
350
- "scenario": "",
351
- "question": "This `.purpose` file is missing a critical top-level field. What is it?",
352
- "choices": {
353
- "A": "`version` \u2014 every purpose file must specify a schema version",
354
- "B": "`description` \u2014 every purpose file should describe what the module does",
355
- "C": "`tags` \u2014 top-level tags are required for indexing",
356
- "D": "`gates` \u2014 every module must define its authorization requirements",
357
- "E": "`flows` \u2014 signals require at least one flow to be meaningful"
358
- },
359
- "correct": "B",
360
- "explanation": "Every `.purpose` file should have a `description` field explaining what the module/directory does. While `name` identifies it, `description` provides the context that AI agents and developers need to understand the module's role."
361
- }
362
- ]
363
- },
364
- {
365
- "slot": "pg-purpose-q3",
366
- "variants": [
367
- {
368
- "id": "plsat-pg2-q3a",
369
- "scenario": "",
370
- "question": "A developer wants to add a new component `#sms-sender` to this module that sends SMS via Twilio. Which definition follows the existing patterns in this file?",
371
- "choices": {
372
- "A": "`#sms-sender: { description: Sends SMS, file: sms.ts, tags: [integration, twilio] }` and add it to `!notification-sent` emitters",
373
- "B": "`!sms-sent: { description: SMS notification signal }` \u2014 SMS is an event",
374
- "C": "`#sms-sender: { description: Sends SMS, file: sms.ts }` with no tags or signal updates",
375
- "D": "`~sms-required: { description: SMS must be available }` \u2014 SMS availability is an aspect",
376
- "E": "`$sms-flow: { description: Send SMS process }` \u2014 sending SMS is a flow"
377
- },
378
- "correct": "A",
379
- "explanation": "Following the file's existing pattern: `#email-sender` has `tags: [integration, sendgrid]` and is listed in `!notification-sent` emitters. The new `#sms-sender` should mirror this: use `tags: [integration, twilio]` for classification and add it as an emitter of `!notification-sent`. Consistency with existing patterns is key."
380
- }
381
- ]
382
- }
383
- ]
384
- },
385
- {
386
- "type": "standalone",
387
- "slot": "slot-014",
388
- "course": "para-201",
389
- "variants": [
390
- {
391
- "id": "plsat-014",
392
- "scenario": "You're implementing a webhook handler that receives events from a third-party payment provider. When a payment succeeds, your system needs to: update the order status, send a receipt email, and notify the analytics service. These are independent side effects that don't need to happen in order.",
393
- "question": "What is the MOST appropriate Paradigm modeling for these independent side effects?",
394
- "choices": {
395
- "A": "A `$payment-webhook-flow` with three sequential steps",
396
- "B": "A `!payment-succeeded` signal with three subscriber components",
397
- "C": "Three separate `^` gates that the webhook must pass",
398
- "D": "A `~payment-side-effects` aspect applied to the webhook handler",
399
- "E": "Three `#` components with no formal connection between them"
400
- },
401
- "correct": "B",
402
- "explanation": "When side effects are independent and don't require ordering, a Signal (`!`) is the right model. You define `!payment-succeeded` and document three subscriber components (`#order-service`, `#email-sender`, `#analytics-tracker`) that react to it. Signals are for events that trigger side effects. A Flow (`$`) would be appropriate if the steps needed to happen in a specific order or if one step depended on the output of another. Gates (`^`) are for condition checks, not business logic."
403
- },
404
- {
405
- "id": "plsat-014b",
406
- "scenario": "When a user completes their profile, three independent things should happen: (1) update the user's completion badge, (2) notify their team admin, (3) log the event to analytics. None of these depend on each other.",
407
- "question": "How should these independent reactions be modeled in Paradigm?",
408
- "choices": {
409
- "A": "A `$profile-completion-flow` with three sequential steps",
410
- "B": "A `!profile-completed` signal with three subscriber components",
411
- "C": "Three `^` gates that must all pass",
412
- "D": "A single `#profile-handler` component that does all three things",
413
- "E": "Three `~` aspects applied to the profile component"
414
- },
415
- "correct": "B",
416
- "explanation": "Independent side effects that don't require ordering are modeled as Signal (`!`) subscribers. Define `!profile-completed` and document three subscriber components that react to it. A Flow (`$`) would be appropriate only if the steps needed specific ordering or depended on each other's output."
417
- }
418
- ]
419
- },
420
- {
421
- "type": "standalone",
422
- "slot": "slot-015",
423
- "course": "para-201",
424
- "variants": [
425
- {
426
- "id": "plsat-015",
427
- "scenario": "Your project's discipline is `fullstack`. You're adding a new feature: team invitations. The feature involves a UI component for the invite form, an API endpoint, a service for generating invite tokens, and an email sender. You create a new directory `src/features/team-invites/`.",
428
- "question": "What should the `.purpose` file in this directory contain at minimum?",
429
- "choices": {
430
- "A": "Just a `name` field \u2014 Paradigm will auto-discover the rest",
431
- "B": "A `name`, `description`, and at least one `#component` entry",
432
- "C": "A full `$flow` definition with all steps documented",
433
- "D": "Gate definitions (`^`) for all protected endpoints in this feature",
434
- "E": "Signal definitions (`!`) for all events this feature emits"
435
- },
436
- "correct": "B",
437
- "explanation": "At minimum, a `.purpose` file needs a `name` and `description` (to orient agents and developers) and at least one `#component` documenting a code unit. Gates go in `portal.yaml` (the authoritative source for route protection), not in purpose files. Flows and signals are important but not required for every feature \u2014 they should be added when the feature has multi-step processes or emits events. Paradigm does not auto-discover components; they must be explicitly documented."
438
- }
439
- ]
440
- },
441
- {
442
- "type": "standalone",
443
- "slot": "slot-017",
444
- "course": "para-201",
445
- "variants": [
446
- {
447
- "id": "plsat-017",
448
- "scenario": "Your team defines the following flow:\n\n```yaml\nflows:\n $user-onboarding:\n description: New user setup after registration\n steps:\n - component: \"#email-verifier\"\n action: send-verification-email\n - component: \"#profile-wizard\"\n action: collect-profile-data\n - component: \"#team-assigner\"\n action: assign-default-team\n - component: \"#welcome-emailer\"\n action: send-welcome-email\n signals: [\"!user-onboarded\"]\n```\n\nYou need to modify `#email-verifier` to use a new email provider.",
449
- "question": "Before making the code change, what should you do FIRST?",
450
- "choices": {
451
- "A": "Run `paradigm_search` to find all references to `#email-verifier`",
452
- "B": "Read the `#email-verifier` source file to understand the current implementation",
453
- "C": "Call `paradigm_ripple` on `#email-verifier` to understand the impact",
454
- "D": "Call `paradigm_orchestrate_inline` to plan the migration",
455
- "E": "Update the `.purpose` file first to reflect the new provider"
456
- },
457
- "correct": "C",
458
- "explanation": "Before modifying ANY symbol, the first step is always `paradigm_ripple`. This shows you what depends on `#email-verifier` directly and indirectly \u2014 in this case, you'd discover it's part of `$user-onboarding` and any other flows or components that reference it. This prevents you from making changes that break downstream dependencies. After ripple analysis, you'd read the source (B), check wisdom/history, and then implement. Orchestration (D) is for complex multi-file tasks, not single-component changes."
459
- }
460
- ]
461
- },
462
- {
463
- "type": "standalone",
464
- "slot": "slot-018",
465
- "course": "para-201",
466
- "variants": [
467
- {
468
- "id": "plsat-018",
469
- "scenario": "You're defining a gate for a multi-tenant SaaS application. Users can belong to multiple organizations, and each organization has its own resources. You need a gate that checks whether the requesting user is a member of the organization that owns the requested resource.",
470
- "question": "Which gate definition follows the Paradigm portal pattern?",
471
- "choices": {
472
- "A": "```yaml\n^org-member:\n description: User is a member of the organization AND has read permission\n check: org.members.includes(req.user.id) && req.user.permissions.read\n```",
473
- "B": "```yaml\n^org-member:\n description: User is a member of the resource's organization\n check: org.members.includes(req.user.id)\n```",
474
- "C": "```yaml\n^org-access:\n description: User can access organization resources with full CRUD\n check: org.members.includes(req.user.id) && req.user.role !== 'viewer'\n```",
475
- "D": "```yaml\n^resource-check:\n description: Validates resource exists and user has access\n check: resource != null && org.members.includes(req.user.id)\n```",
476
- "E": "```yaml\n^member-or-admin:\n description: User is either a member or an admin of the organization\n check: org.members.includes(req.user.id) || org.admins.includes(req.user.id)\n```"
477
- },
478
- "correct": "B",
479
- "explanation": "Paradigm's gate philosophy is 'one responsibility per gate.' Choice B does exactly one thing: checks organization membership. Choice A bundles membership with permissions (two responsibilities). Choice C adds a role check. Choice D validates resource existence (that's a different concern). Choice E conflates membership and admin roles. If you need permission checks, create `^org-reader`, `^org-writer` as separate gates with `requires: [\"^org-member\"]`. Keep gates minimal and composable."
480
- }
481
- ]
482
- },
483
- {
484
- "type": "standalone",
485
- "slot": "slot-020",
486
- "course": "para-201",
487
- "variants": [
488
- {
489
- "id": "plsat-020",
490
- "scenario": "You open `.paradigm/config.yaml` and see `discipline: fullstack`. You're curious what this means and whether it affects how symbols are mapped.",
491
- "question": "What does the `discipline` field control in Paradigm?",
492
- "choices": {
493
- "A": "It determines which programming language the project uses",
494
- "B": "It configures the symbol-to-directory mapping and recommended patterns for the project type",
495
- "C": "It restricts which symbols are allowed \u2014 e.g., some disciplines don't use flows",
496
- "D": "It's purely cosmetic \u2014 shown in the dashboard but has no functional effect",
497
- "E": "It determines the deployment pipeline configuration"
498
- },
499
- "correct": "B",
500
- "explanation": "The `discipline` field tells Paradigm what kind of project this is, which influences symbol mappings (e.g., where components vs. gates typically live), suggested patterns, and how AI agents reason about the codebase. A `fullstack` discipline suggests `middleware/` maps to gates, `services/` to components, etc. Different disciplines (e.g., `cli`, `library`, `embedded`) would have different default mappings. Since v2, Paradigm auto-detects the discipline from project structure at init time. It doesn't restrict symbols \u2014 all 5 are always available."
501
- }
502
- ]
503
- },
504
- {
505
- "type": "standalone",
506
- "slot": "slot-021",
507
- "course": "para-201",
508
- "variants": [
509
- {
510
- "id": "plsat-021",
511
- "scenario": "While reviewing a PR, you notice a developer added a new webhook endpoint:\n\n```typescript\n// src/api/webhooks/stripe.ts\napp.post('/api/webhooks/stripe', async (req, res) => {\n const event = verifyStripeSignature(req);\n if (event.type === 'payment_intent.succeeded') {\n await updateOrderStatus(event.data);\n await sendReceipt(event.data);\n }\n res.json({ received: true });\n});\n```\n\nThe developer did not update any Paradigm files.",
512
- "question": "Which Paradigm files should be updated? Select the MOST complete answer.",
513
- "choices": {
514
- "A": "Only `portal.yaml` \u2014 add the webhook route with appropriate gates",
515
- "B": "Only the nearest `.purpose` file \u2014 add `#stripe-webhook-handler` as a component",
516
- "C": "Both `portal.yaml` and the nearest `.purpose` file",
517
- "D": "The `.purpose` file, `portal.yaml`, and add a `!payment-succeeded` signal definition",
518
- "E": "No updates needed \u2014 webhooks are external and don't need Paradigm documentation"
519
- },
520
- "correct": "D",
521
- "explanation": "The most complete answer includes: (1) The `.purpose` file needs a `#stripe-webhook-handler` component with `tags: [integration, stripe]`. (2) `portal.yaml` needs the route \u2014 even webhook endpoints may need gates (e.g., signature verification as `^stripe-signature-valid`). (3) The payment success event should be documented as `!payment-succeeded` since it triggers side effects (order update, receipt). This is the Paradigm principle: if it exists in code, it should exist in the symbol graph."
522
- }
523
- ]
524
- },
525
- {
526
- "type": "standalone",
527
- "slot": "slot-022",
528
- "course": "para-201",
529
- "variants": [
530
- {
531
- "id": "plsat-022",
532
- "scenario": "A developer writes this commit message:\n\n```\nadded apple pay button and updated checkout\n```",
533
- "question": "What is wrong with this commit message according to Paradigm conventions?",
534
- "choices": {
535
- "A": "It should use past tense ('added' is correct, actually)",
536
- "B": "It's missing the conventional commit type, primary symbol in parentheses, and the `Symbols:` trailer",
537
- "C": "It should be in ALL CAPS for visibility",
538
- "D": "It should reference the Jira ticket number instead of symbols",
539
- "E": "Nothing is wrong \u2014 commit messages are personal preference"
540
- },
541
- "correct": "B",
542
- "explanation": "Paradigm commit messages follow a strict format: `type(#primary-symbol): description` in the subject, symbol references in the body, and a `Symbols:` trailer for machine parsing. The correct message would be:\n\n```\nfeat(#payment-form): add Apple Pay support\n\n- Add #apple-pay-button component\n- Update $checkout-flow with new payment step\n\nSymbols: #payment-form, #apple-pay-button, $checkout-flow\n```\n\nThe `Symbols:` trailer is parsed by the post-commit hook for automatic history capture."
543
- }
544
- ]
545
- },
546
- {
547
- "type": "standalone",
548
- "slot": "slot-023",
549
- "course": "para-201",
550
- "variants": [
551
- {
552
- "id": "plsat-023",
553
- "scenario": "You're building a real-time notification system. Notifications can arrive via WebSocket, and the user can mark them as read, archive them, or delete them. The system also needs to handle notification preferences (email, push, in-app) and batching for high-volume scenarios.",
554
- "question": "A team member suggests modeling each notification action (read, archive, delete) as a separate `$` flow. What is the BEST response?",
555
- "choices": {
556
- "A": "Agree \u2014 each action is a distinct process that should have its own flow",
557
- "B": "Disagree \u2014 single actions are not flows; use `#` components for each action and a `$notification-lifecycle` flow for the overall process",
558
- "C": "Disagree \u2014 these should all be `!` signals since they're user-triggered events",
559
- "D": "Agree, but only if each action involves 3+ components",
560
- "E": "Disagree \u2014 model them as `^` gates since they require condition checks"
561
- },
562
- "correct": "B",
563
- "explanation": "Flows (`$`) are for multi-step processes spanning 3+ components. A single action like 'mark as read' is just a component method, not a flow. The correct modeling is: `#notification-reader`, `#notification-archiver`, etc. as components, with `!notification-read`, `!notification-archived` as signals for side effects. If the OVERALL lifecycle (receive \u2192 display \u2192 interact \u2192 archive/delete) is worth documenting, THAT is the flow: `$notification-lifecycle`. Choice D gets close but misses the key insight about modeling the lifecycle."
564
- }
565
- ]
566
- },
567
- {
568
- "type": "standalone",
569
- "slot": "slot-024",
570
- "course": "para-201",
571
- "variants": [
572
- {
573
- "id": "plsat-024",
574
- "scenario": "Your project has the following aspect definition:\n\n```yaml\n~audit-required:\n description: Financial operations must produce audit logs\n anchors:\n - src/middleware/audit.ts:15-35\n - src/decorators/auditable.ts:1-20\n applies-to: [\"#*Service\"]\n enforcement: middleware\n```\n\nA colleague refactors `audit.ts` and moves the audit logic from lines 15-35 to lines 50-70.",
575
- "question": "What happens if the anchors are not updated?",
576
- "choices": {
577
- "A": "Nothing \u2014 anchors are just documentation hints and aren't validated",
578
- "B": "`paradigm doctor` will report stale anchors, and `paradigm_aspect_check` will flag the mismatch",
579
- "C": "The application will crash because the aspect can't find its enforcement code",
580
- "D": "The audit middleware will stop working because Paradigm controls execution",
581
- "E": "The CI pipeline will block the merge due to anchor validation"
582
- },
583
- "correct": "B",
584
- "explanation": "Paradigm is a documentation/intelligence layer, not a runtime. Stale anchors won't crash your app or stop middleware from working. However, `paradigm doctor` (the validation command) and `paradigm_aspect_check` (the MCP tool) will detect that the anchored lines no longer match the expected code. This is important because anchors are the mechanism that keeps aspects grounded in real code rather than becoming aspirational documentation. The developer should update anchors to `src/middleware/audit.ts:50-70` as part of the refactor."
585
- }
586
- ]
587
- },
588
- {
589
- "type": "standalone",
590
- "slot": "slot-025",
591
- "course": "para-201",
592
- "variants": [
593
- {
594
- "id": "plsat-025",
595
- "scenario": "You're adding a new API endpoint `POST /api/billing/invoices` to your project. Before writing any code, you want to follow Paradigm best practices.",
596
- "question": "What is the recommended sequence of steps?",
597
- "choices": {
598
- "A": "Write the code \u2192 Update `.purpose` file \u2192 Update `portal.yaml` \u2192 Commit",
599
- "B": "Call `paradigm_gates_for_route` \u2192 Update `portal.yaml` \u2192 Write the code \u2192 Update `.purpose` file \u2192 Commit",
600
- "C": "Update `portal.yaml` \u2192 Write the code \u2192 Run `paradigm doctor` \u2192 Commit",
601
- "D": "Write the code \u2192 Run `paradigm scan` \u2192 Let it auto-generate the purpose file \u2192 Commit",
602
- "E": "Call `paradigm_orchestrate_inline` \u2192 Spawn agents \u2192 Let agents handle everything"
603
- },
604
- "correct": "B",
605
- "explanation": "The recommended flow for adding endpoints is: (1) Call `paradigm_gates_for_route` to get gate suggestions for `POST /api/billing/invoices`, (2) Update `portal.yaml` with the route and its gates, (3) Implement the endpoint with proper gate enforcement, (4) Update the nearest `.purpose` file with the new `#` component, signals, etc. (5) Commit with proper Paradigm commit format. This 'portal first' approach ensures security is designed before implementation. Choice D is wrong because `paradigm scan` discovers existing symbols but doesn't auto-generate purpose files from code."
606
- }
607
- ]
608
- },
609
- {
610
- "type": "standalone",
611
- "slot": "slot-026",
612
- "course": "para-301",
613
- "variants": [
614
- {
615
- "id": "plsat-026",
616
- "scenario": "You're in the middle of a long Claude Code session. You've made 47 tool calls, modified 12 files, and you're about to start a complex refactor of the payment system. You vaguely recall something about context management but can't remember the details.",
617
- "question": "What should you do before starting the refactor?",
618
- "choices": {
619
- "A": "Just continue \u2014 context management is handled automatically",
620
- "B": "Call `paradigm_session_health` to see if a handoff is recommended",
621
- "C": "Start a new Claude session immediately to get fresh context",
622
- "D": "Save all files and run `paradigm scan` to rebuild the index",
623
- "E": "Call `paradigm_session_stats` and if over 100 tool calls, panic"
624
- },
625
- "correct": "B",
626
- "explanation": "The protocol says to call `paradigm_session_health` periodically (every 10-15 tool calls) during long sessions. At 47 tool calls, you're well overdue for a check. This tool analyzes your context window usage and recommends whether to continue, prepare a handoff, or urgently wrap up. If usage is over 85%, you should prioritize completing your current task and prepare a handoff with `paradigm_handoff_prepare`. Don't just start a new session (C) without preparing a handoff \u2014 you'd lose all context about what was done."
627
- }
628
- ]
629
- },
630
- {
631
- "type": "standalone",
632
- "slot": "slot-027",
633
- "course": "para-301",
634
- "variants": [
635
- {
636
- "id": "plsat-027",
637
- "scenario": "You run `paradigm doctor` and get the following output:\n\n```\nWARNING: #payment-processor has been modified 7 times in 14 days\nWARNING: #payment-processor has 3 rollbacks in history\nFRAGILITY SCORE: 0.85 (HIGH)\n```",
638
- "question": "What does a fragility score of 0.85 indicate, and what should you do?",
639
- "choices": {
640
- "A": "The component has a bug 85% of the time \u2014 rewrite it from scratch",
641
- "B": "85% of the codebase depends on it \u2014 extract it into a separate service",
642
- "C": "The component is highly unstable due to frequent changes and rollbacks \u2014 proceed with extra caution, review wisdom, and consider refactoring",
643
- "D": "The component needs 85% more test coverage \u2014 write tests first",
644
- "E": "The score is informational only \u2014 ignore it and proceed normally"
645
- },
646
- "correct": "C",
647
- "explanation": "A fragility score of 0.85 (out of 1.0) means the component is highly unstable. It's been changed 7 times in 2 weeks with 3 rollbacks \u2014 a clear pattern of churn. Before modifying it: (1) Call `paradigm_wisdom_context` to check if there are antipatterns or decisions recorded about it, (2) Call `paradigm_history_context` to understand the recent changes, (3) Consider whether a refactor is needed before adding more changes. The score doesn't mean 'has bugs 85%' or 'needs 85% coverage' \u2014 it's a stability metric based on change frequency and rollback rate."
648
- }
649
- ]
650
- },
651
- {
652
- "type": "standalone",
653
- "slot": "slot-028",
654
- "course": "para-301",
655
- "variants": [
656
- {
657
- "id": "plsat-028",
658
- "scenario": "After a production incident where the payment system double-charged a customer, the team discovers the root cause: a developer removed a deduplication check while refactoring `#payment-processor`. The team wants to prevent this from happening again.",
659
- "question": "What is the MOST appropriate Paradigm response to this incident?",
660
- "choices": {
661
- "A": "Add a comment in the code: `// DO NOT REMOVE THIS CHECK`",
662
- "B": "Record an antipattern in wisdom: 'Never remove deduplication from #payment-processor' with the alternative approach",
663
- "C": "Create a `^deduplication-enforced` gate in portal.yaml",
664
- "D": "Add `#payment-processor` to a 'do not touch' list in `.paradigm/config.yaml`",
665
- "E": "Record the incident in Sentinel AND record an antipattern in wisdom"
666
- },
667
- "correct": "E",
668
- "explanation": "The most complete response uses both Sentinel and Wisdom. (1) Record the incident with `paradigm_sentinel_record` so it appears in incident tracking with symbolic context (the `#payment-processor` symbol, error details, timeline). (2) Record an antipattern with `paradigm_wisdom_record` so future AI agents and developers are warned before modifying `#payment-processor`. The antipattern would say: 'Never remove deduplication logic' with alternative: 'If refactoring payment processing, always preserve the deduplication middleware and add tests for it.' A code comment (A) is easily missed. A gate (C) is for condition checks on routes, not business logic."
669
- }
670
- ]
671
- },
672
- {
673
- "type": "standalone",
674
- "slot": "slot-029",
675
- "course": "para-301",
676
- "variants": [
677
- {
678
- "id": "plsat-029",
679
- "scenario": "You need to understand the impact of changing `^authenticated` \u2014 the main authentication gate used across your entire application. You want to know what depends on it.",
680
- "question": "Which MCP tool call gives you the MOST useful dependency information?",
681
- "choices": {
682
- "A": "`paradigm_search({ query: '^authenticated' })` \u2014 find all references",
683
- "B": "`paradigm_related({ symbol: '^authenticated' })` \u2014 show direct relations",
684
- "C": "`paradigm_ripple({ symbol: '^authenticated', depth: 3 })` \u2014 show direct AND indirect dependencies up to 3 levels",
685
- "D": "`paradigm_navigate({ intent: 'find', target: '^authenticated' })` \u2014 locate the gate",
686
- "E": "`paradigm_flows_affected({ symbol: '^authenticated' })` \u2014 show affected flows"
687
- },
688
- "correct": "C",
689
- "explanation": "`paradigm_ripple` with a depth parameter gives you the cascading dependency analysis. For a widely-used gate like `^authenticated`, you need to see not just what directly references it, but what depends on things that depend on it (transitive dependencies). At depth 3, you'd see: Level 1 \u2014 all gates with `requires: [^authenticated]`, all routes using it. Level 2 \u2014 all components behind those routes. Level 3 \u2014 flows involving those components. `paradigm_related` (B) only shows direct connections. `paradigm_search` (A) finds textual references but doesn't analyze the dependency graph."
690
- }
691
- ]
692
- },
693
- {
694
- "type": "standalone",
695
- "slot": "slot-030",
696
- "course": "para-301",
697
- "variants": [
698
- {
699
- "id": "plsat-030",
700
- "scenario": "It's Monday morning. You open Claude Code to continue work on a project. Last Friday, a different Claude session was making changes to the user management module. You have no idea what state things are in.",
701
- "question": "What is the FIRST thing you should do?",
702
- "choices": {
703
- "A": "Run `git log` to see recent commits",
704
- "B": "Call `paradigm_session_recover` to load breadcrumbs from the previous session",
705
- "C": "Call `paradigm_status` for a general project overview",
706
- "D": "Read the `.paradigm/config.yaml` to understand the project",
707
- "E": "Start fresh \u2014 don't worry about what the previous session did"
708
- },
709
- "correct": "B",
710
- "explanation": "`paradigm_session_recover` is designed exactly for this scenario. It loads breadcrumbs from previous sessions, showing you what was done, what files were modified, what symbols were touched, and what the next steps were. This is more useful than `paradigm_status` (C) because status gives you general project info, not session-specific context. After recovering the session, THEN you'd call `paradigm_status` and check the relevant files. Starting fresh (E) risks duplicating work or missing important context."
711
- }
712
- ]
713
- },
714
- {
715
- "type": "standalone",
716
- "slot": "slot-031",
717
- "course": "para-301",
718
- "variants": [
719
- {
720
- "id": "plsat-031",
721
- "scenario": "Your application is experiencing intermittent 500 errors on the checkout flow. You suspect it's related to the Stripe integration. You want to check if there's a known pattern for this type of failure.",
722
- "question": "Which sequence of Sentinel tools should you use?",
723
- "choices": {
724
- "A": "`paradigm_sentinel_triage` to see open incidents, then `paradigm_sentinel_patterns` to find matching failure patterns",
725
- "B": "`paradigm_sentinel_stats` to see overall health, then guess at the root cause",
726
- "C": "`paradigm_sentinel_record` to create a new incident immediately",
727
- "D": "`paradigm_sentinel_suggest_pattern` without any incident context",
728
- "E": "`paradigm_sentinel_resolve` to close any open incidents and hope it goes away"
729
- },
730
- "correct": "A",
731
- "explanation": "The diagnostic workflow is: (1) `paradigm_sentinel_triage` with a filter like `search: '500'` or `symbol: '#stripe-service'` to see if there are existing open incidents matching your symptoms. (2) `paradigm_sentinel_patterns` to find known failure patterns (with confidence scores) that match the error. If a pattern matches, it includes resolution steps and code hints. You'd only record a NEW incident (C) if triage shows this is a novel failure. Resolving without investigating (E) is never the right answer. Stats (B) give you health metrics but not diagnostic details."
732
- }
733
- ]
734
- },
735
- {
736
- "type": "standalone",
737
- "slot": "slot-032",
738
- "course": "para-301",
739
- "variants": [
740
- {
741
- "id": "plsat-032",
742
- "scenario": "You want to find who on your team has the most expertise with the payment system before making a significant architectural change.",
743
- "question": "Which Paradigm tool helps you find the right person?",
744
- "choices": {
745
- "A": "`paradigm_search({ query: 'payments' })` and look at file authors in git blame",
746
- "B": "`paradigm_wisdom_expert({ area: 'payments' })` to find recognized experts",
747
- "C": "`paradigm_history_context({ symbols: ['#payment-service'] })` and infer from commit history",
748
- "D": "`paradigm_navigate({ intent: 'explore', target: 'payments' })` and read the code to figure out who wrote it",
749
- "E": "Ask in Deus / Slack / Teams"
750
- },
751
- "correct": "B",
752
- "explanation": "`paradigm_wisdom_expert` is purpose-built for finding human experts by area or symbol. It returns people who are recognized as knowledgeable about the payment system, based on recorded wisdom and history. While `paradigm_history_context` (C) can show who recently worked on the code, that doesn't mean they're the expert \u2014 they might have just fixed a typo. `paradigm_wisdom_expert` tracks deliberate expertise attribution, not just commit frequency."
753
- }
754
- ]
755
- },
756
- {
757
- "type": "standalone",
758
- "slot": "slot-033",
759
- "course": "para-301",
760
- "variants": [
761
- {
762
- "id": "plsat-033",
763
- "scenario": "After completing a significant refactor of the authentication module, you want to record what you did for future sessions and team members.",
764
- "question": "What is the correct way to record this in Paradigm's history system?",
765
- "choices": {
766
- "A": "Write a detailed comment in the `.purpose` file",
767
- "B": "Call `paradigm_history_record` with type 'refactor', affected symbols, and description",
768
- "C": "Update `.paradigm/docs/changelog.md` with the changes",
769
- "D": "Just commit with a good message \u2014 git history is sufficient",
770
- "E": "Call `paradigm_wisdom_record` with type 'decision' explaining the refactor rationale"
771
- },
772
- "correct": "B",
773
- "explanation": "`paradigm_history_record` is the right tool for recording implementation events. You'd call it with `type: 'refactor'`, `symbols: ['^authenticated', '#auth-middleware', ...]`, and a description of what was changed. This feeds into the history system that powers `paradigm_history_context` and `paradigm_history_fragility`. A good commit message (D) is important but separate \u2014 Paradigm's history provides symbolic context that git alone doesn't. If the refactor involved an architectural DECISION, you'd ALSO record wisdom (E), but that's supplementary, not a replacement."
774
- }
775
- ]
776
- },
777
- {
778
- "type": "standalone",
779
- "slot": "slot-034",
780
- "course": "para-301",
781
- "variants": [
782
- {
783
- "id": "plsat-034",
784
- "scenario": "You've run tests after implementing a new feature. 15 tests passed, 2 failed, and 1 was skipped. You previously recorded the implementation with `paradigm_history_record` and got back an implementation ID.",
785
- "question": "How should you record the test results?",
786
- "choices": {
787
- "A": "Update the implementation record by calling `paradigm_history_record` again",
788
- "B": "Call `paradigm_history_validate` with result 'partial' and the test counts",
789
- "C": "Call `paradigm_history_validate` with result 'fail' because not all tests passed",
790
- "D": "Don't record it \u2014 fix the failing tests first, then record a 'pass'",
791
- "E": "Call `paradigm_sentinel_record` to log the test failures as incidents"
792
- },
793
- "correct": "B",
794
- "explanation": "`paradigm_history_validate` is the validation companion to `paradigm_history_record`. With 15 passed, 2 failed, and 1 skipped, the result is 'partial' (not full pass, not complete failure). You'd call it with `result: 'partial'` and `tests: { passed: 15, failed: 2, skipped: 1 }`. This creates a validation record linked to the implementation. Recording it as 'fail' (C) is too harsh \u2014 partial acknowledges progress. Waiting to record (D) loses valuable history about the initial state. Test failures aren't production incidents (E) unless they indicate a production problem."
795
- }
796
- ]
797
- },
798
- {
799
- "type": "standalone",
800
- "slot": "slot-035",
801
- "course": "para-301",
802
- "variants": [
803
- {
804
- "id": "plsat-035",
805
- "scenario": "A context check returns the following:\n\n```\nContext usage: ~82%\nRecommendation: prepare-handoff\nMessage: Context getting full. Complete current task and prepare handoff.\n```\n\nYou're in the middle of implementing a feature that's about 70% done.",
806
- "question": "What is the correct course of action?",
807
- "choices": {
808
- "A": "Ignore the warning and finish the feature \u2014 82% means you still have 18% left",
809
- "B": "Stop immediately and call `paradigm_handoff_prepare` with what you've done so far",
810
- "C": "Finish the current task as quickly as possible, then call `paradigm_handoff_prepare` with a summary, modified files, and next steps",
811
- "D": "Delete some earlier context by running `paradigm_session_recover` to free up space",
812
- "E": "Switch to a different, smaller task that can be completed in the remaining context"
813
- },
814
- "correct": "C",
815
-        "explanation": "At 82% with a 'prepare-handoff' recommendation, you should complete your current task (not start new ones) and then hand off. The protocol says: when context usage is high but not yet critical (critical is >85%), prioritize completing the current task, then prepare a handoff. `paradigm_handoff_prepare` takes your summary, modified files, symbols touched, and next steps \u2014 giving the next session everything it needs to pick up where you left off. Ignoring it (A) risks running out of context mid-task. Stopping immediately (B) wastes the 70% progress. You can't delete context (D)."
816
- }
817
- ]
818
- },
819
- {
820
- "type": "standalone",
821
- "slot": "slot-036",
822
- "course": "para-301",
823
- "variants": [
824
- {
825
- "id": "plsat-036",
826
- "scenario": "You want to understand the complete structure of the authentication module without reading every file. The project has 200+ files across nested directories.",
827
- "question": "What is the MOST token-efficient way to explore this?",
828
- "choices": {
829
- "A": "Read every file in `src/auth/` one by one",
830
- "B": "Call `paradigm_navigate({ intent: 'explore', target: 'auth' })` to browse the area",
831
- "C": "Run `paradigm_search({ query: 'auth' })` and read every matching file",
832
- "D": "Read `.paradigm/navigator.yaml` and then every file it references",
833
- "E": "Call `paradigm_status` and hope it includes auth module details"
834
- },
835
- "correct": "B",
836
- "explanation": "`paradigm_navigate` with intent 'explore' is designed for exactly this: browsing an area of the codebase without reading individual files. At ~200 tokens per call, it's vastly more efficient than reading files (~500-2000 tokens each). It returns the structural overview of the auth area \u2014 components, gates, flows, signals \u2014 from the indexed symbols. If you need specific implementation details AFTER exploring, then you read individual files. The rule is: MCP for discovery, files for implementation."
837
- }
838
- ]
839
- },
840
- {
841
- "type": "standalone",
842
- "slot": "slot-037",
843
- "course": "para-301",
844
- "variants": [
845
- {
846
- "id": "plsat-037",
847
- "scenario": "You're debugging an issue where the `$order-fulfillment` flow is failing at the 'ship order' step. The flow has 6 steps spanning 4 components. You suspect the gate `^warehouse-authorized` is rejecting valid requests.",
848
- "question": "Which combination of tools gives you the MOST diagnostic information?",
849
- "choices": {
850
- "A": "`paradigm_flow_check({ flowId: '$order-fulfillment' })` + `paradigm_sentinel_triage({ symbol: '^warehouse-authorized' })`",
851
- "B": "`paradigm_search({ query: 'warehouse' })` + read all matching files",
852
- "C": "`paradigm_ripple({ symbol: '$order-fulfillment' })` only",
853
- "D": "`paradigm_history_context({ symbols: ['$order-fulfillment'] })` only",
854
- "E": "`paradigm_navigate({ intent: 'find', target: '^warehouse-authorized' })` + read the gate code"
855
- },
856
- "correct": "A",
857
- "explanation": "The best combination is: (1) `paradigm_flow_check` checks the flow definition against the codebase \u2014 are all steps implemented, do the gates exist, are signals emitted? This could reveal if the flow definition is out of sync with the code. (2) `paradigm_sentinel_triage` filtered by `^warehouse-authorized` shows if there are incidents or known patterns for this gate failing. Together, these give you structural validation AND operational history. Ripple (C) shows dependencies but not failures. History (D) shows changes but not current errors."
858
- }
859
- ]
860
- },
861
- {
862
- "type": "standalone",
863
- "slot": "slot-038",
864
- "course": "para-301",
865
- "variants": [
866
- {
867
- "id": "plsat-038",
868
- "scenario": "Your team has been using Paradigm for 6 months. A new developer joins and asks: 'How do I know if the Paradigm files are actually accurate? What if the code has drifted from the documentation?'",
869
- "question": "What is the correct answer?",
870
- "choices": {
871
- "A": "Trust the Paradigm files \u2014 they're always accurate because they're machine-generated",
872
- "B": "Run `paradigm doctor` to validate consistency between Paradigm files and the codebase, and check `paradigm_aspect_check` for aspect anchor drift",
873
- "C": "Paradigm files are aspirational \u2014 they describe what the code SHOULD be, not what it IS",
874
- "D": "Run `paradigm scan` to regenerate all Paradigm files from scratch",
875
- "E": "Check the git blame on `.purpose` files to see when they were last updated"
876
- },
877
- "correct": "B",
878
- "explanation": "`paradigm doctor` is the validation tool that checks for inconsistencies between Paradigm files and the codebase. It flags missing anchors, undefined symbols referenced in flows, gates referenced in portal.yaml that don't have implementations, and more. `paradigm_aspect_check` specifically validates that aspect anchors still point to valid code. `paradigm_purpose_validate` checks `.purpose` file structural validity. These tools are how you verify accuracy. Paradigm files are NOT auto-generated (A) or aspirational (C) \u2014 they're maintained documentation that has validation tools."
879
- },
880
- {
881
- "id": "plsat-038b",
882
- "scenario": "While writing a `.purpose` file for a new feature, you aren't sure whether the notification service should send emails or push notifications. Rather than guessing, you write:\n\n```yaml\ncomponents:\n notification-dispatcher:\n description: \"Routes notifications to users [NEEDS CLARIFICATION: email, push, or both?]\"\n```\n\nYou then run `paradigm doctor` and `paradigm_purpose_validate`.",
883
- "question": "How do these tools treat the `[NEEDS CLARIFICATION: ...]` marker?",
884
- "choices": {
885
- "A": "As an error \u2014 the `.purpose` file fails validation until the marker is removed",
886
- "B": "As a warning \u2014 it surfaces during checks but does not block validation or break builds",
887
- "C": "They ignore it \u2014 it's just text in a YAML string",
888
- "D": "As a fatal parse error \u2014 the square brackets break YAML syntax",
889
- "E": "As an info-level message only visible with `--verbose` flag"
890
- },
891
- "correct": "B",
892
- "explanation": "Clarification markers (`[NEEDS CLARIFICATION: ...]`) are treated as warnings by both `paradigm doctor` and `paradigm_purpose_validate`. They scan all description fields for this exact pattern and report matches as warnings. This means the marker surfaces during health checks to remind the team of open design questions, but it does not fail validation or block builds. The intent is to make ambiguity visible and trackable rather than silent. Resolve markers before shipping by replacing them with the clarified text."
893
- }
894
- ]
895
- },
896
- {
897
- "type": "standalone",
898
- "slot": "slot-039",
899
- "course": "para-401",
900
- "variants": [
901
- {
902
- "id": "plsat-039",
903
- "scenario": "You need to build a complete user profile feature: a UI component, API endpoint, database service, validation logic, and tests. This will touch at least 6 files across 3 directories.",
904
- "question": "Before writing ANY code, what should you do FIRST?",
905
- "choices": {
906
- "A": "Start coding the UI component \u2014 start with the frontend and work backwards",
907
- "B": "Call `paradigm_orchestrate_inline({ task: 'Build user profile feature', mode: 'plan' })` to get the right agents and plan",
908
- "C": "Call `paradigm_search` for existing profile-related symbols",
909
- "D": "Create the `.purpose` file first to define all the symbols",
910
- "E": "Call `paradigm_ripple` on every component you plan to create"
911
- },
912
- "correct": "B",
913
- "explanation": "When a task affects 3+ files, involves multiple features, or spans security and implementation, the FIRST step is calling `paradigm_orchestrate_inline` with mode='plan'. This returns: the right agent team (e.g., architect + security + builder + tester), estimated token cost, and an execution plan with stages. This prevents you from wasting tokens on ad-hoc implementation when a structured approach would be more efficient. After the plan, you'd call with mode='execute' to get full agent prompts. You can't ripple (E) symbols that don't exist yet."
914
- }
915
- ]
916
- },
917
- {
918
- "type": "standalone",
919
- "slot": "slot-040",
920
- "course": "para-401",
921
- "variants": [
922
- {
923
- "id": "plsat-040",
924
- "scenario": "You call `paradigm_orchestrate_inline` with mode='plan' for a task involving JWT authentication. The plan returns four agents: architect, security, builder, tester. The plan shows two stages:\n\n```\nStage 1: [architect, security] (canRunParallel: true)\nStage 2: [builder, tester] (canRunParallel: false)\n```",
925
- "question": "How should you execute this plan?",
926
- "choices": {
927
- "A": "Run all four agents simultaneously for maximum speed",
928
- "B": "Run architect and security in parallel, wait for both to complete, then run builder, then tester sequentially",
929
- "C": "Run architect first, then security, then builder, then tester \u2014 always sequential",
930
- "D": "Skip the architect and security agents \u2014 just run builder and tester",
931
- "E": "Run builder first to get code written, then architect and security for review"
932
- },
933
- "correct": "B",
934
- "explanation": "The orchestration plan explicitly marks `canRunParallel: true` for Stage 1 (architect + security), meaning they can run simultaneously. Stage 2 has `canRunParallel: false`, meaning builder must complete before tester starts. The correct execution is: launch architect and security in parallel (Stage 1), wait for both to finish, run builder with handoff context from Stage 1, then run tester after builder completes. Skipping agents (D) defeats the purpose of orchestration. Running builder first (E) ignores architecture and security design."
935
- }
936
- ]
937
- },
938
- {
939
- "type": "standalone",
940
- "slot": "slot-041",
941
- "course": "para-401",
942
- "variants": [
943
- {
944
- "id": "plsat-041",
945
- "scenario": "Your team is evaluating which agent provider to use for orchestration. The environment has:\n- `ANTHROPIC_API_KEY` set\n- Claude Code installed (Max subscription)\n- Cursor IDE open\n- No `.paradigm/config.yaml` provider override\n\nThe team runs `paradigm team providers`.",
946
- "question": "Which provider will be used by default based on the cascade?",
947
- "choices": {
948
- "A": "Cursor agent CLI \u2014 because Cursor IDE is detected",
949
- "B": "Claude Code Task tool \u2014 because Max subscription is available",
950
- "C": "Anthropic API (claude) \u2014 because it's first in the cascade and the API key is set",
951
- "D": "Manual file-based handoffs \u2014 because no provider is explicitly configured",
952
- "E": "Claude Code Agent Teams \u2014 because it supports parallel execution"
953
- },
954
- "correct": "C",
955
- "explanation": "The provider cascade tries providers in order: (1) claude (Anthropic API), (2) claude-code-teams, (3) claude-code, (4) cursor-cli, (5) claude-cli, (6) manual. Since `ANTHROPIC_API_KEY` is set, the first provider (`claude` \u2014 Anthropic API) is available and will be used. The cascade stops at the first available provider unless overridden. Even though Cursor and Claude Code are available, they're lower priority. To override, use `paradigm team providers --set cursor-cli` or set `agent-provider` in config."
956
- }
957
- ]
958
- },
959
- {
960
- "type": "standalone",
961
- "slot": "slot-042",
962
- "course": "para-401",
963
- "variants": [
964
- {
965
- "id": "plsat-042",
966
- "scenario": "You're configuring agent models for your team. The task involves:\n- An architectural review (complex reasoning needed)\n- A security audit (critical, needs thoroughness)\n- Building 3 UI components (straightforward implementation)\n- Writing unit tests (repetitive, pattern-based)\n\nYour budget is limited.",
967
- "question": "Which model assignment follows Paradigm's recommended configuration?",
968
- "choices": {
969
- "A": "All agents use opus for maximum quality",
970
- "B": "Architect: opus, Security: opus, Builder: haiku, Tester: haiku",
971
- "C": "All agents use haiku for cost efficiency",
972
- "D": "Architect: sonnet, Security: sonnet, Builder: sonnet, Tester: sonnet",
973
- "E": "Architect: opus, Security: sonnet, Builder: opus, Tester: haiku"
974
- },
975
- "correct": "B",
976
- "explanation": "Paradigm's recommended model configuration is: architect and security agents use opus (complex reasoning, critical decisions), builder uses haiku (fast, cost-effective for straightforward implementation), and tester uses haiku (pattern-based, repetitive work). The reviewer role (not in this scenario) uses sonnet (balanced critique). This balances quality where it matters most (architecture, security) with cost efficiency where tasks are more mechanical (building, testing). All-opus (A) blows the budget. All-haiku (C) risks poor architectural decisions."
977
- },
978
- {
979
- "id": "plsat-042b",
980
- "scenario": "The reviewer agent is reviewing a builder's implementation of a new checkout feature. During Stage 1 (Spec Compliance), the reviewer discovers that the builder created a new `#shipping-calculator` component but did not register it in any `.purpose` file. The code itself looks clean and well-tested.",
981
- "question": "What should the reviewer do?",
982
- "choices": {
983
- "A": "Proceed to Stage 2 (Code Quality) since the code looks good, and mention the missing `.purpose` entry as a note",
984
- "B": "Stop at Stage 1, report a blocking finding for the unregistered component, and hand back to the builder without running Stage 2",
985
- "C": "Register the component in the `.purpose` file on behalf of the builder, then approve",
986
- "D": "Skip both stages and approve since the code is well-tested",
987
- "E": "Run Stage 2 first since code quality is more important than metadata"
988
- },
989
- "correct": "B",
990
- "explanation": "The reviewer follows a strict two-stage protocol. Stage 1 (Spec Compliance) is a hard gate \u2014 if it fails, the reviewer stops immediately and hands back to the builder. A missing `.purpose` registration is a spec compliance violation (blocking finding). There is no point reviewing code quality of spec-noncompliant code. The reviewer never writes code or modifies files (C is wrong). Stage 2 cannot run before Stage 1 passes."
991
- }
992
- ]
993
- },
994
- {
995
- "type": "standalone",
996
- "slot": "slot-043",
997
- "course": "para-401",
998
- "variants": [
999
- {
1000
- "id": "plsat-043",
1001
- "scenario": "You're about to call an MCP tool. Your choices are:\n1. `paradigm_status` (~100 tokens)\n2. `paradigm_navigate` (~200 tokens)\n3. Reading a 400-line TypeScript file (~2000 tokens)\n4. `paradigm_ripple` (~300 tokens)\n\nYou need to understand what components exist in the payments area before modifying `#payment-service`.",
1002
- "question": "What is the MOST token-efficient approach?",
1003
- "choices": {
1004
- "A": "Read the TypeScript file directly \u2014 you need to see the actual code",
1005
- "B": "Call `paradigm_navigate` to explore the payments area, then `paradigm_ripple` on `#payment-service`, then read only the specific file you need to change",
1006
- "C": "Call `paradigm_status` first, then read all payment-related files",
1007
- "D": "Call all four tools to be thorough",
1008
- "E": "Skip all tools and just start coding \u2014 you'll figure it out"
1009
- },
1010
- "correct": "B",
1011
- "explanation": "The optimal approach is: (1) `paradigm_navigate` (~200 tokens) to discover what exists in the payments area without reading files. (2) `paradigm_ripple` (~300 tokens) on `#payment-service` to understand impact before modifying. (3) THEN read the specific file you need to change. Total: ~500 tokens + one targeted file read. This follows the rule: 'MCP for discovery, files for implementation.' Reading files first (A) costs ~2000 tokens before you even know what you're looking at. Calling everything (D) wastes `paradigm_status` tokens on info you don't need for this task."
1012
- }
1013
- ]
1014
- },
1015
- {
1016
- "type": "standalone",
1017
- "slot": "slot-044",
1018
- "course": "para-401",
1019
- "variants": [
1020
- {
1021
- "id": "plsat-044",
1022
- "scenario": "Your team decides that all API responses should include a `requestId` header for tracing. This is an architectural decision that affects every API endpoint in the project.",
1023
- "question": "How should this decision be recorded in Paradigm?",
1024
- "choices": {
1025
- "A": "Add a comment in every API route file",
1026
- "B": "Call `paradigm_wisdom_record` with type 'decision', a descriptive title, rationale with factors and conclusion, and consequences",
1027
- "C": "Create a `~request-id-required` aspect and add it to every component",
1028
- "D": "Update `.paradigm/config.yaml` with a new convention",
1029
- "E": "Both B and C \u2014 record the decision AND create an aspect with code anchors"
1030
- },
1031
- "correct": "E",
1032
- "explanation": "The most complete answer is both. (1) Record the architectural decision with `paradigm_wisdom_record` (type: 'decision') including title, rationale (factors: tracing, debugging, support; conclusion: all API responses must include requestId), and consequences (positive: better debugging; negative: slight overhead; mitigations: use middleware). (2) Create `~request-id-required` aspect with anchors pointing to the middleware that adds the header. The decision documents the WHY, the aspect enforces the WHAT with verifiable code anchors. Just the decision (B) lacks enforcement. Just the aspect (C) lacks rationale."
1033
- }
1034
- ]
1035
- },
1036
- {
1037
- "type": "standalone",
1038
- "slot": "slot-045",
1039
- "course": "para-401",
1040
- "variants": [
1041
- {
1042
- "id": "plsat-045",
1043
- "scenario": "You're writing a commit message for a change that added rate limiting to the payment API, modified the Stripe webhook handler, and added a new `!rate-limit-exceeded` signal.\n\nThe affected symbols are: `#payment-api`, `#stripe-webhook-handler`, `~rate-limited`, `!rate-limit-exceeded`.",
1044
- "question": "Which commit message follows Paradigm conventions?",
1045
- "choices": {
1046
- "A": "```\nfeat: add rate limiting to payments\n```",
1047
- "B": "```\nfeat(#payment-api): add rate limiting to payment endpoints\n\n- Apply ~rate-limited aspect to #payment-api\n- Update #stripe-webhook-handler with rate limit checks\n- Add !rate-limit-exceeded signal for monitoring\n\nSymbols: #payment-api, #stripe-webhook-handler, ~rate-limited, !rate-limit-exceeded\n```",
1048
- "C": "```\nfeat(payments): add rate limiting\n\nAdded rate limiting to payment API and webhook handler.\n```",
1049
- "D": "```\nFEAT(#payment-api): ADD RATE LIMITING\n\nSymbols: ALL PAYMENT SYMBOLS\n```",
1050
- "E": "```\nfeat(~rate-limited): apply rate limiting aspect\n\n- Updated payment-api\n- Updated stripe-webhook-handler\n\nSymbols: ~rate-limited\n```"
1051
- },
1052
- "correct": "B",
1053
- "explanation": "Choice B follows all Paradigm commit conventions: (1) Subject line: `type(#primary-symbol): description` with the primary affected component. (2) Body: references all affected symbols with their prefixes (#, ~, !). (3) `Symbols:` trailer: machine-readable list of ALL affected symbols for the post-commit hook to parse. Choice A lacks symbols entirely. Choice C uses a generic scope instead of a symbol. Choice D is SCREAMING_CASE (no). Choice E uses the aspect as the primary symbol, but the primary change is to `#payment-api`, and the Symbols trailer is incomplete."
1054
- }
1055
- ]
1056
- },
1057
- {
1058
- "type": "standalone",
1059
- "slot": "slot-046",
1060
- "course": "para-401",
1061
- "variants": [
1062
- {
1063
- "id": "plsat-046",
1064
- "scenario": "A developer on your team proposes a new tag: `[webhook-handler]`. They've noticed 5 components across the project that handle incoming webhooks and think a dedicated tag would be useful for classification.",
1065
- "question": "What is the correct process for adding this tag?",
1066
- "choices": {
1067
- "A": "Add it directly to the `core` section of `tags.yaml`",
1068
- "B": "Add it directly to the `project` section of `tags.yaml`",
1069
- "C": "Call `paradigm_tags_suggest` with the tag name, description, reason, and example symbols \u2014 it goes to `suggested` for human approval",
1070
- "D": "Just start using `tags: [webhook-handler]` on components \u2014 tags are freeform",
1071
- "E": "Create a new aspect `~webhook-handler` instead \u2014 tags aren't for this"
1072
- },
1073
- "correct": "C",
1074
- "explanation": "The proper process is `paradigm_tags_suggest`, which adds the tag to the `suggested` section of `tags.yaml` for human review. This is Paradigm's governance model: AI can propose tags, but humans must approve them before they become official. Once approved, a human promotes it to `project` (team-specific) or `core` (if it should ship with Paradigm). Adding directly to `core` (A) is only for Paradigm framework maintainers. Freeform usage (D) leads to tag sprawl. An aspect (E) is wrong because webhook handling isn't a cross-cutting rule requiring code anchors."
1075
- }
1076
- ]
1077
- },
1078
- {
1079
- "type": "standalone",
1080
- "slot": "slot-047",
1081
- "course": "para-401",
1082
- "variants": [
1083
- {
1084
- "id": "plsat-047",
1085
- "scenario": "Your CI/CD pipeline runs `paradigm doctor` as a check. The latest run shows:\n\n```\nERROR: Gate ^project-owner referenced in portal.yaml but not defined\nWARNING: Aspect ~cache-invalidation has stale anchors (file moved)\nERROR: Flow $signup-flow references undefined component #sms-verifier\nINFO: 3 suggested tags awaiting human review\n```",
1086
- "question": "Which issues MUST be fixed before merging, and which can wait?",
1087
- "choices": {
1088
- "A": "All four must be fixed \u2014 `paradigm doctor` errors should block the merge",
1089
- "B": "The two ERRORs must be fixed (undefined gate and undefined component). The WARNING and INFO can wait.",
1090
- "C": "Only the gate error must be fixed (security). Everything else is documentation.",
1091
- "D": "None need to block the merge \u2014 `paradigm doctor` is advisory only",
1092
- "E": "The errors and warning must be fixed. Only the INFO about suggested tags can wait."
1093
- },
1094
- "correct": "B",
1095
- "explanation": "ERRORs indicate broken references that will cause real problems: a gate referenced in `portal.yaml` that doesn't exist means route protection is undefined, and a flow referencing an undefined component means the flow documentation is actively misleading. These MUST be fixed. The WARNING about stale anchors is important but not merge-blocking \u2014 the aspect still works, the documentation just needs updating. The INFO about suggested tags is purely administrative. In practice, many teams also fix WARNINGs before merge, but the ERRORs are the must-fix items."
1096
- }
1097
- ]
1098
- },
1099
- {
1100
- "type": "standalone",
1101
- "slot": "slot-048",
1102
- "course": "para-401",
1103
- "variants": [
1104
- {
1105
- "id": "plsat-048",
1106
- "scenario": "You're preparing a handoff because your context window is at 87%. You've been working on a migration from REST to GraphQL for the user module. You've completed the schema and resolvers but haven't written tests yet.",
1107
- "question": "What information should you include in `paradigm_handoff_prepare`?",
1108
- "choices": {
1109
- "A": "Just the summary \u2014 the next session can figure out the rest",
1110
- "B": "Summary, list of modified files, symbols touched, next steps (write tests), and the open question about whether to keep the REST endpoints during migration",
1111
- "C": "A complete transcript of everything you did in this session",
1112
- "D": "Only the list of modified files \u2014 the next session can read them",
1113
- "E": "Summary and next steps only \u2014 modified files are in git"
1114
- },
1115
- "correct": "B",
1116
- "explanation": "`paradigm_handoff_prepare` accepts: summary (what was done), modifiedFiles (what changed), symbolsTouched (which symbols were affected), nextSteps (what to do next), and openQuestions (unresolved decisions). The MOST useful handoff includes ALL of these. The next session needs to know: (1) what was accomplished (summary), (2) which files to look at (modifiedFiles), (3) which symbols are in play (symbolsTouched), (4) exactly what to do next (write tests), and (5) any decisions that still need to be made (keep REST endpoints?). A transcript (C) is too much. Just files (D) lacks context."
1117
- }
1118
- ]
1119
- },
1120
- {
1121
- "type": "standalone",
1122
- "slot": "slot-049",
1123
- "course": "para-401",
1124
- "variants": [
1125
- {
1126
- "id": "plsat-049",
1127
- "scenario": "You want to validate that a specific flow `$checkout-flow` is correctly implemented. The flow has 5 steps, involves 3 gates, and emits 2 signals. You want to verify that all steps have implementations, gates exist in portal.yaml, and signals are actually emitted in the code.",
1128
- "question": "Which MCP tool call gives you this deep implementation check?",
1129
- "choices": {
1130
- "A": "`paradigm_flow_check({ flowId: '$checkout-flow' })` \u2014 validates flow definition only",
1131
- "B": "`paradigm_flow_check({ flowId: '$checkout-flow', checkImplementation: true })` \u2014 deep check against codebase",
1132
- "C": "`paradigm_purpose_validate()` \u2014 validates all purpose files including flows",
1133
- "D": "`paradigm_ripple({ symbol: '$checkout-flow' })` \u2014 shows flow dependencies",
1134
- "E": "`paradigm_related({ symbol: '$checkout-flow' })` \u2014 shows what's connected to the flow"
1135
- },
1136
- "correct": "B",
1137
- "explanation": "`paradigm_flow_check` with `checkImplementation: true` performs a deep validation: it checks that gates exist in portal.yaml, actions are implemented in the codebase, and signals are emitted. Without `checkImplementation`, it only validates the YAML structure (A). `paradigm_purpose_validate` (C) checks structural validity of purpose files but doesn't do the deep codebase cross-reference. `paradigm_ripple` (D) shows what depends on the flow, not whether it's correctly implemented. The `checkImplementation` flag is the key to going from structural validation to implementation verification."
1138
- }
1139
- ]
1140
- },
1141
- {
1142
- "type": "standalone",
1143
- "slot": "slot-050",
1144
- "course": "para-401",
1145
- "variants": [
1146
- {
1147
- "id": "plsat-050",
1148
- "scenario": "It's 2 AM. Your production system is down. The error logs show:\n\n```\nERROR: Cannot read property 'id' of null\n at PaymentProcessor.processRefund (payment-processor.ts:142)\n at RefundHandler.handle (refund-handler.ts:67)\n```\n\nYou need to investigate and fix this as fast as possible using Paradigm tools. The on-call engineer has no context about the payment system.",
1149
- "question": "What is the optimal 2 AM sequence of actions using Paradigm?",
1150
- "choices": {
1151
- "A": "Read the source files, find the bug, fix it, deploy",
1152
- "B": "`paradigm_sentinel_record` the incident \u2192 `paradigm_sentinel_patterns` for known fixes \u2192 `paradigm_wisdom_context` for `#payment-processor` antipatterns \u2192 Fix \u2192 `paradigm_sentinel_resolve`",
1153
- "C": "`paradigm_orchestrate_inline` to spin up an architect and security agent",
1154
- "D": "`paradigm_status` \u2192 `paradigm_navigate` \u2192 Read every file in payments/ \u2192 Eventually find the bug",
1155
- "E": "`paradigm_ripple` on `#payment-processor` \u2192 Fix everything that depends on it"
1156
- },
1157
- "correct": "B",
1158
- "explanation": "At 2 AM with production down, you want the fastest path to resolution with full traceability: (1) `paradigm_sentinel_record` the incident with the error and stack trace \u2014 this starts the clock and provides symbolic context. (2) `paradigm_sentinel_patterns` to check if this is a KNOWN failure pattern with an existing resolution \u2014 this could save you hours. (3) `paradigm_wisdom_context` for `#payment-processor` to check if there are recorded antipatterns (e.g., 'null check required before accessing refund.id'). (4) Fix the issue with full context. (5) `paradigm_sentinel_resolve` to close the incident with the fix commit. Orchestration (C) is overkill for an emergency fix. Reading everything (D) burns time you don't have."
1159
- }
1160
- ]
1161
- },
1162
- {
1163
- "type": "standalone",
1164
- "slot": "slot-051",
1165
- "course": "para-501",
1166
- "variants": [
1167
- {
1168
- "id": "plsat-051",
1169
- "scenario": "Your team just held a meeting where the lead architect decided to switch from PostgreSQL to CockroachDB for the user service. No code was written yet — this is purely a strategic decision with rationale documented in meeting notes.",
1170
- "question": "Which lore entry type should be used to record this?",
1171
- "choices": {
1172
- "A": "`agent-session` — because the meeting was a work session",
1173
- "B": "`milestone` — because switching databases is a major event",
1174
- "C": "`decision` — because this is an architectural decision with rationale and no implementation",
1175
- "D": "`human-note` — because a human made the decision",
1176
- "E": "`review` — because the team reviewed database options"
1177
- },
1178
- "correct": "C",
1179
- "explanation": "The `decision` entry type is specifically for architectural or design decisions with rationale. No code was written (ruling out `agent-session`), it is not yet a completed achievement (ruling out `milestone`), and while a human made the decision, the entry type describes the *content* not the *author* — the author field separately tracks who recorded it. A standalone decision with rationale is exactly what the `decision` type exists for."
1180
- }
1181
- ]
1182
- },
1183
- {
1184
- "type": "standalone",
1185
- "slot": "slot-052",
1186
- "course": "para-501",
1187
- "variants": [
1188
- {
1189
- "id": "plsat-052",
1190
- "scenario": "An agent just finished a quick session where it fixed a typo in a README and updated one comment in a source file. Total files modified: 2 (README.md and src/utils/format.ts).",
1191
- "question": "Should the agent record a lore entry before ending the session?",
1192
- "choices": {
1193
- "A": "Yes — every session should be recorded regardless of scope",
1194
- "B": "No — 2 files is below the 3-file significance threshold, and the stop hook will not require it",
1195
- "C": "Yes — the source file modification triggers the recording requirement",
1196
- "D": "No — but only because README.md is not a source file",
1197
- "E": "It depends on whether the typo fix was in a critical component"
1198
- },
1199
- "correct": "B",
1200
- "explanation": "The lore recording trigger is 3+ modified source files. With only 2 files modified (and one being a README which is typically excluded from the source file count), this session is below the threshold. The stop hook will not block for a missing lore entry. Agents can still choose to record, but it is not enforced."
1201
- }
1202
- ]
1203
- },
1204
- {
1205
- "type": "standalone",
1206
- "slot": "slot-053",
1207
- "course": "para-501",
1208
- "variants": [
1209
- {
1210
- "id": "plsat-053",
1211
- "scenario": "Sentinel has recorded 5 incidents over the past week. Three involve `#payment-processor` with TypeError, one involves `#payment-processor` with NetworkError, and one involves `#auth-service` with TypeError. Sentinel groups incidents using a 0.6 similarity threshold.",
1212
- "question": "How many incident groups will Sentinel likely create?",
1213
- "choices": {
1214
- "A": "1 group — all 5 incidents involve errors in the same general area",
1215
- "B": "2 groups — one for the 3 TypeError incidents in #payment-processor, and the auth TypeError stays separate because different component",
1216
- "C": "3 groups — one per unique (component, error type) combination",
1217
- "D": "5 groups — each incident is unique",
1218
- "E": "2 groups — one for all #payment-processor incidents (4), one for #auth-service (1)"
1219
- },
1220
- "correct": "C",
1221
- "explanation": "Sentinel groups by symbolic similarity with a 0.6 threshold. The three `#payment-processor` + TypeError incidents share both component and error type — high similarity, one group. The `#payment-processor` + NetworkError shares the component but differs in error type — enough divergence for a separate group. The `#auth-service` + TypeError shares error type with group 1 but differs in component — separate group. Three distinct (component, error type) clusters yield 3 groups."
1222
- }
1223
- ]
1224
- },
1225
- {
1226
- "type": "standalone",
1227
- "slot": "slot-054",
1228
- "course": "para-501",
1229
- "variants": [
1230
- {
1231
- "id": "plsat-054",
1232
- "scenario": "A production error just occurred. You want to record it, check for known fixes, resolve it, and then create a new pattern so future occurrences are handled faster.",
1233
- "question": "What is the correct Sentinel tool sequence?",
1234
- "choices": {
1235
- "A": "`sentinel_add_pattern` → `sentinel_record` → `sentinel_resolve`",
1236
- "B": "`sentinel_triage` → `sentinel_record` → `sentinel_resolve` → `sentinel_add_pattern`",
1237
- "C": "`sentinel_record` → `sentinel_triage` → fix → `sentinel_resolve` → `sentinel_add_pattern`",
1238
- "D": "`sentinel_record` → `sentinel_add_pattern` → `sentinel_resolve`",
1239
- "E": "`sentinel_patterns` → `sentinel_record` → `sentinel_add_pattern` → `sentinel_resolve`"
1240
- },
1241
- "correct": "C",
1242
- "explanation": "The correct lifecycle is: (1) `sentinel_record` — create the incident with error details and symbolic context. (2) `sentinel_triage` — view the incident with matched patterns and suggested resolutions. (3) Fix the issue using the context from triage. (4) `sentinel_resolve` — close the incident with the fix commit. (5) `sentinel_add_pattern` — capture the fix as a reusable pattern. You must record before you can triage, fix before you can resolve, and resolve before creating a pattern from the resolution."
1243
- }
1244
- ]
1245
- },
1246
- {
1247
- "type": "standalone",
1248
- "slot": "slot-055",
1249
- "course": "para-501",
1250
- "variants": [
1251
- {
1252
- "id": "plsat-055",
1253
- "scenario": "You are configuring habits for your project. You want to ensure that agents always call `paradigm_ripple` before modifying symbols, and you want this enforced at the start of every task. The seed habit `ripple-before-modify` exists with `trigger: preflight` and `severity: advisory`.",
1254
- "question": "Which trigger ensures the habit is evaluated before implementation begins?",
1255
- "choices": {
1256
- "A": "`on-stop` — checks compliance at the end of the session",
1257
- "B": "`on-commit` — checks before changes are committed",
1258
- "C": "`preflight` — evaluated before starting implementation",
1259
- "D": "`postflight` — evaluated after completing implementation",
1260
- "E": "`pre-write` — evaluated before each file edit"
1261
- },
1262
- "correct": "C",
1263
- "explanation": "The `preflight` trigger evaluates habits before implementation begins — this is when discovery checks like `paradigm_ripple` should run. `postflight` is too late (implementation already happened), `on-stop` is the end of the session, and `on-commit` is at commit time. There is no `pre-write` trigger — the four triggers are preflight, postflight, on-commit, and on-stop."
1264
- }
1265
- ]
1266
- },
1267
- {
1268
- "type": "standalone",
1269
- "slot": "slot-056",
1270
- "course": "para-501",
1271
- "variants": [
1272
- {
1273
- "id": "plsat-056",
1274
- "scenario": "A project overrides the seed habit `verify-before-done` (originally `severity: warn`) to `severity: block` in `.paradigm/habits.yaml`. An agent finishes implementing a feature but does not call `paradigm_pm_postflight`. The stop hook runs.",
1275
- "question": "What happens?",
1276
- "choices": {
1277
- "A": "A warning is logged but the session completes — `warn` is the seed severity",
1278
- "B": "The session is blocked — the project override to `block` means the stop hook treats this as a blocking violation",
1279
- "C": "Nothing — habit severity only affects habit check output, not the stop hook",
1280
- "D": "The override is ignored because seed habits cannot be overridden",
1281
- "E": "The session completes but the next session receives a mandatory reminder"
1282
- },
1283
- "correct": "B",
1284
- "explanation": "Project overrides in `.paradigm/habits.yaml` take precedence over seed defaults. The three-layer merge (seed → global → project) means the project's `severity: block` override replaces the seed's `severity: warn`. When the stop hook evaluates on-stop habits, it finds a blocking violation because `verify-before-done` was not followed and its severity is now `block`. The session cannot complete until the agent runs `paradigm_pm_postflight`."
1285
- }
1286
- ]
1287
- },
1288
- {
1289
- "type": "standalone",
1290
- "slot": "slot-057",
1291
- "course": "para-501",
1292
- "variants": [
1293
- {
1294
- "id": "plsat-057",
1295
- "scenario": "An agent has finished reading requirements and has a clear plan for implementing a new feature. It has not written any code yet. It wants to save a checkpoint in case the session crashes.",
1296
- "question": "Which checkpoint phase should it use?",
1297
- "choices": {
1298
- "A": "`implementing` — the agent is about to start implementing",
1299
- "B": "`planning` — the agent has a plan but has not started coding",
1300
- "C": "`validating` — the agent needs to validate its plan first",
1301
- "D": "`complete` — the planning phase is complete",
1302
- "E": "`ready` — the agent is ready to implement"
1303
- },
1304
- "correct": "B",
1305
- "explanation": "The `planning` phase captures the state after requirements are understood and a plan exists but before any code is written. If the session crashes, recovery knows: the plan is set, coding has not started, here are the key decisions. `implementing` should only be used after code changes begin. `complete` is for when the entire task is finished. There is no `ready` phase — the four phases are planning, implementing, validating, and complete."
1306
- }
1307
- ]
1308
- },
1309
- {
1310
- "type": "standalone",
1311
- "slot": "slot-058",
1312
- "course": "para-501",
1313
- "variants": [
1314
- {
1315
- "id": "plsat-058",
1316
- "scenario": "The stop hook has blocked your session with two violations:\n1. \"Modified source directories missing .purpose coverage: src/services/refund/\"\n2. \"Lore entry expected: 4 source files modified, no lore recorded\"\n\nYou need to unblock and complete your session.",
1317
- "question": "What is the correct remediation sequence?",
1318
- "choices": {
1319
- "A": "Run `paradigm_reindex` to fix both violations automatically",
1320
- "B": "Create a .purpose file in `src/services/refund/`, record a lore entry with `paradigm_lore_record`, then run `paradigm_reindex`",
1321
- "C": "Delete the `.paradigm/.pending-review` file to clear the tracking",
1322
- "D": "Add `src/services/refund/` to the `.paradigm/config.yaml` skip list",
1323
- "E": "Call `paradigm_pm_postflight` which handles all violations"
1324
- },
1325
- "correct": "B",
1326
- "explanation": "Each violation requires a specific fix: (1) Create or update a .purpose file in or above `src/services/refund/` to provide coverage for the new directory. (2) Call `paradigm_lore_record` with a summary of the session since 4 files exceeds the 3-file threshold. (3) Run `paradigm_reindex` to rebuild the index with the new .purpose file. Deleting `.pending-review` (C) just hides the tracking — the stop hook would still detect uncovered directories. `paradigm_pm_postflight` (E) reports violations but doesn't fix them."
1327
- }
1328
- ]
1329
- },
1330
- {
1331
- "type": "standalone",
1332
- "slot": "slot-059",
1333
- "course": "para-201",
1334
- "variants": [
1335
- {
1336
- "id": "plsat-059",
1337
- "scenario": "You run `paradigm flow validate` on your project and receive this output:\n\n```\n⚠ Circular Dependencies (1)\n\n $order-flow → $inventory-flow → $order-flow\n```\n\nBoth flows reference each other via `relatedFlows`.",
1338
- "question": "What is the best way to resolve this circular dependency?",
1339
- "choices": {
1340
- "A": "Delete one of the flows — circular flows are always a design error",
1341
- "B": "Extract the shared logic into a new `$stock-check-flow` that both flows reference, breaking the cycle",
1342
- "C": "Ignore it — circular dependencies are just warnings and do not affect anything",
1343
- "D": "Rename the flows so the validator does not detect the cycle",
1344
- "E": "Move both flows into the same .purpose file to merge them"
1345
- },
1346
- "correct": "B",
1347
- "explanation": "The recommended resolution for circular flow dependencies is to extract shared logic into a separate flow. If $order-flow and $inventory-flow both need shared behavior, create a third flow (e.g., $stock-check-flow) that both reference unidirectionally. This eliminates the cycle while preserving the relationships. Deletion (A) loses documentation, ignoring (C) hides architectural coupling, and renaming (D) is a workaround that does not fix the underlying issue."
1348
- },
1349
- {
1350
- "id": "plsat-059b",
1351
- "scenario": "Your project has three flows with these `relatedFlows` references:\n- `$checkout-flow` → `[$payment-flow]`\n- `$payment-flow` → `[$receipt-flow]`\n- `$receipt-flow` → `[$checkout-flow]`\n\nYou run `paradigm_flow_check({})` to validate all flows.",
1352
- "question": "What will the circular dependency detection report?",
1353
- "choices": {
1354
- "A": "No issues — each flow only references one other flow",
1355
- "B": "Three separate circular dependencies, one for each flow",
1356
- "C": "One circular dependency: $checkout-flow → $payment-flow → $receipt-flow → $checkout-flow",
1357
- "D": "A warning that flows should not have relatedFlows at all",
1358
- "E": "An error that three-flow cycles are not supported"
1359
- },
1360
- "correct": "C",
1361
- "explanation": "Paradigm's circular dependency detector uses depth-first search to trace the full dependency graph. Starting from $checkout-flow, it follows: $checkout-flow → $payment-flow → $receipt-flow → $checkout-flow, detecting a single 3-node cycle. The cycle is normalized (starting from the lexicographically smallest node) and reported once, not three times."
1362
- }
1363
- ]
1364
- },
1365
- {
1366
- "type": "passage",
1367
- "slot": "passage-habits-review",
1368
- "course": "para-501",
1369
- "passage": "Your team's `.paradigm/habits.yaml` for an e-commerce project:\n\n```yaml\noverrides:\n ripple-before-modify:\n severity: block\n explore-before-implement:\n severity: warn\n test-new-components:\n enabled: false\n record-lore-for-significant:\n severity: block\n\ncustom:\n - id: check-price-validation\n name: Validate Price Calculations\n description: Ensure price calculation tests exist for any payment-related changes\n category: testing\n trigger: postflight\n severity: warn\n check:\n type: tests-exist\n params:\n patterns: [\"**/price*.test.*\", \"**/payment*.test.*\"]\n enabled: true\n```\n\nThe seed habits that are NOT shown in overrides retain their default values. The project has 14 seed habits plus 1 custom habit.",
1370
- "questions": [
1371
- {
1372
- "slot": "pg-habits-q1",
1373
- "variants": [
1374
- {
1375
- "id": "plsat-pg3-q1a",
1376
- "scenario": "",
1377
- "question": "An agent skips calling `paradigm_ripple` before modifying `#checkout-service` and then tries to end the session. What happens?",
1378
- "choices": {
1379
- "A": "Advisory note is logged — ripple-before-modify defaults to advisory severity",
1380
- "B": "Warning is shown — the override upgrades it to warn",
1381
- "C": "Session is blocked — the override sets ripple-before-modify to severity: block",
1382
- "D": "Nothing — ripple-before-modify only checks during preflight, not on-stop",
1383
- "E": "The habit is disabled because test-new-components is disabled"
1384
- },
1385
- "correct": "C",
1386
- "explanation": "The overrides section sets `ripple-before-modify` to `severity: block`. The seed habit's trigger is `preflight`, but when severity is `block`, violations detected during preflight evaluation carry through to the stop hook as blocking violations. The agent cannot complete the session until it calls `paradigm_ripple` for the modified symbols."
1387
- }
1388
- ]
1389
- },
1390
- {
1391
- "slot": "pg-habits-q2",
1392
- "variants": [
1393
- {
1394
- "id": "plsat-pg3-q2a",
1395
- "scenario": "",
1396
- "question": "The team disabled `test-new-components` but added a custom `check-price-validation` habit. What testing discipline does this configuration express?",
1397
- "choices": {
1398
- "A": "No testing discipline — disabling the seed habit removes all test requirements",
1399
- "B": "Targeted testing — the team doesn't require tests for ALL components, but does require them for price/payment code specifically",
1400
- "C": "The custom habit replaces the seed habit entirely",
1401
- "D": "This is a configuration error — you cannot disable a seed habit and add a custom one in the same category",
1402
- "E": "Full testing — the custom habit covers everything the seed habit did"
1403
- },
1404
- "correct": "B",
1405
- "explanation": "Disabling `test-new-components` (which checks for test files globally with `**/*.test.*`) removes the blanket test requirement. The custom `check-price-validation` habit adds a targeted requirement: test files must exist specifically for price and payment code (`**/price*.test.*`, `**/payment*.test.*`). This is a deliberate choice: the team decided that testing everything is too strict, but payment/price logic is critical enough to enforce test coverage."
1406
- }
1407
- ]
1408
- },
1409
- {
1410
- "slot": "pg-habits-q3",
1411
- "variants": [
1412
- {
1413
- "id": "plsat-pg3-q3a",
1414
- "scenario": "",
1415
- "question": "An agent modifies 5 source files including payment logic but does not record a lore entry. What combination of violations will the stop hook report?",
1416
- "choices": {
1417
- "A": "One violation: missing lore entry (block severity)",
1418
- "B": "Two violations: missing lore entry (block) and missing price validation tests (warn)",
1419
- "C": "One violation: missing price validation tests only — lore is advisory by default",
1420
- "D": "Three violations: missing lore, missing tests, and .purpose coverage",
1421
- "E": "The lore violation alone blocks the session — other checks are not evaluated after a block"
1422
- },
1423
- "correct": "B",
1424
- "explanation": "The stop hook evaluates ALL checks independently. `record-lore-for-significant` is overridden to `severity: block` and 5 files exceeds the 3-file threshold — this blocks. `check-price-validation` has `trigger: postflight` and `severity: warn`, and if payment files were modified without matching test files, it warns. Both violations are reported. The stop hook blocks because at least one violation has `severity: block`, but it reports all violations so the agent can fix everything in one pass."
1425
- }
1426
- ]
1427
- }
1428
- ]
1429
- },
1430
- {
1431
- "type": "standalone",
1432
- "slot": "slot-060",
1433
- "course": "para-501",
1434
- "variants": [
1435
- {
1436
- "id": "plsat-060",
1437
- "scenario": "A project has these habits enabled:\n- `commit-message-symbols` (on-commit/advisory) — checks commit messages match `type(#symbol):` format and include a `Symbols:` trailer\n- `flow-coverage-for-multi-component` (postflight/advisory) — checks that changes spanning 3+ components have a documented $flow\n\nAn agent modifies `#auth-handler`, `#session-store`, `#login-page`, and `#password-reset` but does not create a $flow. The agent then commits with message: `fix: update auth logic`.",
1438
- "question": "Which habits are violated?",
1439
- "choices": {
1440
- "A": "Only commit-message-symbols — the message lacks type(#symbol): format",
1441
- "B": "Only flow-coverage — 4 components without a $flow",
1442
- "C": "Both: commit message lacks #symbol in parens and Symbols: trailer, plus 4 components touched without a flow",
1443
- "D": "Neither — both are advisory and don't actually check anything",
1444
- "E": "Only flow-coverage — the commit message format is correct"
1445
- },
1446
- "correct": "C",
1447
- "explanation": "The commit message `fix: update auth logic` matches the conventional prefix `fix:` but lacks a #symbol in parentheses (should be `fix(#auth-handler):`) and has no `Symbols:` trailer. Additionally, 4 components were modified (>= 3 threshold) without a documented $flow. Both habits are violated — the advisory severity means they log notes rather than blocking."
1448
- },
1449
- {
1450
- "id": "plsat-060b",
1451
- "scenario": "A project enables `context-session-awareness` (preflight/advisory) and `aspect-anchors-valid` (postflight/advisory).\n\nAn agent starts a session, immediately begins modifying `~rate-limited` aspect without calling any context or session recovery tools. After modifying the aspect's anchor locations, the agent calls `paradigm_aspect_check` to verify the anchors are valid.",
1452
- "question": "What do the habit evaluations show?",
1453
- "choices": {
1454
- "A": "Both followed — the agent did check the aspect",
1455
- "B": "context-session-awareness: skipped (no context tools called); aspect-anchors-valid: followed (paradigm_aspect_check was called)",
1456
- "C": "Both skipped — advisory habits are always skipped",
1457
- "D": "context-session-awareness: followed (aspect_check counts as context); aspect-anchors-valid: skipped (anchors were modified)",
1458
- "E": "Both partial — the agent did some work for each"
1459
- },
1460
- "correct": "B",
1461
- "explanation": "context-session-awareness checks if paradigm_session_health, paradigm_session_recover, or paradigm_session_checkpoint was called — paradigm_aspect_check does not count. aspect-anchors-valid checks if paradigm_aspect_check was called for touched aspects, which it was. So the first is skipped and the second is followed."
1462
- }
1463
- ]
1464
- },
1465
- {
1466
- "type": "standalone",
1467
- "slot": "slot-061",
1468
- "course": "para-301",
1469
- "variants": [
1470
- {
1471
- "id": "plsat-061",
1472
- "scenario": "Sentinel groups 8 incidents affecting `#payment-service` with these error messages:\n- 4 incidents: \"Stripe API returned 429: rate limited\"\n- 2 incidents: \"Payment webhook timeout after 30s\"\n- 2 incidents: \"Connection reset by peer during payment callback\"\n\nThe pattern suggester infers a resolution strategy from the grouped incidents.",
1473
- "question": "What strategy will the suggester infer?",
1474
- "choices": {
1475
- "A": "fix-code — the default for any group of incidents",
1476
- "B": "retry — timeout and network-related errors dominate the group",
1477
- "C": "scale-up — rate limiting means the service needs more capacity",
1478
- "D": "rollback — the errors suggest a recent deployment broke something",
1479
- "E": "config-change — the 429 means the API key needs updating"
1480
- },
1481
- "correct": "B",
1482
- "explanation": "The strategy inference checks error messages for keywords. 'timeout' and 'connection reset' match the retry strategy (timeout, network keywords). While '429: rate limited' could suggest scale-up, the 'timeout' keyword in 2 messages triggers the retry check first in the keyword priority order. The inference returns the first matching strategy, which is retry for timeout/network errors."
1483
- }
1484
- ]
1485
- },
1486
- {
1487
- "type": "standalone",
1488
- "slot": "slot-062",
1489
- "course": "para-201",
1490
- "variants": [
1491
- {
1492
- "id": "plsat-062a",
1493
- "scenario": "A portal.yaml defines a gate `^subscription-required` with two locks:\n\n```yaml\nlocks:\n - id: has-user\n keys:\n - expression: \"req.user != null\"\n - id: active-sub\n keys:\n - expression: \"req.user.subscription.status === 'active'\"\n - expression: \"req.user.subscription.plan !== 'free'\"\n```\n\nYou run `paradigm portal test --gate ^subscription-required`.",
1494
- "question": "How many test cases does the gate lock introspection auto-generate?",
1495
- "choices": {
1496
- "A": "2 — one passing case and one failing case",
1497
- "B": "3 — one passing case, one per-lock failure case for each of the 2 locks, but no empty entity case",
1498
- "C": "4 — one passing case, one per-lock failure case for each of the 2 locks, and one empty entity case",
1499
- "D": "5 — one case per key expression plus one empty entity case",
1500
- "E": "1 — only the passing case with all properties populated"
1501
- },
1502
- "correct": "C",
1503
- "explanation": "Gate lock introspection generates: (1) a passing case with all properties populated from all key expressions, (2) one failure case per lock (omitting that lock's required properties), and (3) an empty entity case that should always fail. With 2 locks, that's 1 + 2 + 1 = 4 test cases."
1504
- },
1505
- {
1506
- "id": "plsat-062b",
1507
- "scenario": "You need a machine-readable export of your portal configuration for a CI audit pipeline. Your portal.yaml has 5 gates and 12 routes.",
1508
- "question": "Which command produces a structured export suitable for programmatic consumption in CI?",
1509
- "choices": {
1510
- "A": "paradigm portal export --format json",
1511
- "B": "paradigm portal export --format csv",
1512
- "C": "paradigm portal export --format markdown",
1513
- "D": "paradigm doctor --json",
1514
- "E": "paradigm scan --verbose"
1515
- },
1516
- "correct": "A",
1517
- "explanation": "paradigm portal export --format json produces a structured JSON output with gates and routes arrays, ideal for CI pipelines. CSV is for spreadsheet analysis, markdown for documentation. paradigm doctor --json reports health checks, not portal config."
1518
- }
1519
- ]
1520
- },
1521
- {
1522
- "type": "standalone",
1523
- "slot": "slot-063",
1524
- "course": "para-301",
1525
- "variants": [
1526
- {
1527
- "id": "plsat-063",
1528
- "scenario": "You join a team working on a large codebase. Many source directories have code but no `.purpose` files documenting their components. You want to quickly generate draft documentation.",
1529
- "question": "What is the correct approach using Paradigm's lint tooling?",
1530
- "choices": {
1531
- "A": "paradigm lint --fix — automatically creates .purpose files for all undocumented directories",
1532
- "B": "paradigm lint --auto-populate — scans source directories and suggests .purpose drafts, then paradigm lint --auto-populate --fix to write them",
1533
- "C": "paradigm scan --fix — rebuilds the index and creates missing .purpose files",
1534
- "D": "paradigm doctor --fix — finds missing documentation and generates stubs"
1535
- },
1536
- "correct": "B",
1537
- "explanation": "paradigm lint --auto-populate scans source directories (max depth 4) for undocumented components — directories containing source files but no .purpose file. Without --fix it reports suggestions; with --fix it writes draft .purpose files. paradigm lint --fix only fixes lint issues in existing .purpose files, it doesn't create new ones. scan and doctor don't generate .purpose files."
1538
- }
1539
- ]
1540
- },
1541
- {
1542
- "type": "standalone",
1543
- "slot": "slot-064",
1544
- "course": "para-401",
1545
- "variants": [
1546
- {
1547
- "id": "plsat-064a",
1548
- "scenario": "A team has both AGENTS.md and llms.txt in their Paradigm project. A new developer asks what each file is for.",
1549
- "question": "Which statement correctly distinguishes the two files?",
1550
- "choices": {
1551
- "A": "AGENTS.md is for Claude, llms.txt is for all other LLMs",
1552
- "B": "AGENTS.md contains instructions (how to behave), llms.txt contains facts (what exists)",
1553
- "C": "llms.txt replaces AGENTS.md in Paradigm v2",
1554
- "D": "They contain the same information in different formats",
1555
- "E": "AGENTS.md is auto-generated but llms.txt must be hand-written"
1556
- },
1557
- "correct": "B",
1558
- "explanation": "AGENTS.md is prescriptive — it tells agents what tools to use, what conventions to follow, and what workflow to observe. llms.txt is descriptive — it tells agents what symbols exist, what flows are defined, and how the project is structured. Both are auto-generated by Paradigm (sync agents and sync-llms respectively) and serve distinct purposes."
1559
- },
1560
- {
1561
- "id": "plsat-064b",
1562
- "scenario": "An AI agent spawned in isolation needs to orient itself before working on a task. It has access to AGENTS.md, MCP tools, and the full codebase.",
1563
- "question": "What is the most token-efficient orientation sequence?",
1564
- "choices": {
1565
- "A": "Read all .purpose files, then read portal.yaml",
1566
- "B": "Read AGENTS.md → paradigm_session_recover → paradigm_navigate with context intent (~500 tokens total)",
1567
- "C": "paradigm_search for every symbol type → read matching files",
1568
- "D": "Read every file in .paradigm/ for full context",
1569
- "E": "Call paradigm_status repeatedly until context is sufficient"
1570
- },
1571
- "correct": "B",
1572
- "explanation": "The Fresh Context Principle: AGENTS.md provides instructions and conventions, paradigm_session_recover provides previous session context, and paradigm_navigate with context intent provides task-relevant files. Total cost: ~500 tokens — compared to thousands of tokens for file-reading approaches."
1573
- }
1574
- ]
1575
- },
1576
- {
1577
- "type": "standalone",
1578
- "slot": "slot-065",
1579
- "course": "para-201",
1580
- "variants": [
1581
- {
1582
- "id": "plsat-065",
1583
- "scenario": "You have a `$checkout-flow` with these steps:\n1. ^authenticated (gate)\n2. #validate-cart (action)\n3. #process-payment (action)\n4. !order-placed (signal)\n\nYou run `paradigm flow diagram $checkout-flow`.",
1584
- "question": "In the generated Mermaid diagram, what shapes represent each step type?",
1585
- "choices": {
1586
- "A": "All steps are rectangles with different colors",
1587
- "B": "Gates are diamonds, actions are rectangles, signals are rounded boxes",
1588
- "C": "Gates are hexagons, actions are circles, signals are parallelograms",
1589
- "D": "All steps are circles connected by labeled arrows",
1590
- "E": "Gates are rounded boxes, actions are diamonds, signals are rectangles"
1591
- },
1592
- "correct": "B",
1593
- "explanation": "Paradigm's Mermaid diagram generator uses conventional flowchart shapes: diamond shapes (decision points) for gates, rectangles for actions, and rounded rectangles for signals. Gates also show deny paths when a failResponse or errorSignal is defined. Steps are color-coded: yellow for gates, blue for actions, green for signals."
1594
- }
1595
- ]
1596
- },
1597
- {
1598
- "type": "variant-group",
1599
- "slot": "slot-066",
1600
- "course": "para-401",
1601
- "variants": [
1602
- {
1603
- "id": "plsat-066",
1604
- "scenario": "Your MCP-connected agent calls `paradigm_search` for `#auth` twice within 10 seconds. The project has a ToolCache with a 30-second TTL configured.",
1605
- "question": "What happens on the second call?",
1606
- "choices": {
1607
- "A": "The search runs again because each MCP call is stateless",
1608
- "B": "The cached result is returned instantly without re-scanning the index",
1609
- "C": "The cache is checked but always invalidated because search results may change",
1610
- "D": "The second call is queued until the first cache entry expires",
1611
- "E": "An error is returned because duplicate calls are rate-limited"
1612
- },
1613
- "correct": "B",
1614
- "explanation": "The ToolCache uses a time-based TTL (default 30 seconds). When the same tool is called with the same arguments within the TTL window, the cached result is returned immediately without re-executing the underlying scan. This saves significant compute for repeated discovery operations like search, status, and navigate."
1615
- },
1616
- {
1617
- "id": "plsat-066b",
1618
- "scenario": "An agent calls `paradigm_reindex` to rebuild the static index after modifying several .purpose files. The project has ToolCache enabled.",
1619
- "question": "What happens to the ToolCache when reindex completes?",
1620
- "choices": {
1621
- "A": "Nothing — the cache is independent of the index",
1622
- "B": "Only search-related cache entries are invalidated",
1623
- "C": "The entire cache is cleared to ensure fresh results from the rebuilt index",
1624
- "D": "Cache entries are marked stale but still served until they expire naturally",
1625
- "E": "The cache TTL is doubled to avoid redundant scans after reindex"
1626
- },
1627
- "correct": "C",
1628
- "explanation": "When paradigm_reindex completes successfully, it calls toolCache.clear() to invalidate ALL cached entries. This is critical because the reindex rebuilds the underlying data that search, navigate, and status tools depend on. Serving stale cached results after a reindex would return outdated symbol information."
1629
- }
1630
- ]
1631
- },
1632
- {
1633
- "type": "variant-group",
1634
- "slot": "slot-067",
1635
- "course": "para-501",
1636
- "variants": [
1637
- {
1638
- "id": "plsat-067",
1639
- "scenario": "An agent has been working for 45 minutes, modifying 5 source files and touching symbols `#auth-middleware`, `^rate-limited`, and `!login-failed`. The session has 12 breadcrumbs recorded.",
1640
- "question": "What triggers auto-lore drafting?",
1641
- "choices": {
1642
- "A": "Auto-lore drafts after every file modification regardless of count",
1643
- "B": "Auto-lore drafts when 3+ files are modified, generating a partial LoreEntry from session breadcrumbs",
1644
- "C": "Auto-lore drafts only when the agent explicitly calls paradigm_lore_record",
1645
- "D": "Auto-lore drafts at a fixed time interval (every 30 minutes)",
1646
- "E": "Auto-lore drafts only during the on-stop habit check"
1647
- },
1648
- "correct": "B",
1649
- "explanation": "The draftLoreFromBreadcrumbs() function generates a partial LoreEntry when 3+ files have been modified in a session. It extracts tool usage statistics from breadcrumbs, includes the symbols touched and files modified, and tags the draft with 'auto-draft' for review. The 3-file threshold ensures trivial edits don't generate noise."
1650
- },
1651
- {
1652
- "id": "plsat-067b",
1653
- "scenario": "After a long coding session, the auto-lore system generates a draft entry. You inspect the draft and notice it has a tag you didn't add.",
1654
- "question": "What tag does auto-lore always apply to drafted entries?",
1655
- "choices": {
1656
- "A": "`auto-generated` — marking it as machine-created",
1657
- "B": "`auto-draft` — indicating it needs human review before finalization",
1658
- "C": "`session-log` — categorizing it as a session record",
1659
- "D": "`pending-review` — flagging it for team approval",
1660
- "E": "`unverified` — warning that the content may be incomplete"
1661
- },
1662
- "correct": "B",
1663
- "explanation": "Auto-drafted lore entries are always tagged with 'auto-draft' to distinguish them from manually recorded entries. This tag signals that the entry was machine-generated from session breadcrumbs and should be reviewed for accuracy before being treated as authoritative project history."
1664
- }
1665
- ]
1666
- },
1667
- {
1668
- "type": "variant-group",
1669
- "slot": "slot-068",
1670
- "course": "para-501",
1671
- "variants": [
1672
- {
1673
- "id": "plsat-068",
1674
- "scenario": "Your project's `.paradigm/config.yaml` contains:\n```yaml\nlimits:\n habitsCacheTtlMs: 60000\n threadTrailMax: 20\n breadcrumbsMax: 100\n```",
1675
- "question": "What do these configurable limits control?",
1676
- "choices": {
1677
- "A": "Maximum file sizes for paradigm-managed files",
1678
- "B": "Rate limits for MCP tool calls per session",
1679
- "C": "Tunable parameters for habits cache duration, thread trail depth, and breadcrumb history length",
1680
- "D": "Hard caps on the number of symbols, flows, and gates allowed",
1681
- "E": "Timeout durations for CLI commands"
1682
- },
1683
- "correct": "C",
1684
- "explanation": "The LimitsConfig in config.yaml allows projects to tune operational parameters: habitsCacheTtlMs controls how long habit definitions are cached (default 30000ms), threadTrailMax sets the maximum breadcrumbs shown in thread trail output (default 10), and breadcrumbsMax sets the maximum breadcrumbs stored per session. These defaults work for most projects but can be adjusted for larger codebases."
1685
- },
1686
- {
1687
- "id": "plsat-068b",
1688
- "scenario": "A large monorepo project finds that paradigm_search is running too frequently, consuming unnecessary compute. They want to increase the cache duration for MCP tool results.",
1689
- "question": "Which config.yaml field controls MCP tool cache duration?",
1690
- "choices": {
1691
- "A": "`limits.searchCacheTtlMs` — specific to search operations",
1692
- "B": "`limits.toolCacheTtlMs` — controls the ToolCache TTL for all cached MCP tools",
1693
- "C": "`limits.mcpTimeoutMs` — sets the MCP response timeout",
1694
- "D": "`cache.ttl` — global cache setting for all paradigm operations",
1695
- "E": "`limits.habitsCacheTtlMs` — since habits and tools share the same cache"
1696
- },
1697
- "correct": "B",
1698
- "explanation": "The limits.toolCacheTtlMs field in config.yaml controls the TTL for the ToolCache that wraps paradigm_search, paradigm_status, and paradigm_navigate. The default is 30000ms (30 seconds). Increasing this value reduces redundant computations but may serve slightly stale results. It's separate from habitsCacheTtlMs which controls the habits definition cache."
1699
- }
1700
- ]
1701
- },
1702
- {
1703
- "type": "standalone",
1704
- "slot": "slot-069",
1705
- "course": "para-501",
1706
- "variants": [
1707
- {
1708
- "id": "plsat-069",
1709
- "scenario": "Your team uses Paradigm's Global Brain (`~/.paradigm/`) to share wisdom, lore, and history across projects. After a year, the global directory has grown to contain hundreds of old entries.",
1710
- "question": "How do you clean up old Global Brain entries?",
1711
- "choices": {
1712
- "A": "Manually delete files from `~/.paradigm/` using `rm -rf`",
1713
- "B": "`paradigm global clean --older-than 90d` removes files older than the specified duration",
1714
- "C": "`paradigm scan --prune` removes unused global entries",
1715
- "D": "Global Brain entries are automatically pruned on each `paradigm shift`",
1716
- "E": "`paradigm doctor --fix` cleans up stale global files"
1717
- },
1718
- "correct": "B",
1719
- "explanation": "The `paradigm global clean` command scans ~/.paradigm/ directories (wisdom, lore, history, cache) for files older than the specified duration. The --older-than flag accepts human-readable durations like 90d, 30d, or 7d. Use --dry-run first to preview what would be deleted. This is safer than manual deletion because it respects directory structure and cleans up empty directories afterward."
1720
- }
1721
- ]
1722
- },
1723
- {
1724
- "type": "variant-group",
1725
- "slot": "slot-070",
1726
- "course": "para-401",
1727
- "variants": [
1728
- {
1729
- "id": "plsat-070",
1730
- "scenario": "A Claude Code plugin's `hooks.json` includes:\n```json\n{\n \"compatibleVersions\": {\n \"min\": \"3.0.0\",\n \"max\": \"4.0.0\"\n }\n}\n```\nYour installed Paradigm CLI is version 3.1.6.",
1731
- "question": "What happens when you run `paradigm hooks install`?",
1732
- "choices": {
1733
- "A": "Installation fails because 3.1.6 is not exactly 3.0.0 or 4.0.0",
1734
- "B": "Installation proceeds normally — 3.1.6 is within the compatible range",
1735
- "C": "A warning is shown but installation is blocked until you upgrade",
1736
- "D": "The plugin is downgraded to match version 3.0.0",
1737
- "E": "The compatibleVersions field is ignored during installation"
1738
- },
1739
- "correct": "B",
1740
- "explanation": "The plugin version compatibility check compares the installed Paradigm version against the min/max range in hooks.json. Since 3.1.6 >= 3.0.0 and 3.1.6 < 4.0.0, installation proceeds normally. If the version were outside the range (e.g., 2.9.0 or 4.1.0), a warning would be displayed advising the user to update their Paradigm version or the plugin."
1741
- },
1742
- {
1743
- "id": "plsat-070b",
1744
- "scenario": "You're developing a Paradigm plugin and want to ensure it only works with Paradigm versions that support the habits system (introduced in v3.0).",
1745
- "question": "Where do you declare this version requirement?",
1746
- "choices": {
1747
- "A": "In the plugin's `package.json` under `peerDependencies`",
1748
- "B": "In the plugin's `hooks.json` under the `compatibleVersions` field with `min: \"3.0.0\"`",
1749
- "C": "In the plugin's `.purpose` file under `dependencies`",
1750
- "D": "In the project's `.paradigm/config.yaml` under `plugins`",
1751
- "E": "Version requirements are not enforceable — plugins work with any version"
1752
- },
1753
- "correct": "B",
1754
- "explanation": "Plugin version compatibility is declared in the plugin's hooks.json file using the compatibleVersions field. Setting min to '3.0.0' ensures that paradigm hooks install will warn users running older versions that lack habits support. This check runs at the start of hook installation before any hooks are written."
1755
- }
1756
- ]
1757
- },
1758
- {
1759
- "type": "variant-group",
1760
- "slot": "slot-071",
1761
- "course": "para-501",
1762
- "variants": [
1763
- {
1764
- "id": "plsat-071",
1765
- "scenario": "You want to record a lore entry that credits both the human developer and the AI agent that collaborated on a feature. The lore system supports co-authorship tracking.",
1766
- "question": "Which field on a LoreEntry captures AI collaboration?",
1767
- "choices": {
1768
- "A": "`author` — set to the AI agent's name",
1769
- "B": "`assistedBy` — with type ('agent', 'tool', or 'human'), id, and optional role",
1770
- "C": "`contributors` — an array of all participant names",
1771
- "D": "`metadata.aiModel` — storing the model name used",
1772
- "E": "`tags` — add an 'ai-assisted' tag"
1773
- },
1774
- "correct": "B",
1775
- "explanation": "The assistedBy field on LoreEntry provides structured co-authorship tracking. It records the type of assistant (agent, tool, or human), their identifier (e.g., 'claude-opus-4', 'copilot'), and an optional role description. The author field remains the human developer, while assistedBy captures the AI collaboration context for project history."
1776
- },
1777
- {
1778
- "id": "plsat-071b",
1779
- "scenario": "Your team reviews lore entries from the past month and wants to understand how much AI assistance was involved in recent changes.",
1780
- "question": "How does the `assistedBy` field help with this analysis?",
1781
- "choices": {
1782
- "A": "It tracks token usage per AI interaction",
1783
- "B": "It records the AI's confidence score for each change",
1784
- "C": "It provides structured data (type, id, role) showing which AI tools or agents assisted each recorded session",
1785
- "D": "It measures the percentage of code written by AI vs human",
1786
- "E": "It links to the AI conversation transcript"
1787
- },
1788
- "correct": "C",
1789
- "explanation": "The assistedBy field captures three dimensions of AI collaboration: type (was it an agent like Claude, a tool like Copilot, or a human pair-programmer?), id (which specific model or tool?), and role (what was their contribution — implementation, review, planning?). This structured data enables teams to analyze collaboration patterns across their lore timeline."
1790
- }
1791
- ]
1792
- },
1793
- {
1794
- "type": "standalone",
1795
- "slot": "slot-072",
1796
- "course": "para-501",
1797
- "variants": [
1798
- {
1799
- "id": "plsat-072",
1800
- "scenario": "A project has no `limits` section in `.paradigm/config.yaml`. An agent calls tools that rely on configurable limits — habits cache, thread trail, and ToolCache.",
1801
- "question": "What values are used when limits are not configured?",
1802
- "choices": {
1803
- "A": "All limits are set to 0 (unlimited)",
1804
- "B": "An error is thrown requiring explicit configuration",
1805
- "C": "Sensible defaults: habitsCacheTtlMs=30000, threadTrailMax=10, toolCacheTtlMs=30000, breadcrumbsMax=unlimited",
1806
- "D": "Limits are inherited from the Global Brain (~/.paradigm/) configuration",
1807
- "E": "Each tool prompts the user to set a limit on first use"
1808
- },
1809
- "correct": "C",
1810
- "explanation": "All configurable limits have sensible defaults that match the pre-configuration behavior: habits cache refreshes every 30 seconds, thread trail shows the last 10 breadcrumbs, ToolCache entries expire after 30 seconds. These defaults work well for most projects. The limits section in config.yaml is entirely optional — only override when you have a specific need."
1811
- }
1812
- ]
1813
- },
1814
- {
1815
- "type": "standalone",
1816
- "slot": "slot-073",
1817
- "course": "para-401",
1818
- "variants": [
1819
- {
1820
- "id": "plsat-073",
1821
- "scenario": "An agent working on a complex feature calls these MCP tools in sequence:\n1. `paradigm_status` (cached)\n2. `paradigm_search` for `#auth` (cached)\n3. Edits 3 files, adds a new component\n4. `paradigm_reindex`\n5. `paradigm_search` for `#auth` again",
1822
- "question": "Does step 5 return the updated results including the new component?",
1823
- "choices": {
1824
- "A": "No — the search cache still has the old results from step 2",
1825
- "B": "Yes — reindex at step 4 clears all caches, so step 5 runs a fresh search against the rebuilt index",
1826
- "C": "Only if 30 seconds have passed since step 2",
1827
- "D": "Only if the agent explicitly called toolCache.clear()",
1828
- "E": "The search always bypasses cache after a write operation"
1829
- },
1830
- "correct": "B",
1831
- "explanation": "The reindex operation at step 4 has two effects: it rebuilds the static index from .purpose files and clears the entire ToolCache. This means step 5 performs a fresh search against the newly rebuilt index, which includes the new component. This cache-invalidation-on-reindex pattern ensures that discovery tools always reflect the current state after structural changes."
1832
- }
1833
- ]
1834
- },
1835
- {
1836
- "type": "variant-group",
1837
- "slot": "slot-074",
1838
- "course": "para-501",
1839
- "variants": [
1840
- {
1841
- "id": "plsat-074",
1842
- "scenario": "An auto-lore draft is generated from session breadcrumbs after modifying 6 files. The breadcrumbs show: 4 Edit tool calls, 2 Write tool calls, 8 Read tool calls, 3 paradigm_navigate calls.",
1843
- "question": "What information does the auto-lore draft extract from these breadcrumbs?",
1844
- "choices": {
1845
- "A": "Only the file paths that were modified",
1846
- "B": "A complete diff of all code changes",
1847
- "C": "Tool usage statistics (edit count, write count, read count) plus modified files and symbols touched",
1848
- "D": "The full text of every tool call and response",
1849
- "E": "Only the symbols referenced in paradigm_navigate calls"
1850
- },
1851
- "correct": "C",
1852
- "explanation": "The auto-lore drafting function analyzes breadcrumbs to extract tool usage statistics — counting edits, writes, reads, and paradigm tool calls. It combines this with the list of modified files and symbols touched to generate a summary. The draft doesn't include full diffs or response text, keeping the lore entry concise and focused on what happened rather than how."
1853
- },
1854
- {
1855
- "id": "plsat-074b",
1856
- "scenario": "An agent completes a task that modified only 2 files. The habits system runs the on-stop check.",
1857
- "question": "Will auto-lore drafting generate an entry?",
1858
- "choices": {
1859
- "A": "Yes — any file modification triggers auto-lore",
1860
- "B": "No — auto-lore requires 3+ modified files to trigger",
1861
- "C": "Yes — but only if the session lasted longer than 15 minutes",
1862
- "D": "No — auto-lore only runs during postflight, not on-stop",
1863
- "E": "It depends on the project's limits.breadcrumbsMax setting"
1864
- },
1865
- "correct": "B",
1866
- "explanation": "Auto-lore drafting has a 3-file minimum threshold. Modifying only 2 files does not trigger a draft because such small changes are typically routine fixes that don't warrant project history entries. This threshold aligns with the lore recording decision tree: 'Did I modify 3+ source files? YES → Record lore.' The threshold prevents noise in project history."
1867
- }
1868
- ]
1869
- },
1870
- {
1871
- "type": "standalone",
1872
- "slot": "slot-075",
1873
- "course": "para-501",
1874
- "variants": [
1875
- {
1876
- "id": "plsat-075",
1877
- "scenario": "You run `paradigm global clean --older-than 30d --dry-run` and see:\n```\nWould delete 23 files from wisdom/\nWould delete 45 files from lore/\nWould delete 12 files from history/\nWould delete 0 files from cache/\n```",
1878
- "question": "What is the safest next step?",
1879
- "choices": {
1880
- "A": "Run `paradigm global clean --older-than 30d` to delete all 80 files immediately",
1881
- "B": "Review the specific files listed, then run without --dry-run if the deletions look correct",
1882
- "C": "Run `paradigm global clean --older-than 7d` to be more aggressive",
1883
- "D": "Delete the `~/.paradigm/` directory entirely since most files are old",
1884
- "E": "Skip cleanup — 80 files is too many to safely remove"
1885
- },
1886
- "correct": "B",
1887
- "explanation": "The --dry-run flag exists specifically to preview destructive operations. The safest workflow is: (1) dry-run to see what would be deleted, (2) review the file list for anything you want to keep, (3) run without --dry-run once satisfied. Global Brain files contain cross-project wisdom and lore that may be valuable — always review before bulk deletion."
1888
- }
1889
- ]
1890
- },
1891
- {
1892
- "type": "standalone",
1893
- "slot": "slot-076",
1894
- "course": "para-401",
1895
- "variants": [
1896
- {
1897
- "id": "plsat-076",
1898
- "scenario": "Your project uses both the ToolCache (for MCP tool results) and the habits cache (for habit definitions). Both have configurable TTLs.",
1899
- "question": "Why are these two separate caches rather than one unified cache?",
1900
- "choices": {
1901
- "A": "Historical accident — they were built by different teams",
1902
- "B": "They cache different data types with different invalidation needs: tool results change on reindex, habit definitions change on file edits",
1903
- "C": "Performance — two smaller caches are faster than one large cache",
1904
- "D": "Security — MCP tool results must be isolated from habit definitions",
1905
- "E": "They are the same cache with different configuration keys"
1906
- },
1907
- "correct": "B",
1908
- "explanation": "The ToolCache caches MCP tool results (search, navigate, status) and is invalidated on reindex when the underlying index changes. The habits cache stores parsed habit definitions from habits.yaml and is invalidated when the file's modification time changes. These fundamentally different invalidation strategies require separate cache implementations — flushing all habit definitions because a .purpose file changed would be wasteful, and vice versa."
1909
- }
1910
- ]
1911
- },
1912
- {
1913
- "type": "standalone",
1914
- "slot": "slot-077",
1915
- "course": "para-501",
1916
- "variants": [
1917
- {
1918
- "id": "plsat-077",
1919
- "scenario": "You're configuring a large monorepo with 500+ symbols. Sessions often span 30+ minutes with many breadcrumbs. You want to optimize the Paradigm configuration.",
1920
- "question": "Which limits configuration would be most appropriate?",
1921
- "choices": {
1922
- "A": "Set all limits to maximum values for the largest possible buffers",
1923
- "B": "Increase threadTrailMax to 25 and toolCacheTtlMs to 60000 for the larger codebase, keep other defaults",
1924
- "C": "Decrease all TTLs to 5000ms to ensure data is always fresh",
1925
- "D": "Remove the limits section entirely and rely on defaults",
1926
- "E": "Set breadcrumbsMax to 10 to save memory"
1927
- },
1928
- "correct": "B",
1929
- "explanation": "For large monorepos, increasing threadTrailMax (from default 10 to 25) provides more session context for complex tasks, and increasing toolCacheTtlMs (from 30s to 60s) reduces redundant index scans across the larger symbol space. Other defaults work well regardless of project size. Setting TTLs too low causes excessive recomputation, while setting breadcrumbsMax too low loses valuable session context."
1930
- }
1931
- ]
1932
- },
1933
- {
1934
- "type": "standalone",
1935
- "slot": "slot-078",
1936
- "course": "para-501",
1937
- "variants": [
1938
- {
1939
- "id": "plsat-078",
1940
- "scenario": "Your project has aspects categorized as rules, decisions, constraints, configurations, and invariants. A new aspect states: 'API response payloads must not exceed 5MB.' A developer is unsure which category to assign.",
1941
- "question": "Which aspect category is correct for this aspect?",
1942
- "choices": {
1943
- "A": "`rule` \u2014 because it uses 'must not', indicating a mandatory pattern",
1944
- "B": "`constraint` \u2014 because it defines a quantitative limit (5MB) on system behavior",
1945
- "C": "`configuration` \u2014 because the 5MB value could be changed per environment",
1946
- "D": "`invariant` \u2014 because it must always hold true",
1947
- "E": "`decision` \u2014 because someone decided 5MB was the right limit"
1948
- },
1949
- "correct": "B",
1950
- "explanation": "The key indicator is the quantitative limit: '5MB'. Constraints define measurable boundaries on system behavior \u2014 file sizes, rate limits, timeouts, quotas. While 'must not exceed' sounds like a rule, the category inference system prioritizes 'limit', 'maximum', 'cannot exceed' keywords for `constraint`. A rule would be a pattern without a specific numeric boundary (e.g., 'all responses must include request IDs'). Configuration would apply if the value explicitly varies by environment."
1951
- },
1952
- {
1953
- "id": "plsat-078b",
1954
- "scenario": "An aspect definition reads: 'The team decided to use PostgreSQL over MongoDB for the user service due to relational query requirements.' No category field is explicitly set.",
1955
- "question": "What category will Paradigm's category inference assign?",
1956
- "choices": {
1957
- "A": "`rule` \u2014 because it implies PostgreSQL must be used",
1958
- "B": "`constraint` \u2014 because it limits the database technology",
1959
- "C": "`decision` \u2014 because the description contains 'decided'",
1960
- "D": "`configuration` \u2014 because the database choice is a deployment setting",
1961
- "E": "`invariant` \u2014 because the database choice should never change"
1962
- },
1963
- "correct": "C",
1964
- "explanation": "Category inference uses keyword matching on the description. Words like 'decided', 'chosen', 'selected', 'opted' trigger the `decision` category. The description explicitly says 'The team decided to use PostgreSQL over MongoDB' \u2014 this is a textbook architectural decision with rationale."
1965
- }
1966
- ]
1967
- },
1968
- {
1969
- "type": "standalone",
1970
- "slot": "slot-079",
1971
- "course": "para-501",
1972
- "variants": [
1973
- {
1974
- "id": "plsat-079",
1975
- "scenario": "Your aspect graph has the following edges:\n- `~token-expiry-24h` --depends-on--> `~jwt-signing-rs256`\n- `~jwt-signing-rs256` --enforced-by--> `#auth-middleware`\n- `~cache-aggressively` --contradicts--> `~always-fresh-data`\n- `~rate-limit-v2` --supersedes--> `~rate-limit-v1`\n\nYou need to modify `~jwt-signing-rs256` to change the signing algorithm.",
1976
- "question": "Which aspect will paradigm_ripple surface as impacted through the 'depends-on' edge?",
1977
- "choices": {
1978
- "A": "`~cache-aggressively` \u2014 because it has a contradicts edge in the same graph",
1979
- "B": "`~token-expiry-24h` \u2014 because it depends-on the aspect being modified",
1980
- "C": "`~rate-limit-v2` \u2014 because it supersedes another aspect",
1981
- "D": "`#auth-middleware` \u2014 because it enforces the aspect",
1982
- "E": "All four aspects \u2014 ripple follows all edge types equally"
1983
- },
1984
- "correct": "B",
1985
- "explanation": "Ripple follows dependency edges to discover indirect impacts. `~token-expiry-24h` has a `depends-on` edge to `~jwt-signing-rs256`, meaning changes to the signing algorithm may affect token expiry behavior. `#auth-middleware` has an `enforced-by` edge (reverse direction \u2014 it enforces the aspect, but the aspect doesn't depend on it for correctness). The contradicts and supersedes edges involve unrelated aspects."
1986
- }
1987
- ]
1988
- },
1989
- {
1990
- "type": "standalone",
1991
- "slot": "slot-080",
1992
- "course": "para-501",
1993
- "variants": [
1994
- {
1995
- "id": "plsat-080",
1996
- "scenario": "You run `paradigm_aspect_search({ query: 'jwt expiry' })` and get three results:\n- Tier 1 (learned): `~token-expiry-24h` (weight: 3.0)\n- Tier 2 (FTS5): `~session-timeout-30m` (BM25: 0.7)\n- Tier 3 (fuzzy): `~jwt-refresh-rotation` (distance: 2)\n\nThe Tier 1 result is exactly what you need.",
1997
- "question": "What should you do to reinforce this search mapping?",
1998
- "choices": {
1999
- "A": "Nothing \u2014 Tier 1 results are already reinforced by being in the search_weights table",
2000
- "B": "Call `paradigm_aspect_confirm({ query: 'jwt expiry', aspectId: 'token-expiry-24h' })` to add +1.0 weight and decay the others",
2001
- "C": "Manually update the search_weights SQLite table to increase the weight",
2002
- "D": "Call `paradigm_aspect_get({ aspectId: 'token-expiry-24h' })` to register a direct access",
2003
- "E": "Call `paradigm_reindex` to rebuild the learned mappings"
2004
- },
2005
- "correct": "B",
2006
- "explanation": "paradigm_aspect_confirm is the feedback mechanism for the learning system. Calling it with the query and selected aspect ID adds +1.0 to the confirmed result's weight (3.0 \u2192 4.0) and decays all other results for that query by *0.95. This reinforces the correct mapping. Reindex rebuilds the graph but does not affect search_weights \u2014 those persist across reindexes. Direct access via aspect_get records a heatmap entry but does not affect search learning."
2007
- }
2008
- ]
2009
- },
2010
- {
2011
- "type": "standalone",
2012
- "slot": "slot-081",
2013
- "course": "para-501",
2014
- "variants": [
2015
- {
2016
- "id": "plsat-081",
2017
- "scenario": "The aspect graph materialization pipeline runs during `paradigm_reindex`. It processes aspects from .purpose files through a specific sequence of steps.",
2018
- "question": "What is the correct order of the five-step materialization pipeline?",
2019
- "choices": {
2020
- "A": "materialize aspects \u2192 open graph \u2192 materialize lore links \u2192 infer lore edges \u2192 close graph",
2021
- "B": "open graph \u2192 materialize aspects \u2192 materialize lore links \u2192 infer lore edges \u2192 close graph",
2022
- "C": "open graph \u2192 infer lore edges \u2192 materialize aspects \u2192 materialize lore links \u2192 close graph",
2023
- "D": "materialize aspects \u2192 materialize lore links \u2192 open graph \u2192 infer lore edges \u2192 close graph",
2024
- "E": "open graph \u2192 materialize lore links \u2192 materialize aspects \u2192 close graph \u2192 infer lore edges"
2025
- },
2026
- "correct": "B",
2027
- "explanation": "The materialization pipeline follows a strict order: (1) openAspectGraph opens or creates the SQLite database and clears all tables. (2) materializeAspects reads .purpose files and writes aspects, anchors, and explicit/inferred edges. (3) materializeLoreLinks creates entries connecting aspects to their referenced lore entries. (4) inferLoreEdges scans for shared lore references between aspects and creates learned edges. (5) closeAspectGraph commits changes, runs ANALYZE, and closes the connection."
2028
- }
2029
- ]
2030
- },
2031
- {
2032
- "type": "standalone",
2033
- "slot": "slot-082",
2034
- "course": "para-501",
2035
- "variants": [
2036
- {
2037
- "id": "plsat-082",
2038
- "scenario": "You define an aspect with an `applies-to` reference to a component:\n\n```yaml\n~audit-required:\n description: Financial operations must produce audit logs\n applies-to: [\"#payment-service\"]\n edges:\n - target: \"#audit-middleware\"\n relation: enforced-by\n```",
2039
- "question": "What edges will the materialization pipeline create, and what are their origins and weights?",
2040
- "choices": {
2041
- "A": "One edge: `enforced-by` to `#audit-middleware` with origin `explicit` and weight 1.0",
2042
- "B": "Two edges: `enforced-by` to `#audit-middleware` (origin: explicit, weight: 1.0) and an inferred edge to `#payment-service` (origin: inferred, weight: 0.5)",
2043
- "C": "Two edges: both with origin `explicit` and weight 1.0",
2044
- "D": "Three edges: one explicit, one inferred, and one learned",
2045
- "E": "One edge: `applies-to` is documentation only and does not generate edges"
2046
- },
2047
- "correct": "B",
2048
- "explanation": "The materialization pipeline creates edges from two sources. The explicit `edges` field generates an edge to `#audit-middleware` with origin `explicit` and weight 1.0. The `applies-to` reference generates an inferred edge to `#payment-service` with origin `inferred` and weight 0.5. Inferred edges have lower weight because they represent a weaker relationship than explicitly declared edges."
2049
- }
2050
- ]
2051
- },
2052
- {
2053
- "type": "standalone",
2054
- "slot": "slot-083",
2055
- "course": "para-501",
2056
- "variants": [
2057
- {
2058
- "id": "plsat-083",
2059
- "scenario": "An aspect `~session-timeout-30m` was created 3 months ago with an anchor at `src/middleware/session.ts:15-25`. Since then, a developer refactored the file and the session timeout logic is now at lines 40-55. The aspect definition was not updated.\n\nYou run `paradigm_aspect_drift({ aspectId: 'session-timeout-30m' })`.",
2060
- "question": "What will the drift detection report?",
2061
- "choices": {
2062
- "A": "No drift \u2014 the file still exists, so the anchor is valid",
2063
- "B": "Drift detected: the SHA-256 content hash of lines 15-25 no longer matches the stored hash, indicating the code at the anchored location has changed",
2064
- "C": "An error \u2014 the anchor line range exceeds the current file length",
2065
- "D": "Partial drift \u2014 only some lines within the range changed",
2066
- "E": "No drift \u2014 drift detection only checks whether the file exists, not its contents"
2067
- },
2068
- "correct": "B",
2069
- "explanation": "Drift detection computes a SHA-256 hash of the current code at the anchored line range (15-25) and compares it to the hash stored during materialization. Since the timeout logic moved to different lines, the code at lines 15-25 is now different \u2014 the hashes will not match, and drift is reported. The fix is to update the anchor to `src/middleware/session.ts:40-55` to point to the new location of the timeout logic."
2070
- }
2071
- ]
2072
- },
2073
- {
2074
- "type": "standalone",
2075
- "slot": "slot-084",
2076
- "course": "para-501",
2077
- "variants": [
2078
- {
2079
- "id": "plsat-084",
2080
- "scenario": "You run `paradigm_aspect_suggest_scan({ filePath: 'src/auth/jwt.ts' })` on a file containing:\n\n```typescript\nconst TOKEN_EXPIRY = 86400; // 24 hours in seconds\nconst MAX_REFRESH_ATTEMPTS = 3;\nif (process.env.NODE_ENV === 'production') { ... }\nconst EMAIL_REGEX = /^[a-zA-Z0-9+_.-]+@[a-zA-Z0-9.-]+$/;\n```",
2081
- "question": "Which of the 8 built-in detectors will fire for each pattern?",
2082
- "choices": {
2083
- "A": "All four lines trigger the 'magic numbers' detector only",
2084
- "B": "TOKEN_EXPIRY: time values; MAX_REFRESH_ATTEMPTS: magic numbers; process.env: environment checks; EMAIL_REGEX: regex patterns",
2085
- "C": "TOKEN_EXPIRY: magic numbers; MAX_REFRESH_ATTEMPTS: rate limits; process.env: feature flags; EMAIL_REGEX: hardcoded strings",
2086
- "D": "All four lines trigger the 'hardcoded strings' detector",
2087
- "E": "TOKEN_EXPIRY: configuration; MAX_REFRESH_ATTEMPTS: constraint; process.env: environment checks; EMAIL_REGEX: assertion guards"
2088
- },
2089
- "correct": "B",
2090
- "explanation": "Each pattern matches a specific detector: (1) 86400 with a comment mentioning '24 hours' matches the time values detector (durations, timeouts, TTLs, expiry). (2) MAX_REFRESH_ATTEMPTS = 3 is a numeric literal that is not 0 or 1, matching the magic numbers detector. (3) process.env.NODE_ENV matches the environment checks detector. (4) The regular expression literal matches the regex patterns detector. The detectors are specialized for these exact pattern types."
2091
- }
2092
- ]
2093
- },
2094
- {
2095
- "type": "standalone",
2096
- "slot": "slot-085",
2097
- "course": "para-501",
2098
- "variants": [
2099
- {
2100
- "id": "plsat-085",
2101
- "scenario": "Two aspects in your project both reference lore entry `L-2026-01-15-003`:\n- `~token-expiry-24h` has `lore: [L-2026-01-15-003]`\n- `~refresh-token-rotation` has `lore: [L-2026-01-15-003]`\n\nNeither aspect has an explicit edge to the other.",
2102
- "question": "What happens during the `inferLoreEdges` step of materialization?",
2103
- "choices": {
2104
- "A": "Nothing \u2014 edges are only created from explicit YAML definitions",
2105
- "B": "A learned edge is created between the two aspects with origin 'learned' and weight proportional to shared lore references",
2106
- "C": "Both aspects are merged into a single aspect",
2107
- "D": "A lore_links entry is created but no edge is generated",
2108
- "E": "An explicit edge with weight 1.0 is created between them"
2109
- },
2110
- "correct": "B",
2111
- "explanation": "The inferLoreEdges step scans the lore_links table for aspects that share lore references. When two aspects both reference the same lore entry, a learned edge is created between them with origin 'learned' and a weight proportional to the number of shared references. This discovers implicit relationships \u2014 aspects that were discussed in the same lore context are likely related even without explicit edges."
2112
- }
2113
- ]
2114
- },
2115
- {
2116
- "type": "standalone",
2117
- "slot": "slot-086",
2118
- "course": "para-501",
2119
- "variants": [
2120
- {
2121
- "id": "plsat-086",
2122
- "scenario": "Your project has three edge origins in the aspect graph:\n- Explicit edges (weight 1.0) from YAML `edges` fields\n- Inferred edges (weight 0.5) from `applies-to` references\n- Learned edges from shared lore references\n\nDuring recursive ripple, the BFS traverses: an explicit edge (1.0) then an inferred edge (0.5) then another inferred edge (0.5).",
2123
- "question": "What is the cumulative path weight after these three hops, and will it be pruned by the default minWeight threshold?",
2124
- "choices": {
2125
- "A": "Weight: 2.0 (additive) \u2014 well above the 0.1 threshold, not pruned",
2126
- "B": "Weight: 0.25 (multiplicative: 1.0 * 0.5 * 0.5) \u2014 above 0.1, not pruned",
2127
- "C": "Weight: 0.5 (only the weakest edge counts) \u2014 above 0.1, not pruned",
2128
- "D": "Weight: 0.0625 (multiplicative: 1.0 * 0.25 * 0.25) \u2014 below 0.1, pruned",
2129
- "E": "Weight: 0.167 (average of all three) \u2014 above 0.1, not pruned"
2130
- },
2131
- "correct": "B",
2132
- "explanation": "Recursive ripple uses multiplicative decay: the weight at each hop is multiplied by the edge weight. Starting at 1.0, after an explicit edge (1.0): 1.0 * 1.0 = 1.0. After an inferred edge (0.5): 1.0 * 0.5 = 0.5. After another inferred edge (0.5): 0.5 * 0.5 = 0.25. The cumulative weight 0.25 is above the default minWeight threshold of 0.1, so this path is NOT pruned. One more inferred edge would drop it to 0.125, still above threshold. Two more would reach 0.0625, below threshold and pruned."
2133
- }
2134
- ]
2135
- },
2136
- {
2137
- "type": "standalone",
2138
- "slot": "slot-087",
2139
- "course": "para-501",
2140
- "variants": [
2141
- {
2142
- "id": "plsat-087",
2143
- "scenario": "Your project's aspect graph SQLite database at `.paradigm/aspect-graph.db` has six tables. During a governance review, you want to understand which aspects are discovered most frequently and how they are typically found.",
2144
- "question": "Which table stores this information, and what are its columns?",
2145
- "choices": {
2146
- "A": "The `aspects` table with an `access_count` column",
2147
- "B": "The `edges` table with a `traversal_count` column",
2148
- "C": "The `heatmap` table with columns: aspect_id, access_type, count, and last_accessed",
2149
- "D": "The `search_weights` table with a `hit_count` column",
2150
- "E": "The `anchors` table with a `reference_count` column"
2151
- },
2152
- "correct": "C",
2153
- "explanation": "The `heatmap` table tracks aspect access patterns with four columns: `aspect_id` (which aspect), `access_type` (how it was discovered: search, ripple, navigate, or direct), `count` (frequency), and `last_accessed` (timestamp). This table powers `paradigm_aspect_heatmap` and reveals whether aspects are typically found via search, encountered during ripple analysis, discovered through navigation, or accessed by direct ID lookup."
2154
- }
2155
- ]
2156
- },
2157
- {
2158
- "type": "standalone",
2159
- "slot": "slot-088",
2160
- "course": "para-501",
2161
- "variants": [
2162
- {
2163
- "id": "plsat-088",
2164
- "scenario": "You want to extend `paradigm_aspect_suggest_scan` to detect SOC2 compliance annotations specific to your project. The built-in 8 detectors do not cover this pattern.",
2165
- "question": "How do you add a custom detector?",
2166
- "choices": {
2167
- "A": "Edit the Paradigm source code to add a 9th built-in detector",
2168
- "B": "Define a custom detector in `.paradigm/aspect-detectors.yaml` with regex patterns, language filters, and suggested category/severity",
2169
- "C": "Create a `.paradigm/plugins/soc2-detector.js` plugin file",
2170
- "D": "Add a `detectors` section to `.paradigm/config.yaml`",
2171
- "E": "Custom detectors are not supported \u2014 use paradigm_aspect_search instead"
2172
- },
2173
- "correct": "B",
2174
- "explanation": "Custom detectors are defined in `.paradigm/aspect-detectors.yaml`. Each detector specifies an id, name, description, regex patterns with language filters, and suggestions for category, severity, and tags. Custom detectors are loaded alongside the built-in 8 during `paradigm_aspect_suggest_scan`, extending the detection system without modifying Paradigm's source code."
2175
- }
2176
- ]
2177
- },
2178
- {
2179
- "type": "standalone",
2180
- "slot": "slot-089",
2181
- "course": "para-501",
2182
- "variants": [
2183
- {
2184
- "id": "plsat-089",
2185
- "scenario": "During a quarterly governance review of a project with 150 aspects, the heatmap shows 40 aspects with zero access. The drift audit reveals 12 drifted anchors. The category distribution is: 95 rules, 20 constraints, 15 configurations, 12 decisions, 8 invariants.",
2186
- "question": "What does the category distribution suggest about this project's aspect governance?",
2187
- "choices": {
2188
- "A": "The distribution is healthy \u2014 rules should always be the majority",
2189
- "B": "The project may be over-documenting constraints as rules, and under-documenting strategic decisions \u2014 review whether some 'rules' are actually constraints or decisions",
2190
- "C": "The project needs more invariants to balance the distribution",
2191
- "D": "Configuration aspects should equal rules in a well-governed project",
2192
- "E": "The 40 zero-access aspects indicate the project should reduce to 110 aspects"
2193
- },
2194
- "correct": "B",
2195
- "explanation": "A 63% concentration in the `rule` category (95 out of 150) suggests over-classification. Many numeric limits (which should be constraints) and architectural choices (which should be decisions) may be categorized as rules. The low decision count (12) is a red flag \u2014 a project with 150 aspects likely made more than 12 strategic decisions. The governance review should reclassify mistyped aspects and document missing decisions. Zero-access aspects (40) are a separate concern requiring individual evaluation."
2196
- }
2197
- ]
2198
- },
2199
- {
2200
- "type": "standalone",
2201
- "slot": "slot-090",
2202
- "course": "para-501",
2203
- "variants": [
2204
- {
2205
- "id": "plsat-090",
2206
- "scenario": "Your team uses Paradigm task management to track work across context windows. An agent creates three tasks on 2026-03-10: a high-priority auth bug, a medium-priority docs update, and a low-priority refactor. Later that day, a fourth task is created. The next morning (2026-03-11), a new agent session starts and calls `paradigm_session_recover`.",
2207
- "question": "What task ID is assigned to the fourth task on 2026-03-10, and how are tasks surfaced in the new session?",
2208
- "choices": {
2209
- "A": "T-2026-03-10-004. All four tasks are fully displayed in the recovery payload.",
2210
- "B": "T-2026-03-10-004. The top 5 open tasks sorted by priority are surfaced in the session recovery, so all four appear (high first, then medium, then low).",
2211
- "C": "T-004. Only tasks explicitly pinned to the session are recovered.",
2212
- "D": "T-2026-03-11-001. Task IDs reset per calendar day, so recovery re-numbers them.",
2213
- "E": "T-2026-03-10-004. Tasks are not surfaced automatically \u2014 the agent must call `paradigm_task_list` to see them."
2214
- },
2215
- "correct": "B",
2216
- "explanation": "Task IDs follow the format T-YYYY-MM-DD-NNN with per-date sequential numbering, so the fourth task on 2026-03-10 is T-2026-03-10-004. Tasks are designed to survive context windows \u2014 on session recovery, the top 5 open tasks (sorted by priority: high > medium > low) are automatically surfaced. Since there are only four open tasks, all four appear. This ensures continuity without requiring the agent to manually query task state."
2217
- }
2218
- ]
2219
- },
2220
- {
2221
- "type": "standalone",
2222
- "slot": "slot-091",
2223
- "course": "para-501",
2224
- "variants": [
2225
- {
2226
- "id": "plsat-091",
2227
- "scenario": "An agent finishes a debugging session and wants to record the root cause and resolution as a lasting insight, grouped under the `arc:auth-hardening` arc. The agent calls `paradigm_lore_record` with `type: \"insight\"`, `tags: [\"arc:auth-hardening\", \"assessment:insight\"]`, a summary, and a body with the detailed analysis. The author is `ascend` and the timestamp is `2026-04-02T16:30:00Z`.",
2228
- "question": "Where is the entry stored, and how does the arc grouping work?",
2229
- "choices": {
2230
- "A": "Stored in `.paradigm/assessments/arcs/arc-auth-hardening/entries/` \u2014 arcs have their own storage directories.",
2231
- "B": "Stored in `.paradigm/lore/entries/2026-04-02/` as a `.lore` file \u2014 arcs are just tag prefixes, not separate storage.",
2232
- "C": "Stored in `.paradigm/lore/arcs/auth-hardening/` \u2014 arc entries get their own subdirectory.",
2233
- "D": "Stored in `.paradigm/lore/entries/` root directory \u2014 no date partitioning for arc entries.",
2234
- "E": "Stored in both `.paradigm/lore/` and `.paradigm/assessments/` \u2014 the system maintains backward compatibility."
2235
- },
2236
- "correct": "B",
2237
- "explanation": "All lore entries are stored in `.paradigm/lore/entries/{date}/` as `.lore` files, regardless of their tags. Arcs are simply tag prefixes (e.g., `arc:auth-hardening`) \u2014 they require no separate directory structure or management. To find all entries in an arc, use `paradigm_lore_search` with `tag: \"arc:auth-hardening\"`. This unified storage eliminates the complexity of a separate assessment system while preserving full arc-based organization through tags."
2238
- }
2239
- ]
2240
- },
2241
- {
2242
- "type": "standalone",
2243
- "slot": "slot-092",
2244
- "course": "para-501",
2245
- "variants": [
2246
- {
2247
- "id": "plsat-092",
2248
- "scenario": "A developer wants to record retrospectives about a failed deployment. They have no prior entries tagged with `arc:platform-stability`. The agent calls `paradigm_lore_record` with `type: \"retro\"`, `tags: [\"arc:platform-stability\", \"assessment:retro\"]`, a title, summary, and body describing the failure and lessons learned.",
2249
- "question": "What happens when you use a new arc tag that no prior entries have?",
2250
- "choices": {
2251
- "A": "The call fails \u2014 arcs must be explicitly created before tagging entries.",
2252
- "B": "The entry is recorded normally \u2014 arcs are just tag prefixes, no creation step needed. The arc exists as soon as an entry has the tag.",
2253
- "C": "The system auto-creates a `.paradigm/lore/arcs/platform-stability/` directory to track the arc.",
2254
- "D": "The entry is recorded but flagged as orphaned until an arc is formally registered.",
2255
- "E": "The tag is rejected because it does not match an existing arc in the arc registry."
2256
- },
2257
- "correct": "B",
2258
- "explanation": "Arcs in the unified lore system are simply tag prefixes \u2014 no explicit creation needed. The first entry tagged with `arc:platform-stability` effectively creates that arc. To find all entries in this arc later, use `paradigm_lore_search` with `tag: \"arc:platform-stability\"`. To close the arc, add `arc-closed` and `arc-status:complete` tags to its entries. This tag-based approach eliminates the overhead of managing separate arc directories and YAML files."
2259
- }
2260
- ]
2261
- },
2262
- {
2263
- "type": "standalone",
2264
- "slot": "slot-093",
2265
- "course": "para-501",
2266
- "variants": [
2267
- {
2268
- "id": "plsat-093",
2269
- "scenario": "Your project has been running for six months. The codebase has 200+ commits in git and 57 lore entries: 45 are `agent-session` type (automatic session records), 8 have `arc:*` tags (retrospectives and insights grouped into thematic arcs), and 4 are `decision` type (architectural decisions). A new team member asks how lore's different entry types and tags work together.",
2270
- "question": "Which statement BEST describes Paradigm's unified lore model?",
2271
- "choices": {
2272
- "A": "Lore entries are all the same \u2014 tags are purely cosmetic and do not affect searching or organization.",
2273
- "B": "Lore is the single project memory system. Entry types classify the nature of knowledge (session, retro, insight, decision), while tags like `arc:*` group related entries into themes \u2014 both are filterable via `paradigm_lore_search`.",
2274
- "C": "Session entries and reflection entries are stored in separate directories, with tags used only for cross-referencing between them.",
2275
- "D": "The `arc:*` tags are managed by a separate arc subsystem that must be initialized before use.",
2276
- "E": "Entry types are deprecated \u2014 tags alone drive all classification in the new model."
2277
- },
2278
- "correct": "B",
2279
- "explanation": "Paradigm's unified lore model uses one system with two classification axes: entry `type` classifies the nature of the knowledge (agent-session for automated records, retro for retrospectives, insight for patterns, decision for choices, etc.), while tags provide flexible grouping (arc:* for thematic arcs, assessment:* for reflection type, plus arbitrary project tags). Both are searchable via `paradigm_lore_search` \u2014 you can filter by type, tag prefix, symbol, author, and date range. All entries live in the same `.paradigm/lore/entries/` directory structure regardless of type or tags."
2280
- }
2281
- ]
2282
- },
2283
- {
2284
- "type": "standalone",
2285
- "slot": "slot-094",
2286
- "course": "para-301",
2287
- "variants": [
2288
- {
2289
- "id": "plsat-094",
2290
- "scenario": "You receive a task: \"Add a new Settings page to the application.\" The project has a `.paradigm/protocols/` directory with several `.protocol` files. You know that previous agents added similar pages (Logs, Events, Dashboard) following the same pattern each time.",
2291
- "question": "What should you do FIRST before exploring the codebase?",
2292
- "choices": {
2293
- "A": "Read every existing page component to understand the pattern",
2294
- "B": "Call `paradigm_protocol_search` with your task description to check for a matching protocol",
2295
- "C": "Call `paradigm_lore_record` to document that you are starting the task",
2296
- "D": "Call `paradigm_protocol_record` to create a new protocol for the Settings page",
2297
- "E": "Read the `.paradigm/protocols/index.yaml` file directly to scan for relevant entries"
2298
- },
2299
- "correct": "B",
2300
- "explanation": "paradigm_protocol_search is the agent's first stop before exploring the codebase. It takes a natural language task description and returns matching protocols with steps, exemplar files, and freshness info — typically saving thousands of exploration tokens. Reading existing pages (A) is exactly the expensive exploration that protocols prevent. Recording lore (C) is done after work, not before. Recording a protocol (D) is done after completing repeatable work. Reading index.yaml directly (E) bypasses the fuzzy search that matches task descriptions to protocols."
2301
- },
2302
- {
2303
- "id": "plsat-094b",
2304
- "scenario": "An agent needs to add a new API endpoint to the project. The project has 15 recorded protocols covering common tasks. The agent calls `paradigm_protocol_search({ task: \"add a new API endpoint\" })` and gets back a protocol with 4 steps: create route file, add handler, register route, verify with build.",
2305
- "question": "What should the agent do next?",
2306
- "choices": {
2307
- "A": "Ignore the protocol and explore the codebase to find its own approach",
2308
- "B": "Call `paradigm_protocol_get` with the protocol ID to get full details, then follow the steps using the exemplar as reference",
2309
- "C": "Call `paradigm_protocol_record` to save the protocol it just found",
2310
- "D": "Run `paradigm_reindex` to make sure the protocol is up to date",
2311
- "E": "Call `paradigm_protocol_validate` to check all protocols before proceeding"
2312
- },
2313
- "correct": "B",
2314
- "explanation": "After finding a matching protocol via search, the next step is paradigm_protocol_get to retrieve full details (including the exemplar file path and detailed step notes), then follow the steps. The exemplar is the canonical file to study for the pattern. Ignoring the protocol (A) wastes the lookup. Recording (C) is for new protocols. Reindex (D) and full validation (E) are maintenance operations, not implementation steps."
2315
- }
2316
- ]
2317
- },
2318
- {
2319
- "type": "standalone",
2320
- "slot": "slot-095",
2321
- "course": "para-301",
2322
- "variants": [
2323
- {
2324
- "id": "plsat-095",
2325
- "scenario": "An agent just finished adding a new Sentinel event schema — the third such schema added to the project this month. Each time, the agent followed the same steps: create a schema file, register it in the schema index, add a migration, and verify. No protocol exists for this task yet.",
2326
- "question": "What should the agent do regarding protocols?",
2327
- "choices": {
2328
- "A": "Nothing — protocols are only created by project maintainers, not agents",
2329
- "B": "Call `paradigm_protocol_record` to capture the repeatable pattern it just followed",
2330
- "C": "Edit an existing protocol to add the schema steps as a sub-procedure",
2331
- "D": "Wait for `paradigm_reindex` to auto-generate a protocol from git history",
2332
- "E": "File an issue asking a human to write the protocol later"
2333
- },
2334
- "correct": "B",
2335
- "explanation": "Protocols are captured AFTER completing work, by the agent that did the work. When an agent completes a repeatable task and no protocol existed, it should call paradigm_protocol_record with the steps it followed, trigger phrases, tags, and an exemplar file. This ensures the next agent that receives a similar task can skip exploration entirely. Reindex (D) validates existing protocols but does not auto-generate new ones from git history."
2336
- },
2337
- {
2338
- "id": "plsat-095b",
2339
- "scenario": "After recording lore for a session where the agent created two new view components following the existing LogsView pattern, the `paradigm_lore_record` response includes a `protocol_suggestion` field with a draft protocol.",
2340
- "question": "What triggered this protocol suggestion?",
2341
- "choices": {
2342
- "A": "The agent explicitly asked for protocol suggestions in the lore_record call",
2343
- "B": "The lore system detected that the session created new files following existing patterns in the same directory",
2344
- "C": "All lore entries automatically include protocol suggestions",
2345
- "D": "The protocol suggestion was cached from a previous session",
2346
- "E": "The lore system runs paradigm_protocol_search on every lore entry"
2347
- },
2348
- "correct": "B",
2349
- "explanation": "When paradigm_lore_record is called, it runs a detection heuristic: if the session created 2+ new files in a directory that already has similar files, or modified the same 'registration' files that existing protocols touch, it includes a protocol_suggestion in the response. This nudges agents to capture repeatable patterns without manual intervention. Not all lore entries trigger suggestions (C) — only those with detectable repeatable patterns."
2350
- }
2351
- ]
2352
- },
2353
- {
2354
- "type": "standalone",
2355
- "slot": "slot-096",
2356
- "course": "para-301",
2357
- "variants": [
2358
- {
2359
- "id": "plsat-096",
2360
- "scenario": "During `paradigm_reindex`, the system validates all protocols. Protocol P-add-view references `ui/src/views/LogsView.tsx` as its exemplar. The file still exists but was significantly refactored two weeks after the protocol was last verified.",
2361
- "question": "What status will the reindex assign to this protocol?",
2362
- "choices": {
2363
- "A": "`current` — the file still exists, so the protocol is valid",
2364
- "B": "`stale` — the exemplar has been modified since the protocol was last verified",
2365
- "C": "`broken` — any change to a referenced file invalidates the protocol",
2366
- "D": "`deprecated` — protocols older than 30 days are automatically deprecated",
2367
- "E": "`unknown` — reindex cannot determine status without running the protocol"
2368
- },
2369
- "correct": "B",
2370
- "explanation": "During reindex validation, a protocol is marked 'stale' when its exemplar or referenced files have been modified since last_verified. The file still exists (so it is not 'broken'), but the protocol's steps might no longer match the current code pattern. A 'broken' status (C) is reserved for when referenced files are missing entirely. Stale protocols still work but should be reviewed and refreshed after successful use via paradigm_protocol_update with refresh: true."
2371
- },
2372
- {
2373
- "id": "plsat-096b",
2374
- "scenario": "An agent calls `paradigm_protocol_validate({ id: \"P-add-api-route\" })`. The protocol's step 2 references the file `src/routes/index.ts`, but that file was deleted during a recent refactoring.",
2375
- "question": "What status will the validation assign?",
2376
- "choices": {
2377
- "A": "`stale` — a referenced file has changed",
2378
- "B": "`current` — the protocol itself is still syntactically valid",
2379
- "C": "`broken` — a referenced file no longer exists",
2380
- "D": "`warning` — the file might be temporarily missing",
2381
- "E": "`archived` — protocols with missing references are auto-archived"
2382
- },
2383
- "correct": "C",
2384
- "explanation": "A 'broken' status means one or more files referenced by the protocol (targets, exemplars, or template_from) no longer exist. This is more severe than 'stale' (where files exist but have been modified). A broken protocol cannot be reliably followed until its references are updated to point to existing files via paradigm_protocol_update."
2385
- }
2386
- ]
2387
- },
2388
- {
2389
- "type": "standalone",
2390
- "slot": "slot-097",
2391
- "course": "para-201",
2392
- "variants": [
2393
- {
2394
- "id": "plsat-097",
2395
- "scenario": "You run `paradigm init` on an existing Next.js project. The output shows `discipline: fullstack` and `stack: nextjs`. Your colleague asks what the difference is between a discipline and a stack preset.",
2396
- "question": "Which statement BEST describes the relationship between disciplines and stack presets?",
2397
- "choices": {
2398
- "A": "They are the same thing — 'discipline' is the old name and 'stack preset' is the new name",
2399
- "B": "Disciplines define domain-level symbol mappings (web, backend, mobile), while stack presets layer framework-specific configuration (Next.js, FastAPI, SwiftUI) on top of the discipline",
2400
- "C": "Stack presets replace disciplines entirely — once a preset is detected, the discipline is ignored",
2401
- "D": "Disciplines are for code organization and stack presets are for deployment configuration",
2402
- "E": "Stack presets are only used for auto-scan patterns, while disciplines control everything else"
2403
- },
2404
- "correct": "B",
2405
- "explanation": "Disciplines and stack presets are a two-layer system. The discipline (e.g., 'fullstack') defines broad symbol mappings for the development domain. The stack preset (e.g., 'nextjs') adds framework-specific refinements: scan hints for Next.js patterns like app/ routes and server components, purpose-required paths, and additional symbol mappings. The preset extends the discipline — it does not replace it."
2406
- },
2407
- {
2408
- "id": "plsat-097b",
2409
- "scenario": "A team is setting up Paradigm on a Flutter mobile app. They run `paradigm init` and see `discipline: mobile` with `stack: flutter`. They want to see what other stack presets are available for mobile projects.",
2410
- "question": "Which command shows available stack presets filtered by discipline?",
2411
- "choices": {
2412
- "A": "`paradigm disciplines --list`",
2413
- "B": "`paradigm presets --discipline mobile`",
2414
- "C": "`paradigm init --show-stacks`",
2415
- "D": "`paradigm config --list-presets`",
2416
- "E": "`paradigm scan --detect-stack`"
2417
- },
2418
- "correct": "B",
2419
- "explanation": "The `paradigm presets` command lists all available stack presets. The `--discipline` flag filters to show only presets for a specific discipline. For mobile, this would show flutter, swift-ios, kotlin-android, and react-native presets."
2420
- }
2421
- ]
2422
- },
2423
- {
2424
- "type": "standalone",
2425
- "slot": "slot-098",
2426
- "course": "para-301",
2427
- "variants": [
2428
- {
2429
- "id": "plsat-098",
2430
- "scenario": "You join a large existing project with 200+ source files and no Paradigm setup. Running `paradigm init` creates the `.paradigm/` directory, but no `.purpose` files exist yet. A colleague suggests running `paradigm scan auto` to bootstrap the project.",
2431
- "question": "What does `paradigm scan auto` do?",
2432
- "choices": {
2433
- "A": "It reads all source files line-by-line and creates comprehensive documentation for every function",
2434
- "B": "It uses regex-based heuristics to detect components, routes, auth patterns, and signals in your codebase, then generates draft `.purpose` files with detected symbols",
2435
- "C": "It copies `.purpose` templates from a global registry and places them in every directory",
2436
- "D": "It connects to an AI service to analyze your code and generate documentation",
2437
- "E": "It deletes all existing `.purpose` files and starts fresh"
2438
- },
2439
- "correct": "B",
2440
- "explanation": "paradigm scan auto uses pattern-based detection to find components (exported classes/functions), routes (HTTP method patterns like app.get/router.post), auth patterns (JWT, session checks, middleware), and signals (event emitters). It produces draft .purpose files with detected symbols and confidence levels. The detection is local and regex-based — it does not call external services or read every line. Stack presets enhance the scan with framework-specific patterns."
2441
- },
2442
- {
2443
- "id": "plsat-098b",
2444
- "scenario": "After running `paradigm scan auto` on a FastAPI project, the auto-scan detects several route handlers in `src/routes/users.py` and marks them with `confidence: high`. It also finds some utility functions and marks them `confidence: medium`.",
2445
- "question": "What do the confidence levels on auto-detected symbols indicate?",
2446
- "choices": {
2447
- "A": "How important the symbol is to the project — high means critical, medium means optional",
2448
- "B": "How certain the scanner is that the detected pattern actually represents that symbol type — high means strong pattern match, medium means heuristic match",
2449
- "C": "How many lines of code the detected symbol contains — more lines means higher confidence",
2450
- "D": "How recently the file was modified — recently modified files get higher confidence",
2451
- "E": "How many other symbols reference this one — more references means higher confidence"
2452
- },
2453
- "correct": "B",
2454
- "explanation": "Confidence levels reflect the scanner's certainty about the detection. A route handler matching `@app.get('/users')` is a strong, unambiguous pattern match (high confidence). A utility function detected from an exported function that doesn't match any specific pattern is a heuristic match (medium confidence). Low confidence detections are more speculative. Users should review auto-generated .purpose files, especially medium and low confidence entries."
2455
- }
2456
- ]
2457
- },
2458
- {
2459
- "type": "standalone",
2460
- "slot": "slot-099",
2461
- "course": "para-101",
2462
- "variants": [
2463
- {
2464
- "id": "plsat-099",
2465
- "scenario": "A developer is tasked with adding Paradigm to a mature React Native project that has been in development for two years. The project has 150+ components, navigation stacks, Redux state management, and several API integration files. The developer is worried about the setup effort.",
2466
- "question": "What is the recommended approach for adding Paradigm to a large existing project?",
2467
- "choices": {
2468
- "A": "Document every single component on day one — comprehensive coverage is required before Paradigm is useful",
2469
- "B": "Start with `paradigm init` (which auto-detects discipline and stack), then create one `.purpose` file for the most critical module, and expand incrementally",
2470
- "C": "Rewrite the project structure to match Paradigm's expected directory layout",
2471
- "D": "Wait until starting a new project — Paradigm cannot be added to existing codebases",
2472
- "E": "Run `paradigm scan auto` and commit all generated files without review"
2473
- },
2474
- "correct": "B",
2475
- "explanation": "Paradigm is designed for incremental adoption. Start with `paradigm init` which auto-detects your discipline (mobile) and stack preset (react-native), configuring symbol mappings and scan patterns for your framework. Then create one .purpose file for your most important module. You can optionally run `paradigm scan auto` to bootstrap additional .purpose files, but always review auto-generated content. A common pitfall is trying to document everything on day one — start small and expand as the project grows."
2476
- },
2477
- {
2478
- "id": "plsat-099b",
2479
- "scenario": "A team runs `paradigm init` on their Django project. The init command auto-detects `discipline: fullstack` and `stack: django`. The team then runs `paradigm scan auto` which generates draft `.purpose` files for `views/`, `models/`, and `urls/` directories.",
2480
- "question": "Why did the auto-scan know to look in `views/`, `models/`, and `urls/` directories?",
2481
- "choices": {
2482
- "A": "These are hardcoded directories that every Paradigm scan checks regardless of discipline",
2483
- "B": "The django stack preset provides scan hints with Django-specific component patterns, route patterns, and directory structures",
2484
- "C": "The scan randomly explores all directories and happened to find code there",
2485
- "D": "The team manually configured these directories in config.yaml before scanning",
2486
- "E": "Django is a special case with its own dedicated scanner module in Paradigm"
2487
- },
2488
- "correct": "B",
2489
- "explanation": "Stack presets include scan hints — framework-specific patterns that tell the auto-scanner where to look and what patterns to match. The django preset knows that views.py files contain route handlers, models.py files contain data models, and urls.py files define URL routing. This is why stack presets solve the cold-start problem: they bring framework knowledge that makes auto-scanning productive for existing projects."
2490
- }
2491
- ]
2492
- },
2493
- {
2494
- "type": "standalone",
2495
- "slot": "slot-100",
2496
- "course": "para-101",
2497
- "variants": [
2498
- {
2499
- "id": "plsat-100",
2500
- "scenario": "Your project has a `#PaymentService` that coordinates payment processing and integrates with Stripe. A new developer asks whether they should use `type: integration` or `tags: [integration]` to capture the Stripe relationship.",
2501
- "question": "What is the correct approach?",
2502
- "choices": {
2503
- "A": "`type: integration` — because the Stripe integration is the most important thing about this component",
2504
- "B": "`type: service` with `tags: [integration]` — type describes structural role, tags describe domain/behavior",
2505
- "C": "Both `type: integration` and `tags: [integration]` — redundancy is good for search",
2506
- "D": "Neither — integration is a v1 concept replaced by `&` prefix in v2",
2507
- "E": "`type: stripe` — use the specific integration name as the type"
2508
- },
2509
- "correct": "B",
2510
- "explanation": "The `type` field describes a component's structural role — what the code IS architecturally. PaymentService is a service, so `type: service`. The Stripe integration is a behavioral/domain concern, captured with `tags: [integration]`. Type and tags serve different classification axes: type = architecture, tags = domain."
2511
- },
2512
- {
2513
- "id": "plsat-100b",
2514
- "scenario": "A developer adds a new `#EmailValidator` utility and sets `type: validator` in the .purpose file. Another developer points out that `validator` is not in the project's `component_types` glossary in config.yaml.",
2515
- "question": "Is `type: validator` valid?",
2516
- "choices": {
2517
- "A": "No — types must exactly match the glossary entries or they are rejected",
2518
- "B": "Yes — types are open strings and the glossary is descriptive only, not enforced",
2519
- "C": "No — you must add it to the glossary first before using it",
2520
- "D": "Yes — but only if the developer also adds a `~validator` aspect",
2521
- "E": "It depends on the `strict-types` setting in config.yaml"
2522
- },
2523
- "correct": "B",
2524
- "explanation": "Component types are open strings — any project can invent its own vocabulary. The glossary in config.yaml is descriptive only (it helps agents understand types) but does not enforce or block unknown types. The developer can use `type: validator` freely."
2525
- }
2526
- ]
2527
- },
2528
- {
2529
- "type": "standalone",
2530
- "slot": "slot-101",
2531
- "course": "para-101",
2532
- "variants": [
2533
- {
2534
- "id": "plsat-101",
2535
- "scenario": "You have a `#GazeRouter` component that maps gaze coordinates to dispatch targets. It is managed by `#InputOrchestrator`. You want to express this hierarchy in the .purpose file.",
2536
- "question": "How should the parent relationship be declared?",
2537
- "choices": {
2538
- "A": "On `#InputOrchestrator` with `children: [\"#GazeRouter\"]`",
2539
- "B": "On `#GazeRouter` with `parent: \"#InputOrchestrator\"`",
2540
- "C": "In a separate `relationships` section at the top of the .purpose file",
2541
- "D": "In portal.yaml alongside route gates",
2542
- "E": "Using `tags: [child-of-InputOrchestrator]` on GazeRouter"
2543
- },
2544
- "correct": "B",
2545
- "explanation": "Parent relationships are declared on the child component using the `parent` field with a `#` symbol reference. This keeps .purpose files decentralized — you don't need to maintain a children roster on the parent. The parent field is computed upward, not maintained downward."
2546
- }
2547
- ]
2548
- },
2549
- {
2550
- "type": "standalone",
2551
- "slot": "slot-102",
2552
- "course": "para-101",
2553
- "variants": [
2554
- {
2555
- "id": "plsat-102",
2556
- "scenario": "An AI agent needs to find all router components in a project to understand the dispatch architecture. The project uses component types consistently.",
2557
- "question": "What is the most efficient MCP tool call?",
2558
- "choices": {
2559
- "A": "`paradigm_search` with `query: \"router\"` — search by name",
2560
- "B": "`paradigm_search` with `query: \"*\"` and `componentType: \"router\"` — filter by type",
2561
- "C": "`paradigm_navigate` with `intent: \"find\"` and `target: \"router\"` — navigate to routers",
2562
- "D": "`paradigm_ripple` with `symbol: \"#router\"` — check router dependencies",
2563
- "E": "Read every .purpose file and grep for `type: router`"
2564
- },
2565
- "correct": "B",
2566
- "explanation": "The `paradigm_search` tool accepts a `componentType` filter that directly queries the symbol index for components of a specific type. This is the most efficient approach — it uses the index instead of reading files, and returns exactly the components with `type: router`."
2567
- }
2568
- ]
2569
- },
2570
- {
2571
- "type": "standalone",
2572
- "slot": "slot-103",
2573
- "course": "para-501",
2574
- "variants": [
2575
- {
2576
- "id": "plsat-103",
2577
- "scenario": "Your team is evaluating Symphony's The Score for inter-agent communication. A team member asks: 'Does The Score require a central server or WebSocket connection to route messages between agents on the same machine?'",
2578
- "question": "How does The Score route messages between agents on a single machine?",
2579
- "choices": {
2580
- "A": "Through a persistent WebSocket connection managed by Sentinel's event hub",
2581
- "B": "Through a central message broker that must be started with `paradigm symphony serve`",
2582
- "C": "Through file-based mailboxes — agents write to JSONL files in `~/.paradigm/score/agents/` and poll for new messages via `/loop`",
2583
- "D": "Through Conductor's native IPC channel between Swift and Node.js processes",
2584
- "E": "Through a shared SQLite database at `~/.paradigm/score/messages.db`"
2585
- },
2586
- "correct": "C",
2587
- "explanation": "The Score is file-based with zero daemon dependencies. Each agent has a mailbox directory containing inbox.jsonl and outbox.jsonl. Messages are appended as single JSON lines. Agents poll for new messages using `/loop 10s paradigm_symphony_poll`. No WebSocket, no broker, no Conductor required — just filesystem reads and writes."
2588
- },
2589
- {
2590
- "id": "plsat-103b",
2591
- "scenario": "A developer is comparing Symphony The Score to traditional inter-process communication. They note that The Score uses JSONL files for message passing instead of sockets, pipes, or shared memory.",
2592
- "question": "Why does The Score use JSONL files instead of a real-time transport like WebSockets?",
2593
- "choices": {
2594
- "A": "JSONL is faster than WebSockets for small messages under 1KB",
2595
- "B": "File-based messaging requires zero dependencies beyond the Paradigm CLI — no Conductor, no Sentinel, no persistent server process",
2596
- "C": "Claude Code sessions cannot open network connections, so files are the only option",
2597
- "D": "JSONL files provide built-in encryption that WebSockets lack",
2598
- "E": "File-based messaging is required by macOS sandboxing rules for CLI tools"
2599
- },
2600
- "correct": "B",
2601
- "explanation": "The Score's design goal is zero-dependency messaging. It works with nothing beyond the Paradigm CLI — no Conductor, no Sentinel, no network configuration. File-based JSONL is append-only (safe for concurrent writes), trivial to parse, and works on every OS without additional runtime dependencies. Later phases (Conductor, Sentinel) add richer transports, but The Score is the foundation that always works."
2602
- }
2603
- ]
2604
- },
2605
- {
2606
- "type": "standalone",
2607
- "slot": "slot-104",
2608
- "course": "para-501",
2609
- "variants": [
2610
- {
2611
- "id": "plsat-104",
2612
- "scenario": "You have two Claude Code sessions open: one working on `a-paradigm` in the core library role, and another working on `a-paradigm` in the backend role. After running `paradigm symphony join`, each session gets an identity.",
2613
- "question": "How are agent identities determined in The Score?",
2614
- "choices": {
2615
- "A": "Random UUIDs assigned at session start — different every time",
2616
- "B": "The process ID (PID) of the Claude Code session is used as the identity",
2617
- "C": "Derived from project directory + role (e.g., `a-paradigm/core`) — deterministic and stable across session restarts for the same project context",
2618
- "D": "The user's GitHub username combined with a session counter",
2619
- "E": "Assigned sequentially by the mail router: agent-001, agent-002, etc."
2620
- },
2621
- "correct": "C",
2622
- "explanation": "Agent identity in The Score is deterministic: `{project-name}/{role}`. The same project opened in the same context always gets the same identity, even across session restarts. This stability means other agents can reliably address messages to `a-paradigm/backend` knowing it will reach the backend agent regardless of which specific Claude Code session is running. PID maps to identity via identity.json but is not the identity itself."
2623
- },
2624
- {
2625
- "id": "plsat-104b",
2626
- "scenario": "After running `paradigm symphony whoami`, an agent sees: `agent-abc123 (a-paradigm/backend) — 3 linked peers, 2 active threads`. The agent then crashes and a new Claude Code session is started for the same project directory with the same working context.",
2627
- "question": "What happens to the agent's identity after the restart?",
2628
- "choices": {
2629
- "A": "A new random identity is assigned — the old one is permanently lost",
2630
- "B": "The identity `a-paradigm/backend` is restored because it is derived from the project directory and role, not the PID",
2631
- "C": "The identity is lost unless the agent calls `paradigm symphony recover` within 5 minutes",
2632
- "D": "The new session gets a different identity with a `-2` suffix to avoid conflicts",
2633
- "E": "The old session's mailbox is deleted and a fresh one is created with a new ID"
2634
- },
2635
- "correct": "B",
2636
- "explanation": "Agent identity is derived from project directory + role, not from PID or session-specific state. When a new session starts for the same project context, it gets the same deterministic identity. The PID-to-identity mapping in identity.json is updated, but the mailbox (inbox.jsonl, outbox.jsonl) persists. Unread messages from before the crash are still in the inbox waiting for the next poll."
2637
- }
2638
- ]
2639
- },
2640
- {
2641
- "type": "standalone",
2642
- "slot": "slot-105",
2643
- "course": "para-501",
2644
- "variants": [
2645
- {
2646
- "id": "plsat-105",
2647
- "scenario": "An agent investigating a production bug sends a Symphony message to the frontend agent. The agent wants to indicate that it is asking a question and expects information in response, not just acknowledgment.",
2648
- "question": "Which message intent should the agent use?",
2649
- "choices": {
2650
- "A": "`context` — because the agent is seeking contextual information",
2651
- "B": "`clarification` — because the agent wants details clarified",
2652
- "C": "`question` — because the agent is asking for information and the intent classifies the message's purpose for structured processing",
2653
- "D": "`reference` — because the agent wants the frontend agent to reference its recent changes",
2654
- "E": "`alert` — because the production bug is urgent"
2655
- },
2656
- "correct": "C",
2657
- "explanation": "Message intents classify the purpose of a message. `question` signals that the sender is asking for information and expects a substantive response. `context` is for providing background (not requesting it). `clarification` is for asking about something already said. `alert` is specifically for forwarding Sentinel alerts. Intents help the receiving agent understand what kind of response is expected."
2658
- },
2659
- {
2660
- "id": "plsat-105b",
2661
- "scenario": "During a Symphony conversation thread, Agent A proposes a fix: 'Make the currency field optional with a default of USD.' Agent B agrees and wants to record this as a team decision that will be captured in Lore.",
2662
- "question": "What intent should Agent B use when confirming the decision?",
2663
- "choices": {
2664
- "A": "`approval` — to approve Agent A's proposal",
2665
- "B": "`decision` — to formally record the choice, triggering automatic Lore entry creation",
2666
- "C": "`action` — to announce it will implement the fix",
2667
- "D": "`verification` — to verify understanding of the proposal",
2668
- "E": "`context` — to provide context that it agrees"
2669
- },
2670
- "correct": "B",
2671
- "explanation": "The `decision` intent serves a dual purpose: it communicates agreement within the conversation AND triggers automatic Lore integration. Symphony auto-records messages with `intent: decision` as lore entries of type `decision`, linking them to the thread and referenced symbols. Using `approval` (A) would confirm the proposal but would not trigger the Lore recording side effect."
2672
- }
2673
- ]
2674
- },
2675
- {
2676
- "type": "standalone",
2677
- "slot": "slot-106",
2678
- "course": "para-501",
2679
- "variants": [
2680
- {
2681
- "id": "plsat-106",
2682
- "scenario": "Agent A on your machine needs `src/config/database.ts` from Agent B on a teammate's machine. Agent A calls `paradigm_symphony_request_file` with the file path and reason. The teammate's trust.yaml has `neverApprove: [\".env*\", \"**/*.key\"]` but no entry for `.ts` files.",
2683
- "question": "What happens next in the file transfer pipeline?",
2684
- "choices": {
2685
- "A": "The file is automatically sent because `.ts` files are not in the neverApprove list",
2686
- "B": "The request is queued and the teammate (human) receives a prompt to approve, deny, or approve with redaction — human approval is required for every file transfer",
2687
- "C": "Agent B automatically reads and sends the file since both agents are in the same Symphony network",
2688
- "D": "The request is denied because `database.ts` might contain database credentials",
2689
- "E": "The file is sent after a 5-minute delay to give the human time to intervene"
2690
- },
2691
- "correct": "B",
2692
- "explanation": "Every file transfer requires explicit human approval regardless of trust configuration. The neverApprove list adds hard denials (even if the human clicks approve), but not being on the neverApprove list does not mean auto-approval. The teammate sees a prompt with the file path, the requesting agent, and the reason, then chooses: approve, deny, or approve with redaction."
2693
- },
2694
- {
2695
- "id": "plsat-106b",
2696
- "scenario": "A developer sees a file request in their terminal via `paradigm symphony requests`: Agent C is requesting `.env.production` from their project. The developer's trust.yaml includes `neverApprove: [\".env*\"]`. The developer runs `paradigm symphony approve req-xyz` anyway.",
2697
- "question": "What happens when the developer tries to approve a file on the neverApprove list?",
2698
- "choices": {
2699
- "A": "The approval succeeds — human override always takes priority over trust configuration",
2700
- "B": "The approval is rejected by the system — files matching neverApprove patterns are always denied, even if the human explicitly approves",
2701
- "C": "The file is sent but with all values redacted automatically",
2702
- "D": "The developer is prompted a second time to confirm the override",
2703
- "E": "The approval is queued for review by a second team member"
2704
- },
2705
- "correct": "B",
2706
- "explanation": "The neverApprove list is enforced absolutely by the system. Files matching patterns like `.env*`, `*.key`, or `*.pem` are always denied regardless of human action. This is a security guardrail — even well-intentioned approvals cannot override it. The developer would need to modify trust.yaml to remove the pattern before the file could be approved."
2707
- }
2708
- ]
2709
- },
2710
- {
2711
- "type": "standalone",
2712
- "slot": "slot-107",
2713
- "course": "para-501",
2714
- "variants": [
2715
- {
2716
- "id": "plsat-107",
2717
- "scenario": "A thread about migrating to a new API version has been active for 30 minutes. Four agents and two humans participated. The team agreed on an approach: skip the database migration and hotfix the serializer. Someone runs `paradigm symphony resolve thr-abc`.",
2718
- "question": "What does thread resolution produce?",
2719
- "choices": {
2720
- "A": "A git commit with the conversation as the commit message",
2721
- "B": "A comprehensive lore entry capturing the topic, participants, decisions made, actions taken, and symbols discussed — bridging ephemeral conversation to permanent project memory",
2722
- "C": "A .purpose file update adding the thread as a new component",
2723
- "D": "A Sentinel incident record linking the conversation to a production error",
2724
- "E": "An email summary sent to all team members who were not in the thread"
2725
- },
2726
- "correct": "B",
2727
- "explanation": "Thread resolution creates a lore entry that captures the entire collaborative context: the conversation topic, all participants (agents and humans), decisions made during the discussion, actions taken, and Paradigm symbols referenced. This bridges the gap between ephemeral real-time conversation and permanent project memory. Future developers can find and learn from the discussion via `paradigm_lore_search`."
2728
- },
2729
- {
2730
- "id": "plsat-107b",
2731
- "scenario": "An agent resolves a thread with `paradigm symphony resolve thr-def`. The thread contained 12 messages, 3 decisions, and referenced symbols `#payment-serializer`, `#api-types`, and `$refund-flow`. A week later, a new developer encounters a similar serialization issue.",
2732
- "question": "How can the new developer find the resolved thread's knowledge?",
2733
- "choices": {
2734
- "A": "Search `~/.paradigm/score/threads/` for the thread JSON file — resolved threads are kept permanently",
2735
- "B": "Call `paradigm_lore_search` with `symbol: '#payment-serializer'` — the resolved thread became a lore entry linked to the discussed symbols",
2736
- "C": "Call `paradigm_symphony_thread` with the old thread ID — resolved threads remain in the Symphony network",
2737
- "D": "Check git log for the conversation — resolved threads are saved as commit messages",
2738
- "E": "The knowledge is lost — resolved threads are deleted from the filesystem"
2739
- },
2740
- "correct": "B",
2741
- "explanation": "When a thread is resolved, it becomes a lore entry tagged with the symbols discussed in the conversation. The new developer searching for `#payment-serializer` via `paradigm_lore_search` will find the entry, which contains the full conversation context, decisions, and actions. This is the core value of thread resolution: converting temporary discussion into searchable, permanent project memory."
2742
- }
2743
- ]
2744
- },
2745
- {
2746
- "type": "standalone",
2747
- "slot": "slot-108",
2748
- "course": "para-501",
2749
- "variants": [
2750
- {
2751
- "id": "plsat-108",
2752
- "scenario": "You have set up The Score with 3 linked agents. Each agent has a mailbox, but none of them are running `/loop`. You send a message via `paradigm symphony send \"Check the failing test in #auth-service\"`. The message is written to each agent's inbox.jsonl.",
2753
- "question": "When will the agents see and respond to this message?",
2754
- "choices": {
2755
- "A": "Immediately — messages trigger an interrupt in the Claude Code session",
2756
- "B": "Never — without `/loop`, agents have no mechanism to poll their inbox and will not discover the message",
2757
- "C": "Within 30 seconds — the MCP tool cache automatically polls inboxes",
2758
- "D": "On the next `paradigm_status` call — status checks include inbox polling",
2759
- "E": "When the agent explicitly calls `paradigm symphony read` from its session"
2760
- },
2761
- "correct": "B",
2762
- "explanation": "`/loop` is the agent heartbeat. It runs `paradigm_symphony_poll` on a timer (typically every 10 seconds), which reads inbox.jsonl and presents messages to the agent. Without `/loop`, messages accumulate in the inbox with nobody reading them. The convenience command `paradigm symphony join` sets up both identity registration and the polling loop in one step. This is why the setup instructions always include `/loop 10s paradigm_symphony_poll` in each session."
2763
- },
2764
- {
2765
- "id": "plsat-108b",
2766
- "scenario": "An agent is running `/loop 10s paradigm_symphony_poll`. During one poll cycle, the tool returns 2 new messages: a question from the frontend agent and a decision message from a human. The agent processes both and sends replies via `paradigm_symphony_send`.",
2767
- "question": "Where do the agent's replies go, and how are they delivered to the recipients?",
2768
- "choices": {
2769
- "A": "Directly into each recipient's inbox.jsonl — the send tool writes to all inboxes simultaneously",
2770
- "B": "To this agent's outbox.jsonl — a mail router (or Conductor) picks up outbox messages and delivers them to the appropriate recipient inboxes",
2771
- "C": "To a central message queue at `~/.paradigm/score/queue.jsonl` that all agents read from",
2772
- "D": "Over a WebSocket connection to each recipient's MCP server",
2773
- "E": "To Sentinel's event hub, which broadcasts to all connected agents"
2774
- },
2775
- "correct": "B",
2776
- "explanation": "The agent writes replies to its own outbox.jsonl via `paradigm_symphony_send`. A mail router process (in The Score's Phase 0 implementation) or Conductor (in later phases) reads outbox files and delivers messages to the correct recipient inbox files. This separation of write (outbox) and delivery (router) keeps the protocol simple — agents only ever write to their own outbox and read from their own inbox."
2777
- }
2778
- ]
2779
- },
2780
- {
2781
- "type": "standalone",
2782
- "slot": "slot-109",
2783
- "course": "para-501",
2784
- "variants": [
2785
- {
2786
- "id": "plsat-109",
2787
- "scenario": "A developer is using `paradigm serve` to run the Platform. Their AI agent is helping refactor `#payment-service`. The agent wants to walk the developer through three related components on the Graph canvas, but the developer is currently reading a lore entry.",
2788
- "question": "What sequence of MCP tool calls should the agent use to present its walkthrough without disrupting the developer?",
2789
- "choices": {
2790
- "A": "Call `paradigm_platform_navigate` three times rapidly for each component — the browser handles queuing",
2791
- "B": "Call `paradigm_platform_observe` first to check if the user is active, then `paradigm_platform_annotate` with a toast saying 'I'd like to show you something on the Graph', then navigate after the user responds",
2792
- "C": "Call `paradigm_platform_highlight` on all three symbols simultaneously — highlights work across all sections",
2793
- "D": "Write to the Symphony mailbox and wait for the developer to read the message",
2794
- "E": "Call `paradigm_platform_clear` first, then force-navigate to each component"
2795
- },
2796
- "correct": "B",
2797
- "explanation": "The agent should first observe the user's state to understand context (are they busy? what section? muted?). Since the user is actively reading lore, the agent should use a toast annotation to signal intent rather than auto-navigating. When the user is active (<5s since last interaction), navigate commands show a prompt rather than auto-executing — but checking observe first lets the agent tailor its approach."
2798
- },
2799
- {
2800
- "id": "plsat-109b",
2801
- "scenario": "An AI agent calls `paradigm_platform_highlight({ symbols: ['#api-gateway', '#auth-middleware', '#rate-limiter'], label: 'Security surface', duration: 8000, pulse: true })` on the Platform. The developer sees three nodes glowing on the Graph canvas.",
2802
- "question": "What happens to the highlights after 8 seconds?",
2803
- "choices": {
2804
- "A": "They remain until the developer clicks on one of the nodes",
2805
- "B": "They fade out automatically — the duration parameter sets auto-expiry on both server (UserStateTracker) and browser (agentStore)",
2806
- "C": "They persist until the agent calls `paradigm_platform_clear({ target: 'highlights' })`",
2807
- "D": "They remain but the pulse animation stops",
2808
- "E": "The Platform server removes them but the browser keeps a static glow"
2809
- },
2810
- "correct": "B",
2811
- "explanation": "The duration parameter controls auto-expiry. On the server, UserStateTracker schedules removal after the specified milliseconds. On the browser, agentStore sets a setTimeout that filters out the highlight after duration expires. Both sides independently clean up, so even if a message is lost, the highlight eventually disappears."
2812
- }
2813
- ]
2814
- },
2815
- {
2816
- "type": "standalone",
2817
- "slot": "slot-110",
2818
- "course": "para-501",
2819
- "variants": [
2820
- {
2821
- "id": "plsat-110",
2822
- "scenario": "The Platform server starts with `paradigm serve`. Two browser tabs are open. An MCP tool sends an `agent:annotate` command with type `callout` targeting `#database-pool`.",
2823
- "question": "How does the annotation reach both browser tabs?",
2824
- "choices": {
2825
- "A": "The MCP tool sends the command directly to each tab via separate HTTP requests",
2826
- "B": "The Platform server stores the annotation in scan-index.json and both tabs poll for changes",
2827
- "C": "The MCP tool POSTs to /api/platform/agent-command, the server broadcasts a WebSocket message to all connected clients in the wsClients Set — both tabs receive it",
2828
- "D": "Only the active tab receives the annotation; the other tab receives it on focus",
2829
- "E": "The annotation is stored in localStorage, which is shared between tabs"
2830
- },
2831
- "correct": "C",
2832
- "explanation": "The Platform server maintains a Set<WebSocket> of all connected browser clients. When the agent command route receives a POST, it broadcasts the typed message (agent:annotate) to every client with readyState === OPEN. Both tabs have independent WebSocket connections to ws://localhost:3850/ws, so both receive the broadcast simultaneously."
2833
- },
2834
- {
2835
- "id": "plsat-110b",
2836
- "scenario": "A developer opens the Platform at localhost:3850 and navigates to the Graph section. They select `#payment-service`. The Platform server's UserStateTracker records this activity. Ten seconds later, the AI agent calls `paradigm_platform_observe()`.",
2837
- "question": "What information does the observe tool return?",
2838
- "choices": {
2839
- "A": "Only whether the Platform is running — `{ connected: true }`",
2840
- "B": "The section, selected symbol, theme, mute state, connected agents, browser client count, and optionally active highlights/annotations",
2841
- "C": "The full DOM tree of the Platform UI for the agent to parse",
2842
- "D": "A diff of everything that changed since the agent's last observe call",
2843
- "E": "Only the section name and selected symbol — no agent or highlight info"
2844
- },
2845
- "correct": "B",
2846
- "explanation": "`paradigm_platform_observe` returns the full UI state from the UserStateTracker: connected (boolean), users (client count), agents (array of AgentPresence with agentId, color, timestamps), and state (section, selectedSymbol, theme, muted). With detail: 'full', it also includes active highlights and annotations. This gives the agent a complete picture of the shared workspace."
2847
- }
2848
- ]
2849
- },
2850
- {
2851
- "type": "standalone",
2852
- "slot": "slot-111",
2853
- "course": "para-501",
2854
- "variants": [
2855
- {
2856
- "id": "plsat-111",
2857
- "scenario": "An agent connected to the Platform has been idle for 3 minutes. No MCP tool calls have been made. The AgentPresenceManager runs its periodic cleanup.",
2858
- "question": "What happens to the agent's presence?",
2859
- "choices": {
2860
- "A": "Nothing — agents are only removed when they explicitly disconnect",
2861
- "B": "The agent is marked as 'idle' but remains in the agents list with a dimmed indicator",
2862
- "C": "The agent is pruned from the presence list and an `agent:leave` message is broadcast to all browsers, removing the presence dot from the header",
2863
- "D": "The server sends a ping to the agent and waits for a response before deciding",
2864
- "E": "The agent's highlights and annotations are cleared but its presence remains"
2865
- },
2866
- "correct": "C",
2867
- "explanation": "The AgentPresenceManager runs pruneStale() every 30 seconds. Any agent whose lastActivity timestamp is more than 2 minutes old is removed from the agents Map and an agent:leave message is broadcast to all browser clients. The browser's agentStore filters out the agent, and the header presence dots update. This prevents ghost agents from accumulating."
2868
- }
2869
- ]
2870
- },
2871
- {
2872
- "type": "standalone",
2873
- "slot": "slot-112",
2874
- "course": "para-501",
2875
- "variants": [
2876
- {
2877
- "id": "plsat-112",
2878
- "scenario": "You're building a new Platform section that should respond to agent highlight commands. The existing sections (Graph, Lore, Overview) already work with the agent-driven UI system.",
2879
- "question": "Which browser-side component is responsible for receiving WebSocket agent messages and updating the Zustand store?",
2880
- "choices": {
2881
- "A": "platformStore.ts — all state flows through the main platform store",
2882
- "B": "useActivityReporter — it handles all WebSocket communication bidirectionally",
2883
- "C": "useAgentEffects — it connects WebSocket `agent:*` messages to agentStore.handleAgentMessage, with auto-reconnect on disconnect",
2884
- "D": "AgentToast — it listens for WebSocket messages and renders toasts",
2885
- "E": "The Platform server pushes state updates directly into the browser's agentStore via a shared reference"
2886
- },
2887
- "correct": "C",
2888
- "explanation": "useAgentEffects is the WebSocket→store bridge. It establishes a WebSocket connection to ws://localhost:{port}/ws, listens for messages whose type starts with 'agent:', and dispatches them to agentStore.handleAgentMessage. It also handles auto-reconnect (3s delay on close). useActivityReporter (B) handles the opposite direction — reporting user actions TO the server. AgentToast (D) only renders; it reads from the store but doesn't handle WebSocket."
2889
- }
2890
- ]
2891
- },
2892
- {
2893
- "type": "standalone",
2894
- "slot": "slot-113",
2895
- "course": "para-601",
2896
- "variants": [
2897
- {
2898
- "id": "plsat-113",
2899
- "scenario": "A builder agent finishes implementing a new REST endpoint. It modified 4 files, resolved 1 blocker, and the tests pass. The agent needs to record this somewhere in the knowledge streams.",
2900
- "question": "Which knowledge stream is the correct destination for this entry?",
2901
- "choices": {
2902
- "A": "Learning Journal — the agent learned how to build the endpoint",
2903
- "B": "Team Decision — the team decided to add an endpoint",
2904
- "C": "Work Log — it records what got done, files modified, outcome, and next steps",
2905
- "D": "Event Stream — it is an event that happened during work",
2906
- "E": "Lore — all project history goes into lore entries"
2907
- },
2908
- "correct": "C",
2909
- "explanation": "The Work Log stream answers 'what got done.' It captures the agent, summary, files_modified, symbols_touched, outcome (pass/fail/partial/blocked), and next_steps. It is project-scoped and ephemeral — designed for sprint boards and standup summaries. The Learning Journal is for personal insights the agent gains, not task completion. Team Decisions record rationale and alternatives for institutional choices."
2910
- },
2911
- {
2912
- "id": "plsat-113b",
2913
- "scenario": "An agent spent 45 minutes debugging a flaky test in the CI pipeline. It turned out to be a race condition in the database setup. The agent fixed it, and the test suite passes now. The agent wants to log this work.",
2914
- "question": "Which knowledge stream should this entry go into?",
2915
- "choices": {
2916
- "A": "Team Decision — the team needs to know about flaky tests",
2917
- "B": "Work Log — it captures what was done, duration, outcome, and what was fixed",
2918
- "C": "Learning Journal — the agent discovered a race condition pattern",
2919
- "D": "Event Stream — file-modified events are emitted automatically",
2920
- "E": "Both Work Log and Learning Journal equally — there is no distinction"
2921
- },
2922
- "correct": "B",
2923
- "explanation": "The Work Log is the correct primary destination — it records what got done (fixed flaky test), the outcome (pass), duration (45 min), and files modified. The agent might also record a Learning Journal entry about the race condition pattern it discovered, but the work itself belongs in the Work Log. These are separate entries in separate streams — the journal entry would link back to the work log via linked_work_log."
2924
- }
2925
- ]
2926
- },
2927
- {
2928
- "type": "standalone",
2929
- "slot": "slot-114",
2930
- "course": "para-601",
2931
- "variants": [
2932
- {
2933
- "id": "plsat-114",
2934
- "scenario": "During a code review, the architect agent points out that the builder's approach to token refresh will cause a race condition under concurrent requests. The builder realizes its mental model of the refresh flow was wrong and adjusts its confidence from 0.8 to 0.5 on `#token-refresh`.",
2935
- "question": "Which knowledge stream captures this learning moment?",
2936
- "choices": {
2937
- "A": "Work Log — the builder did work (reviewed code)",
2938
- "B": "Team Decision — the team decided to change the approach",
2939
- "C": "Learning Journal — the builder received a correction and adjusted its confidence, which is a personal learning event",
2940
- "D": "Event Stream — the correction is a 'compliance-violation' event",
2941
- "E": "Work Log with outcome 'fail' — the original approach failed review"
2942
- },
2943
- "correct": "C",
2944
- "explanation": "The Learning Journal captures 'what I learned.' The trigger is 'correction_received', the insight is the corrected mental model, confidence_before is 0.8, confidence_after is 0.5, and the entry is agent-private (stored in ~/.paradigm/agents/{id}/journal/). It may or may not be transferable to other projects. The Work Log would separately record the review activity, but the learning itself belongs in the journal."
2945
- },
2946
- {
2947
- "id": "plsat-114b",
2948
- "scenario": "A security agent reviewed an authentication module and predicted with 0.9 confidence that the session handling was secure. A penetration test later revealed a session fixation vulnerability. The security agent needs to record this calibration miss.",
2949
- "question": "Which knowledge stream is appropriate for this entry?",
2950
- "choices": {
2951
- "A": "Team Decision — the team needs to decide how to fix the vulnerability",
2952
- "B": "Work Log — the penetration test results are work output",
2953
- "C": "Event Stream — emit an 'error-encountered' event",
2954
- "D": "Learning Journal — the agent had a confidence miss and needs to record the corrected understanding",
2955
- "E": "Both Work Log and Team Decision — the journal is only for minor insights"
2956
- },
2957
- "correct": "D",
2958
- "explanation": "The Learning Journal is the right stream for calibration misses. The trigger is 'confidence_miss', with confidence_before: 0.9 and confidence_after adjusted downward. The journal is agent-private, stored in ~/.paradigm/agents/{id}/journal/, and travels with the agent across projects if marked transferable. A separate Team Decision might record the fix approach, but the personal calibration adjustment belongs in the journal."
2959
- }
2960
- ]
2961
- },
2962
- {
2963
- "type": "standalone",
2964
- "slot": "slot-115",
2965
- "course": "para-601",
2966
- "variants": [
2967
- {
2968
- "id": "plsat-115",
2969
- "scenario": "The team holds a design discussion about whether to use WebSockets or Server-Sent Events for real-time updates. The architect proposes WebSockets, the builder supports it, and the reviewer dissents (preferring SSE for simplicity). They go with WebSockets. The rationale and alternatives need to be preserved.",
2970
- "question": "Which knowledge stream captures this?",
2971
- "choices": {
2972
- "A": "Work Log — a design discussion is work",
2973
- "B": "Learning Journal — everyone learned something from the debate",
2974
- "C": "Team Decision — it records the choice, rationale, participants with stances, and alternatives considered",
2975
- "D": "Event Stream — emit a 'decision-made' event and the stream captures it",
2976
- "E": "Lore entry with type 'decision' — the legacy system handles decisions"
2977
- },
2978
- "correct": "C",
2979
- "explanation": "Team Decisions capture institutional memory: the title, decision text, rationale, participants (with stances like 'proposed', 'supported', 'dissented'), alternatives_considered (with rejected_because), and affected symbols. They are project-scoped and long-lived (status: active/superseded/deprecated). The event stream might emit a 'decision-made' event, but that is a notification — the decision record itself lives in the Team Decisions stream at .paradigm/decisions/."
2980
- },
2981
- {
2982
- "id": "plsat-115b",
2983
- "scenario": "After a production incident, the team decides to add rate limiting to all public API endpoints. The security agent proposed it, the architect supported it, and the builder abstained. They considered IP-based limiting vs token-bucket and chose token-bucket. This needs to be recorded for future reference.",
2984
- "question": "Which knowledge stream should hold this record?",
2985
- "choices": {
2986
- "A": "Work Log — an incident response is work that was done",
2987
- "B": "Team Decision — it preserves the choice, who participated, their stances, and why token-bucket was chosen over IP-based",
2988
- "C": "Learning Journal — the team learned from the incident",
2989
- "D": "All three streams equally — incidents generate entries everywhere",
2990
- "E": "Event Stream — the decision is an event type 'decision-made'"
2991
- },
2992
- "correct": "B",
2993
- "explanation": "Team Decisions are the institutional record for choices like this. The entry includes participants (security: proposed, architect: supported, builder: abstained), alternatives_considered (IP-based: rejected because it fails behind proxies), symbols_affected (#rate-limiter, #api-gateway), and status: active. The incident itself might generate work log entries and journal entries, but the decision about the approach belongs in the Team Decisions stream."
2994
- }
2995
- ]
2996
- },
2997
- {
2998
- "type": "standalone",
2999
- "slot": "slot-116",
3000
- "course": "para-601",
3001
- "variants": [
3002
- {
3003
- "id": "plsat-116",
3004
- "scenario": "A security agent has attention patterns configured as: symbols: ['^*', '#*-auth', '#*-middleware'], paths: ['auth/**', 'middleware/**'], concepts: ['permission', 'JWT', 'RBAC'], signals: [{ type: 'gate-added' }, { type: 'route-created' }], threshold: 0.4. A new event arrives: type: 'file-modified', path: 'src/utils/date-formatter.ts', symbols: ['#date-formatter'], keywords: ['formatting', 'locale'].",
3005
- "question": "Will the security agent self-nominate for this event?",
3006
- "choices": {
3007
- "A": "Yes — the agent's threshold is low (0.4), so it nominates for most events",
3008
- "B": "No — the symbol '#date-formatter' does not match '^*', '#*-auth', or '#*-middleware'; the path 'src/utils/' does not match 'auth/**' or 'middleware/**'; the keywords have no concept overlap; and no signal matches. All four scores are 0, which is below 0.4",
3009
- "C": "Yes — '#date-formatter' matches '#*' because * is a wildcard for any suffix",
3010
- "D": "No — the agent only responds to signals, not file modifications",
3011
- "E": "Yes — every agent nominates for every event to ensure nothing is missed"
3012
- },
3013
- "correct": "B",
3014
- "explanation": "Attention scoring evaluates four dimensions: symbolMatch (does '#date-formatter' match '^*', '#*-auth', or '#*-middleware'? No — it would need to end with '-auth' or '-middleware'), pathMatch ('src/utils/date-formatter.ts' doesn't match 'auth/**' or 'middleware/**'), conceptMatch (no overlap between ['formatting', 'locale'] and ['permission', 'JWT', 'RBAC']), and signalMatch (event type 'file-modified' is not 'gate-added' or 'route-created'). The final score is max(0, 0, 0, 0) = 0, which is below the 0.4 threshold."
3015
- },
3016
- {
3017
- "id": "plsat-116b",
3018
- "scenario": "A tester agent has attention patterns: paths: ['**/*.test.*', '**/*.spec.*'], concepts: ['test', 'coverage', 'assertion'], signals: [{ type: 'error-encountered' }], threshold: 0.5. An event arrives: type: 'file-modified', path: 'src/services/payment.ts', symbols: ['#payment-service'], keywords: ['refactor', 'extract method'].",
3019
- "question": "Will the tester agent self-nominate?",
3020
- "choices": {
3021
- "A": "Yes — any file modification could break tests, so the tester is always relevant",
3022
- "B": "Yes — the threshold of 0.5 is met because 'refactor' is similar to 'test'",
3023
- "C": "No — the path does not match '**/*.test.*' or '**/*.spec.*', no concept overlap ('refactor' and 'extract method' do not match 'test', 'coverage', or 'assertion'), and the event type 'file-modified' is not 'error-encountered'. Score is 0",
3024
- "D": "No — tester agents can only observe test files, not source files",
3025
- "E": "Yes — '#payment-service' triggers the tester because payments need testing"
3026
- },
3027
- "correct": "C",
3028
- "explanation": "The tester's attention has no symbol patterns, so symbolMatch is 0. The path 'src/services/payment.ts' doesn't match '**/*.test.*' or '**/*.spec.*', so pathMatch is 0. The keywords ['refactor', 'extract method'] have no overlap with ['test', 'coverage', 'assertion'], so conceptMatch is 0. The event type 'file-modified' is not 'error-encountered', so signalMatch is 0. Final score: max(0, 0, 0, 0) = 0, below the 0.5 threshold."
3029
- }
3030
- ]
3031
- },
3032
- {
3033
- "type": "standalone",
3034
- "slot": "slot-117",
3035
- "course": "para-601",
3036
- "variants": [
3037
- {
3038
- "id": "plsat-117",
3039
- "scenario": "An architect agent has attention: symbols: ['$*', '#*'], concepts: ['architecture', 'design', 'pattern'], threshold: 0.5. An event arrives: type: 'flow-modified', symbols: ['$checkout-flow', '#cart-service'], keywords: ['added step', 'validation'], context: 'New payment validation step added to checkout flow'.",
3040
- "question": "Will the architect self-nominate, and what is the approximate attention score?",
3041
- "choices": {
3042
- "A": "No — the architect only cares about design documents, not flow changes",
3043
- "B": "Yes — '$checkout-flow' matches '$*' giving symbolMatch=1.0, and the score exceeds the 0.5 threshold",
3044
- "C": "Yes — but only because the keyword 'validation' triggers a concept match",
3045
- "D": "No — the threshold of 0.5 requires at least two dimensions to match",
3046
- "E": "Yes — every event with symbols triggers every agent that has symbol patterns"
3047
- },
3048
- "correct": "B",
3049
- "explanation": "The architect's symbol pattern '$*' matches '$checkout-flow' (any symbol starting with $), giving symbolMatch=1.0. Additionally '#*' matches '#cart-service', though the score is already capped at 1.0, so this adds nothing further. The final score is max(symbolMatch, pathMatch, conceptMatch, signalMatch) = max(1.0, 0, partial, 0) = 1.0, well above the 0.5 threshold. The agent will self-nominate. Note that conceptMatch might also contribute ('pattern' could partial-match against context), but symbolMatch alone is sufficient."
3050
- },
3051
- {
3052
- "id": "plsat-117b",
3053
- "scenario": "A reviewer agent has attention: concepts: ['code quality', 'bug', 'smell', 'convention'], threshold: 0.6. An event arrives: type: 'error-encountered', keywords: ['null pointer', 'uncaught exception', 'bug'], context: 'TypeError: Cannot read properties of undefined in auth middleware', severity: 'error'.",
3054
- "question": "Will the reviewer self-nominate?",
3055
- "choices": {
3056
- "A": "No — the reviewer has no symbol or path patterns, so it cannot score above 0",
3057
- "B": "No — 'error-encountered' events are only for tester agents",
3058
- "C": "Yes — the keyword 'bug' matches the concept 'bug', giving conceptMatch of at least 0.25 (1 of 4 concepts). Since max score uses the highest dimension, this gives 0.25 which is below 0.6, so actually no",
3059
- "D": "Yes — 'bug' matches concept 'bug' (1/4 = 0.25 conceptMatch), but the score is max(0, 0, 0.25, 0) = 0.25, which is below the 0.6 threshold, so the agent stays quiet",
3060
- "E": "Yes — the severity 'error' automatically forces all agents to nominate"
3061
- },
3062
- "correct": "D",
3063
- "explanation": "The reviewer has no symbol patterns (symbolMatch=0), no path patterns (pathMatch=0), and no signal patterns (signalMatch=0). For concepts, 'bug' matches 1 of 4 concepts, giving conceptMatch = 1/4 = 0.25. The final score is max(0, 0, 0.25, 0) = 0.25, which is below the 0.6 threshold. The agent's quietReason would be 'below-threshold'. Severity does not override the attention threshold — agents only speak when their specific attention patterns fire strongly enough."
3064
- }
3065
- ]
3066
- },
3067
- {
3068
- "type": "standalone",
3069
- "slot": "slot-118",
3070
- "course": "para-601",
3071
- "variants": [
3072
- {
3073
- "id": "plsat-118",
3074
- "scenario": "A learning journal entry contains: insight: 'When JWT tokens use RS256 with the PAYMENTS_SIGNING_KEY, rotation requires coordinating across 3 services.' The data policy has learning_journal ring: 'user-scoped' with redaction patterns: [{ pattern: '\\\\b[A-Z_]{2,}_KEY\\\\b' }, { pattern: 'password|secret|token' }].",
3075
- "question": "Which trust ring does the learning journal belong to, and what happens to the content?",
3076
- "choices": {
3077
- "A": "Ring 1 (project-locked) — journals never leave the project",
3078
- "B": "Ring 2 (user-scoped) — the journal travels across the user's projects, but 'PAYMENTS_SIGNING_KEY' is redacted by the [A-Z_]{2,}_KEY pattern and 'token' is redacted by the password|secret|token pattern",
3079
- "C": "Ring 3 (creator-upstream) — journal insights flow to agent creators anonymized",
3080
- "D": "Ring 2 (user-scoped) — but no redaction occurs because journals are already private",
3081
- "E": "Ring 4 (network-public) — aggregated learning patterns are shared publicly"
3082
- },
3083
- "correct": "B",
3084
- "explanation": "The learning journal's ring is 'user-scoped' (Ring 2), meaning it travels across the user's own projects but never beyond. The data policy's redaction patterns are applied at the 'journal-recording' enforcement boundary. The regex '\\b[A-Z_]{2,}_KEY\\b' matches 'PAYMENTS_SIGNING_KEY', and 'password|secret|token' matches 'token' in 'JWT tokens'. Both are redacted before storage. Trust rings are concentric — data classified as Ring 2 can be read in Ring 1 (project) and Ring 2 (user) contexts, but never reaches Ring 3 (upstream) or Ring 4 (network)."
3085
- },
3086
- {
3087
- "id": "plsat-118b",
3088
- "scenario": "A work log entry records: summary: 'Fixed database connection pooling for the POSTGRES_SECRET_URL endpoint, updated #db-pool configuration.' The data policy has work_log ring: 'project-locked' with deny_content: ['code_snippets', 'file_contents', 'diff_content'] and no redaction patterns.",
3089
- "question": "What trust ring contains this work log, and is the content filtered?",
3090
- "choices": {
3091
- "A": "Ring 1 (project-locked) — the content stays in the project; deny_content blocks code snippets but the summary text is allowed because 'file_paths' and 'symbol_names' are in allow_content",
3092
- "B": "Ring 2 (user-scoped) — work logs travel with the user across projects",
3093
- "C": "Ring 1 (project-locked) — but 'POSTGRES_SECRET_URL' should be redacted",
3094
- "D": "Ring 1 (project-locked) — the entire entry is blocked because it mentions a secret URL",
3095
- "E": "Ring 3 (creator-upstream) — work logs are feedback for agent creators"
3096
- },
3097
- "correct": "A",
3098
- "explanation": "Work logs default to Ring 1 (project-locked) — they never leave the project. The allow_content list includes 'file_paths', 'symbol_names', and 'outcome', so the summary mentioning '#db-pool' and describing the fix is fine. The deny_content blocks 'code_snippets', 'file_contents', and 'diff_content', but a summary paragraph is not a code snippet. Note that 'POSTGRES_SECRET_URL' is not redacted because the work_log stream has no redaction patterns in the default policy — it relies on project-locked ring containment instead."
3099
- }
3100
- ]
3101
- },
3102
- {
3103
- "type": "standalone",
3104
- "slot": "slot-119",
3105
- "course": "para-601",
3106
- "variants": [
3107
- {
3108
- "id": "plsat-119",
3109
- "scenario": "A project's CLAUDE.md has grown to 850 lines. It includes: the symbol table (5 symbols), commit conventions, agent onboarding steps, a 200-line logging guide with directory mapping tables, a 150-line portal protocol specification, and 100 lines of MCP workflow details. The team wants to reduce base context cost.",
3110
- "question": "Using Paradigm's guidance resource model, what should stay inline in CLAUDE.md and what should move?",
3111
- "choices": {
3112
- "A": "Everything stays in CLAUDE.md — agents need all context upfront to avoid errors",
3113
- "B": "Move everything to guidance resources — CLAUDE.md should only have the project name",
3114
- "C": "The symbol table, commit conventions, and agent onboarding steps stay inline (~150 lines). The logging guide, portal protocol, and MCP workflow move to on-demand guidance resources (paradigm://guidance/logging, paradigm://guidance/portal, paradigm://guidance/mcp-workflow)",
3115
- "D": "Only the symbol table stays inline. Everything else, including commit conventions, moves to guidance resources",
3116
- "E": "Keep the logging guide inline (agents need it every session) and move everything else to guidance"
3117
- },
3118
- "correct": "C",
3119
- "explanation": "Paradigm's guidance resource model splits CLAUDE.md into two tiers: (1) always-loaded base context (~150 lines) containing the symbol system, conventions, and onboarding steps that every session needs, and (2) on-demand guidance resources loaded via MCP resource URIs only when relevant. The logging guide, portal protocol, and MCP workflow are reference material — an agent building a React component doesn't need the portal spec. Moving them to paradigm://guidance/* reduces base context from ~850 to ~150 lines while keeping all guidance one tool call away."
3120
- },
3121
- {
3122
- "id": "plsat-119b",
3123
- "scenario": "A developer notices that their CLAUDE.md includes a detailed multi-agent orchestration section (120 lines), a flow-first development guide (80 lines), a workspace configuration reference (60 lines), and the core symbol system with conventions (100 lines). They want to follow Paradigm's context efficiency pattern.",
3124
- "question": "What is the correct split between inline CLAUDE.md and on-demand guidance?",
3125
- "choices": {
3126
- "A": "Keep everything — 360 lines is acceptable for CLAUDE.md",
3127
- "B": "The core symbol system and conventions stay inline. Orchestration, flow-first, and workspace references become guidance resources loaded via paradigm://guidance/orchestration, paradigm://guidance/flows, and paradigm://guidance/workspaces",
3128
- "C": "Move everything to guidance resources and leave CLAUDE.md empty",
3129
- "D": "Keep orchestration inline because multi-agent work is common; move only workspaces",
3130
- "E": "Split each section in half — keep summaries inline, details in guidance"
3131
- },
3132
- "correct": "B",
3133
- "explanation": "The core symbol system (# $ ^ ! ~) and project conventions are needed in every session — they stay inline. Orchestration, flow-first development, and workspace configuration are situational: you only need orchestration when running multi-agent tasks, flows when documenting a complex process, and workspaces when working across projects. These become on-demand resources. The CLAUDE.md even includes a resource table mapping topics to URIs so agents know what is available without loading the full content."
3134
- }
3135
- ]
3136
- },
3137
- {
3138
- "type": "standalone",
3139
- "slot": "slot-120",
3140
- "course": "para-601",
3141
- "variants": [
3142
- {
3143
- "id": "plsat-120",
3144
- "scenario": "A builder agent modifies `src/auth/jwt-verify.ts` and adds a new gate `^token-valid`. The event stream emits: type: 'gate-added', source: 'post-write-hook', symbols: ['^token-valid', '#jwt-verify'], path: 'src/auth/jwt-verify.ts', keywords: ['JWT', 'validation', 'gate', 'authentication']. A security agent has attention: symbols: ['^*', '#*-auth', '#*-middleware'], concepts: ['permission', 'JWT', 'RBAC'], signals: [{ type: 'gate-added' }], threshold: 0.4.",
3145
- "question": "What urgency level should the security agent's nomination use?",
3146
- "choices": {
3147
- "A": "Low — gate additions are routine maintenance",
3148
- "B": "Medium — any security-related change warrants moderate urgency",
3149
- "C": "High or Critical — a new gate in the auth layer is a security-sensitive change; the agent's nomination.speak_when.urgency likely includes 'gate_missing' and 'security_risk', and the signal match on 'gate-added' directly triggers the security review pattern",
3150
- "D": "The urgency is always 'medium' unless the event severity is 'critical'",
3151
- "E": "No nomination — the security agent only reviews when asked directly"
3152
- },
3153
- "correct": "C",
3154
- "explanation": "The security agent's attention fires on three dimensions simultaneously: symbolMatch ('^token-valid' matches '^*'), conceptMatch ('JWT' and 'authentication' match concepts), and signalMatch ('gate-added' matches a signal). With a threshold of 0.4 and a score of 1.0, the agent clearly self-nominates. For urgency, gate additions in authentication code are security-sensitive changes — the nomination.speak_when.urgency array typically includes 'gate_missing' and 'security_risk'. A new gate needs review to verify it is correctly implemented and not bypassed."
3155
- },
3156
- {
3157
- "id": "plsat-120b",
3158
- "scenario": "A builder agent updates a CSS file: type: 'file-modified', path: 'src/styles/button.css', symbols: ['#button-styles'], keywords: ['padding', 'border-radius', 'color']. A security agent has attention: symbols: ['^*', '#*-auth'], paths: ['auth/**', 'middleware/**'], concepts: ['permission', 'JWT'], signals: [{ type: 'gate-added' }], threshold: 0.4.",
3159
- "question": "What happens with the security agent's nomination for this event?",
3160
- "choices": {
3161
- "A": "The security agent nominates with urgency 'low' — all changes get reviewed",
3162
- "B": "The security agent does not nominate — score is 0 across all dimensions (no symbol match, path is not auth/middleware, no concept overlap, no signal match), and quietReason is 'below-threshold'",
3163
- "C": "The security agent nominates because the low threshold (0.4) catches most events",
3164
- "D": "The security agent nominates with urgency 'medium' — CSS could affect security UI",
3165
- "E": "The security agent defers — it waits to see if other agents nominate first"
3166
- },
3167
- "correct": "B",
3168
- "explanation": "The security agent's attention patterns produce zero matches: '#button-styles' does not match '^*' or '#*-auth'; 'src/styles/button.css' does not match 'auth/**' or 'middleware/**'; ['padding', 'border-radius', 'color'] have no overlap with ['permission', 'JWT']; and event type 'file-modified' is not 'gate-added'. The score is max(0, 0, 0, 0) = 0, far below the 0.4 threshold. The agent stays quiet with quietReason: 'below-threshold'. A low threshold does not mean the agent nominates for everything — it means the agent is more sensitive to weak matches in its attention domain."
3169
- }
3170
- ]
3171
- },
3172
- {
3173
- "type": "standalone",
3174
- "slot": "slot-121",
3175
- "course": "para-601",
3176
- "variants": [
3177
- {
3178
- "id": "plsat-121",
3179
- "scenario": "A security agent nominates with urgency 'critical' after detecting a missing gate on a payment endpoint. At the same time, a reviewer agent nominates with urgency 'low' about a variable naming convention issue in the same file. Both nominations target overlapping symbols.",
3180
- "question": "How should the urgency levels affect surfacing to the human?",
3181
- "choices": {
3182
- "A": "Both are shown equally — urgency is just metadata with no behavioral effect",
3183
- "B": "Only the critical nomination is shown — low urgency nominations are always suppressed",
3184
- "C": "The critical nomination is surfaced immediately; the low urgency nomination may be batched or deferred based on the SurfacingConfig's min_urgency setting for the reviewer agent",
3185
- "D": "They are merged into a single nomination with urgency 'high' (the average)",
3186
- "E": "The reviewer's nomination is upgraded to 'critical' because it touches the same symbols"
3187
- },
3188
- "correct": "C",
3189
- "explanation": "SurfacingConfig controls how nominations reach the human. Each agent can have a min_urgency setting — if the reviewer's SurfacingPreference has min_urgency: 'medium', the 'low' urgency naming convention issue would be batched or suppressed until the human is less busy. The security agent's 'critical' nomination exceeds any reasonable min_urgency threshold and is surfaced immediately. Nominations are not merged or averaged — they are independent contributions that may be grouped as a 'complementary' Debate if they touch overlapping symbols."
3190
- },
3191
- {
3192
- "id": "plsat-121b",
3193
- "scenario": "An event fires: type: 'route-created' for a new `/api/admin/users` endpoint. The security agent nominates with urgency 'high' (missing gate review). The architect agent nominates with urgency 'medium' (suggesting a different URL structure). The builder agent scores below threshold and stays quiet.",
3194
- "question": "What nomination urgency behavior is correct here?",
3195
- "choices": {
3196
- "A": "The architect's nomination is suppressed because the security agent already nominated",
3197
- "B": "Both nominations surface: security at 'high' urgency (shows first) and architect at 'medium' urgency. They may be grouped into a Debate because they target the same route symbol. The builder's quietReason is 'below-threshold'",
3198
- "C": "All three agents must nominate — staying quiet is not an option",
3199
- "D": "The urgency levels are recalculated based on the number of agents that nominate",
3200
- "E": "Only the highest urgency nomination surfaces; others are queued for later"
3201
- },
3202
- "correct": "B",
3203
- "explanation": "Each agent independently scores the event and decides whether to nominate. The security and architect agents exceeded their thresholds and nominated with different urgency levels. Both are surfaced (high shows before medium in priority order). Because they target the same route/symbols, the system may group them as a Debate (type: 'complementary' since they address different concerns). The builder agent scored below its threshold and stays quiet with quietReason: 'below-threshold' — this is correct behavior, not a failure."
3204
- }
3205
- ]
3206
- },
3207
- {
3208
- "type": "standalone",
3209
- "slot": "slot-122",
3210
- "course": "para-601",
3211
- "variants": [
3212
- {
3213
- "id": "plsat-122",
3214
- "scenario": "A project's data policy has: upstream ring: 'creator-upstream', allowed: ['task_type', 'outcome', 'helpfulness', 'duration_bucket', 'error_category'], denied: ['code_of_any_kind', 'file_paths', 'symbol_names', 'conversation_content', 'user_identity']. An agent creator's analytics dashboard requests feedback data.",
3215
- "question": "Which data reaches the agent creator?",
3216
- "choices": {
3217
- "A": "Everything the agent produced — the creator needs full visibility to improve the agent",
3218
- "B": "Only the allowed fields: task_type ('feature implementation'), outcome ('pass'), helpfulness ('high'), duration_bucket ('30-60min'), error_category (null). No code, file paths, symbol names, conversation content, or user identity is transmitted",
3219
- "C": "A summary of the agent's work log entries",
3220
- "D": "Nothing — the 'denied' list overrides the 'allowed' list entirely",
3221
- "E": "Only 'outcome' and 'helpfulness' — those are the minimum required fields"
3222
- },
3223
- "correct": "B",
3224
- "explanation": "The upstream rules at Ring 3 (creator-upstream) define exactly what flows to agent creators. The 'allowed' list enumerates the specific fields that can be transmitted: task_type, outcome, helpfulness, duration_bucket, and error_category. The 'denied' list explicitly blocks code_of_any_kind, file_paths, symbol_names, conversation_content, and user_identity. These two lists work together — only allowed fields pass, and denied fields are hard-blocked even if they would otherwise be inferred. The creator sees anonymized quality metrics, never the user's actual code or identity."
3225
- },
3226
- {
3227
- "id": "plsat-122b",
3228
- "scenario": "A team decision entry contains: decision: 'Use PostgreSQL with pgvector for embeddings', rationale: 'Lower latency than Pinecone for our dataset size', symbols_affected: ['#embedding-store', '#search-service']. The data policy has team_decisions ring: 'project-locked', deny_content: ['implementation_details']. An agent from another user project requests this data.",
3229
- "question": "What happens when the cross-project request arrives?",
3230
- "choices": {
3231
- "A": "The decision is shared — team decisions are public knowledge",
3232
- "B": "Only the rationale is shared — the decision text contains implementation details",
3233
- "C": "The request is blocked entirely — team decisions are in Ring 1 (project-locked), which means they never leave the project regardless of what the requesting agent needs",
3234
- "D": "The decision title is shared but the rationale is redacted",
3235
- "E": "The data is shared if the other project is in the same workspace"
3236
- },
3237
- "correct": "C",
3238
- "explanation": "Team decisions default to Ring 1 (project-locked). The trust ring system is the primary enforcement boundary — data classified in Ring 1 never leaves the project, period. The 'cross-project-transfer' enforcement boundary checks the ring before any content filtering. Even though the deny_content only blocks 'implementation_details', the ring restriction prevents the entire entry from being transmitted. Workspaces do not override ring restrictions — they enable symbol awareness across projects, not data sharing."
3239
- }
3240
- ]
3241
- },
3242
- {
3243
- "type": "standalone",
3244
- "slot": "slot-123",
3245
- "course": "para-601",
3246
- "variants": [
3247
- {
3248
- "id": "plsat-123",
3249
- "scenario": "The data policy's default ring is 'project-locked'. The observation rules are: allow: ['src/**', '.paradigm/**', 'portal.yaml'], deny: ['.env*', '**/*.key', '**/*.pem', '**/secrets/**']. A builder agent tries to read '.env.production' to check a database URL.",
3250
- "question": "What happens at the observation enforcement boundary?",
3251
- "choices": {
3252
- "A": "The read succeeds — '.env.production' matches 'src/**' because it starts with a dot",
3253
- "B": "The read is blocked — '.env.production' matches the deny pattern '.env*', and deny overrides allow. The agent is prevented from observing the file contents",
3254
- "C": "The read succeeds but the content is redacted — only the file name is returned",
3255
- "D": "The read is blocked only if the file contains actual secrets; the policy checks content",
3256
- "E": "The read succeeds because the default ring is 'project-locked', which means all project files are accessible"
3257
- },
3258
- "correct": "B",
3259
- "explanation": "The observation rules control what agents can see. The deny list takes precedence over the allow list — '.env.production' matches the deny glob '.env*', so the read is blocked at the 'event-emission' enforcement boundary. This is a hard deny, not a content-aware filter. The default ring being 'project-locked' means data stays within the project, but observation deny patterns prevent agents from accessing sensitive files regardless of ring level. The agent would need the deny pattern removed from the data policy to read this file."
3260
- },
3261
- {
3262
- "id": "plsat-123b",
3263
- "scenario": "The observation rules allow: ['src/**', '.paradigm/**'], deny: ['**/*.key', '**/*.pem', '**/secrets/**']. A security agent wants to audit the file 'src/config/secrets/api-keys.json' to check for hardcoded credentials.",
3264
- "question": "What does the data policy enforce?",
3265
- "choices": {
3266
- "A": "The read is allowed — 'src/**' matches and the security agent has special override privileges",
3267
- "B": "The read is allowed — 'src/config/secrets/api-keys.json' matches 'src/**' in allow",
3268
- "C": "The read is blocked — 'src/config/secrets/api-keys.json' matches '**/secrets/**' in deny, which overrides the 'src/**' allow pattern",
3269
- "D": "The read is partially allowed — the file name is visible but contents are redacted",
3270
- "E": "The read depends on the agent's permissions.paths.read setting, not the data policy"
3271
- },
3272
- "correct": "C",
3273
- "explanation": "Deny patterns override allow patterns in observation rules. While 'src/config/secrets/api-keys.json' does match 'src/**' in the allow list, it also matches '**/secrets/**' in the deny list. The deny takes precedence. This is a deliberate security design — even well-intentioned audit access to secrets directories is blocked. If the security agent needs to verify no hardcoded secrets exist, it would need a different approach (e.g., a human-run audit) or a per-agent override in agent_overrides that explicitly allows that path."
3274
- }
3275
- ]
3276
- },
3277
- {
3278
- "type": "standalone",
3279
- "slot": "slot-124",
3280
- "course": "para-601",
3281
- "variants": [
3282
- {
3283
- "id": "plsat-124",
3284
- "scenario": "An event is emitted by the post-write hook after a file save. You need to understand the anatomy of the StreamEvent object.",
3285
- "question": "Which fields are ALWAYS present on every StreamEvent, regardless of the event type or source?",
3286
- "choices": {
3287
- "A": "id, type, source, timestamp, path, symbols, agent",
3288
- "B": "id, type, source, timestamp — these four are required. Fields like path, symbols, keywords, context, agent, tool, severity, and data are all optional and depend on the event type",
3289
- "C": "id, timestamp, and agent — the agent is always set because events come from agents",
3290
- "D": "All fields are always present — optional fields default to empty arrays or null",
3291
- "E": "type and timestamp only — the id is generated lazily when the event is queried"
3292
- },
3293
- "correct": "B",
3294
- "explanation": "A StreamEvent has four required fields: id (auto-generated like 'ev-1711000000000-0042'), type (an EventType like 'file-modified', 'gate-added', 'error-encountered'), source (an EventSource like 'post-write-hook', 'mcp-tool-call', 'conversation'), and timestamp (ISO 8601). All other fields are optional: path is present for file events, symbols for Paradigm-aware events, agent for agent-originated events, tool for MCP tool calls, severity for compliance/error events, and data for arbitrary structured metadata."
3295
- },
3296
- {
3297
- "id": "plsat-124b",
3298
- "scenario": "You are debugging an event that arrived in the stream: { id: 'ev-1711036800000-0317', type: 'gate-checked', source: 'mcp-tool-call', timestamp: '2026-03-21T12:00:00Z', symbols: ['^admin-only'], tool: 'paradigm_gates_for_route', context: 'Checking gates for /api/admin/config' }.",
3299
- "question": "Which fields on this event are the optional ones (not present on every event)?",
3300
- "choices": {
3301
- "A": "All fields shown are required — this is the minimum event structure",
3302
- "B": "Only 'context' is optional — everything else is always present",
3303
- "C": "symbols, tool, and context are the optional fields. id, type, source, and timestamp are the four required fields present on every event",
3304
- "D": "id is optional — some events are anonymous",
3305
- "E": "source and tool are both optional — they are redundant"
3306
- },
3307
- "correct": "C",
3308
- "explanation": "The four required fields are id, type, source, and timestamp. In this event, symbols (['^admin-only']), tool ('paradigm_gates_for_route'), and context ('Checking gates for /api/admin/config') are all optional fields that happen to be populated. Other events might lack these — for example, a 'file-modified' event from 'post-write-hook' would have path instead of tool, and might not have symbols at all if the file is not covered by a .purpose file."
3309
- }
3310
- ]
3311
- },
3312
- {
3313
- "type": "standalone",
3314
- "slot": "slot-125",
3315
- "course": "para-601",
3316
- "variants": [
3317
- {
3318
- "id": "plsat-125",
3319
- "scenario": "A builder agent starts a new session on project 'acme-api'. You call `paradigm_context_compose` with agent: 'builder', symbols: ['#api-gateway', '#rate-limiter'], include_nominations: true, include_decisions: true, include_journal: true.",
3320
- "question": "What sections does the composed context include?",
3321
- "choices": {
3322
- "A": "Only the agent's profile — context compose just returns the .agent file",
3323
- "B": "The full CLAUDE.md, all .purpose files, and the complete event stream",
3324
- "C": "Profile enrichment (personality, relevant expertise for the given symbols, transferable patterns), recent active team decisions, transferable journal entries for this agent, and pending nominations — four distinct sections composed into a markdown block",
3325
- "D": "Only nominations and decisions — the profile is loaded separately",
3326
- "E": "The agent's attention patterns and learning configuration — context compose is about ambient setup"
3327
- },
3328
- "correct": "C",
3329
- "explanation": "paradigm_context_compose builds a session context from four sources: (1) Profile enrichment — the agent's personality, expertise entries matching the given symbols, and transferable patterns; (2) Recent team decisions with status 'active' (up to max_decisions, default 5); (3) Transferable journal entries for this agent (insights that apply across projects, up to max_journal, default 5); (4) Pending nominations that haven't been surfaced yet. Each section can be toggled via include_* flags. The result is a markdown string for prompt injection, not raw data."
3330
- },
3331
- {
3332
- "id": "plsat-125b",
3333
- "scenario": "A security agent calls `paradigm_context_compose` with agent: 'security', include_decisions: true, include_journal: true, include_nominations: false, max_decisions: 3, max_journal: 10.",
3334
- "question": "What does the composed context contain?",
3335
- "choices": {
3336
- "A": "All 4 sections are always included — the include_* flags are just suggestions",
3337
- "B": "Profile enrichment for the security agent, up to 3 recent active team decisions, up to 10 transferable journal entries, and NO nominations section (because include_nominations is false)",
3338
- "C": "Only the journal entries — security agents do not get profile enrichment",
3339
- "D": "The full event stream filtered to security-relevant events",
3340
- "E": "Profile, decisions, and journal — but max_journal caps at 5 regardless of the parameter"
3341
- },
3342
- "correct": "B",
3343
- "explanation": "The include_* flags control which sections appear in the composed context. With include_nominations: false, that section is skipped entirely. Profile enrichment is always included (it is the base). max_decisions: 3 limits team decisions to the 3 most recent active ones (instead of the default 5). max_journal: 10 allows up to 10 transferable journal entries (overriding the default 5). The result is a markdown context block with three sections: profile enrichment, decisions, and journal — ready for prompt injection."
3344
- }
3345
- ]
3346
- },
3347
- {
3348
- "type": "standalone",
3349
- "slot": "slot-126",
3350
- "course": "para-601",
3351
- "variants": [
3352
- {
3353
- "id": "plsat-126",
3354
- "scenario": "The data policy network rules are: ring: 'network-public', opt_in: false, if_opted_in: ['aggregated_task_success_rates', 'anonymized_pattern_frequency']. A network aggregation service requests data from this project.",
3355
- "question": "What data flows to the network?",
3356
- "choices": {
3357
- "A": "aggregated_task_success_rates and anonymized_pattern_frequency — the if_opted_in list defines what is shared",
3358
- "B": "Nothing — opt_in is false, so no data reaches Ring 4 (network-public) regardless of the if_opted_in list. The user must explicitly set opt_in: true before any data flows",
3359
- "C": "Only anonymized_pattern_frequency — success rates could identify the project",
3360
- "D": "Aggregated statistics are always shared — opt_in only controls detailed data",
3361
- "E": "The data is shared but with a 30-day delay for privacy"
3362
- },
3363
- "correct": "B",
3364
- "explanation": "The network rules require explicit opt-in. With opt_in: false, the 'network-aggregation' enforcement boundary blocks ALL data from reaching Ring 4 (network-public). The if_opted_in list only takes effect when opt_in is true — it defines which specific metrics would be shared if the user chooses to participate. This is a deliberate design: no data flows to the network by default. The user must consciously enable it, and even then, only the listed metric types (aggregated task success rates and anonymized pattern frequency) are transmitted."
3365
- },
3366
- {
3367
- "id": "plsat-126b",
3368
- "scenario": "An upstream rule has: ring: 'creator-upstream', allowed: ['task_type', 'outcome'], denied: ['code_of_any_kind', 'file_paths', 'symbol_names', 'conversation_content', 'user_identity']. The agent generates a work log entry that mentions file paths and symbol names in the summary. This entry is allowed at Ring 1. Now the upstream boundary is checked.",
3369
- "question": "What reaches the agent creator?",
3370
- "choices": {
3371
- "A": "The full work log entry — it was allowed at Ring 1 so it passes all rings",
3372
- "B": "Only the task_type and outcome fields. Even though the summary mentions file paths and symbols, the upstream boundary enforces the denied list. File paths, symbol names, and any code are stripped. Only the enumerated allowed fields pass through to Ring 3",
3373
- "C": "The summary with file paths and symbol names redacted inline",
3374
- "D": "Nothing — the denied list cancels out the allowed list",
3375
- "E": "The work log ID and timestamp only — those are always transmitted"
3376
- },
3377
- "correct": "B",
3378
- "explanation": "The upstream enforcement boundary operates on specific fields, not on the text content of entries. The 'allowed' list names exactly which fields pass: task_type and outcome. The 'denied' list provides an additional hard block on categories like file_paths, symbol_names, and code_of_any_kind. Data that was allowed at Ring 1 (project-locked) does not automatically flow to Ring 3 (creator-upstream) — each ring's enforcement boundary re-evaluates what passes. The creator receives structured, enumerated fields only — never free-text summaries that might leak sensitive information."
3379
- }
3380
- ]
3381
- },
3382
- {
3383
- "type": "standalone",
3384
- "slot": "slot-127",
3385
- "course": "para-601",
3386
- "variants": [
3387
- {
3388
- "id": "plsat-127",
3389
- "scenario": "A new project is set up with Paradigm Ambient. The CLAUDE.md mentions a 'learning loop' where agents improve over time. A junior developer asks about the sequence of steps in the ambient learning loop.",
3390
- "question": "What is the correct order of the ambient learning loop?",
3391
- "choices": {
3392
- "A": "Observe → Learn → Act → Record",
3393
- "B": "Act → Record → Learn → Apply",
3394
- "C": "Event emitted → Attention scoring → Self-nomination → Surfacing → Human engagement → Feedback → Journal recording → Confidence adjustment → Pattern extraction → Context enrichment (next session)",
3395
- "D": "Record everything → Filter later → Show on demand",
3396
- "E": "Human assigns → Agent acts → Agent reports → Human reviews"
3397
- },
3398
- "correct": "C",
3399
- "explanation": "The ambient learning loop is a 10-step cycle: (1) An event is emitted (file save, tool call, error). (2) Each agent scores it against their attention patterns. (3) Agents that exceed their threshold self-nominate with a type and urgency. (4) Nominations are surfaced to the human based on SurfacingConfig. (5) The human engages (accepts, dismisses, defers). (6) The response becomes feedback. (7) The agent records insights in its Learning Journal. (8) Confidence scores are adjusted via exponential moving average. (9) Transferable patterns are extracted for cross-project use. (10) Next session, paradigm_context_compose injects the updated patterns and insights."
3400
- },
3401
- {
3402
- "id": "plsat-127b",
3403
- "scenario": "A team is evaluating whether Paradigm Ambient's learning loop actually improves agent performance over time. They want to understand the mechanism.",
3404
- "question": "Which sequence correctly describes how an agent's future behavior improves from a single learning event?",
3405
- "choices": {
3406
- "A": "The agent's code is retrained on the new data point",
3407
- "B": "The correction is stored but only used if the exact same scenario recurs",
3408
- "C": "Feedback → Journal entry (trigger + insight + confidence adjustment) → Pattern extraction (if transferable) → Notebook promotion (if auto-promote enabled) → Context compose injects the pattern in future sessions → Agent's expertise scores are updated",
3409
- "D": "The human writes a rule in the data policy and the agent follows it",
3410
- "E": "The agent's threshold is lowered so it speaks up more often"
3411
- },
3412
- "correct": "C",
3413
- "explanation": "Agent improvement follows a structured path: (1) Feedback from human engagement triggers a journal entry with the specific trigger type (correction_received, confidence_miss, etc.), the insight, and confidence adjustment (before/after). (2) If the learning is transferable, a pattern is extracted with applies_when and correct_approach. (3) If notebook_auto_promote is enabled in the agent's learning config, the pattern promotes to the agent's notebook. (4) In future sessions, paradigm_context_compose loads transferable journal entries and patterns into the agent's context. (5) The agent's expertise scores (EMA) are updated, shifting future attention scoring. The agent is not retrained — it improves through richer context injection."
3414
- }
3415
- ]
3416
- },
3417
- {
3418
- "type": "standalone",
3419
- "slot": "slot-128",
3420
- "course": "para-601",
3421
- "variants": [
3422
- {
3423
- "id": "plsat-128",
3424
- "scenario": "An architect agent has attention: symbols: ['$*', '#*'], concepts: ['architecture', 'design', 'pattern', 'refactor'], threshold: 0.5. An event arrives: type: 'concept-mentioned', source: 'conversation', keywords: ['refactor', 'extract', 'design pattern', 'strategy pattern'], context: 'Discussing whether to refactor the notification system using the strategy pattern.'",
3425
- "question": "What is the architect's conceptMatch score, and does it self-nominate?",
3426
- "choices": {
3427
- "A": "conceptMatch = 0.25 (1 of 4 concepts), score = 0.25, does not nominate (below 0.5)",
3428
- "B": "conceptMatch = 1.0 — any concept match gives full score",
3429
- "C": "conceptMatch = 0.75 — 'architecture' does not appear but 'design', 'pattern', and 'refactor' all match, giving 3 of 4 concepts matched. Score = max(0, 0, 0.75, 0) = 0.75, which exceeds 0.5, so the agent self-nominates",
3430
- "D": "conceptMatch = 0.5 — only exact keyword matches count, and only 'refactor' and 'pattern' are exact matches (2 of 4)",
3431
- "E": "The architect does not nominate because 'concept-mentioned' is not in its signals list"
3432
- },
3433
- "correct": "C",
3434
- "explanation": "Concept matching joins the event's context, keywords, and type into a single lowercase string, then checks how many of the agent's concepts appear. The combined text includes 'refactor', 'extract', 'design pattern', 'strategy pattern', 'concept-mentioned'. Against the agent's concepts ['architecture', 'design', 'pattern', 'refactor']: 'design' matches, 'pattern' matches, 'refactor' matches. 'architecture' does not appear. That is 3 out of 4 = 0.75. The final score is max(symbolMatch, pathMatch, conceptMatch, signalMatch). With no symbols or path on this event, symbolMatch and pathMatch are 0. signalMatch is 0 (no signal patterns configured). Score = 0.75, above the 0.5 threshold. The architect self-nominates."
3435
- },
3436
- {
3437
- "id": "plsat-128b",
3438
- "scenario": "A security agent has attention: symbols: ['^*', '#*-auth', '#*-middleware'], paths: ['auth/**', 'middleware/**'], concepts: ['permission', 'JWT', 'RBAC', 'XSS', 'injection'], signals: [{ type: 'gate-added' }, { type: 'route-created' }], threshold: 0.4. An event arrives: type: 'route-created', source: 'post-write-hook', path: 'src/routes/admin.ts', symbols: ['#admin-routes'], keywords: ['new endpoint', 'admin panel', 'user management'].",
3439
- "question": "What is the security agent's overall score and should it nominate?",
3440
- "choices": {
3441
- "A": "Score = 0 — '#admin-routes' does not match any symbol pattern",
3442
- "B": "Score = 0.4 — only the path partially matches, barely meeting threshold",
3443
- "C": "Score = 1.0 — signalMatch is 1.0 because 'route-created' matches signals. symbolMatch is 0 ('#admin-routes' does not match '^*', '#*-auth', or '#*-middleware'). pathMatch is 0 ('src/routes/admin.ts' does not match 'auth/**' or 'middleware/**'). The final score is max(0, 0, 0, 1.0) = 1.0, well above the 0.4 threshold",
3444
- "D": "Score = 0.2 — partial matches across dimensions are averaged",
3445
- "E": "Score = 1.0 — all four dimensions fire because admin is security-related"
3446
- },
3447
- "correct": "C",
3448
- "explanation": "The scoring uses max() across four dimensions, not an average. symbolMatch: '#admin-routes' does not end with '-auth' or '-middleware' and does not start with '^', so 0. pathMatch: 'src/routes/admin.ts' does not match 'auth/**' or 'middleware/**', so 0. conceptMatch: keywords ['new endpoint', 'admin panel', 'user management'] do not contain 'permission', 'JWT', 'RBAC', 'XSS', or 'injection', so 0. signalMatch: the event type 'route-created' matches the signal { type: 'route-created' }, so 1.0. Final score: max(0, 0, 0, 1.0) = 1.0. The security agent self-nominates — new route creation is exactly the kind of event that warrants security review."
3449
- }
3450
- ]
3451
- }
3452
- ]
3453
- }