@a-company/paradigm 5.38.0 → 6.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{accept-orchestration-OATWIRHP.js → accept-orchestration-QQISPINV.js} +1 -1
- package/dist/add-UOR4INIV.js +8 -0
- package/dist/{agent-loader-RIVI6QPP.js → agent-loader-2WJHD46U.js} +1 -1
- package/dist/{agent-loader-RJRVO5GQ.js → agent-loader-YKS2PQWO.js} +1 -1
- package/dist/{ambient-76YMUA5Q.js → ambient-BE3SQXNN.js} +1 -1
- package/dist/{ambient-WTLYUAQM.js → ambient-NVKQCW2A.js} +12 -12
- package/dist/{assess-UFPYEJKP.js → assess-63WXHWJV.js} +1 -1
- package/dist/{calibration-OLJYB5HN.js → calibration-BDHGYJOK.js} +1 -1
- package/dist/{chunk-5QOCKWK5.js → chunk-4PSD5R7N.js} +2 -2
- package/dist/{chunk-HOBHJPTL.js → chunk-6SKSV5B2.js} +1 -1
- package/dist/{chunk-4L7665QV.js → chunk-FEYOQMZ5.js} +1 -1
- package/dist/{chunk-NEJ4ZLCY.js → chunk-GAFKOFAV.js} +1 -1
- package/dist/chunk-GRZQIKST.js +2 -0
- package/dist/{chunk-RLCH7DXQ.js → chunk-K7X3Z3GL.js} +1 -1
- package/dist/{chunk-4VKSEOXZ.js → chunk-LPBCQM5Y.js} +3 -3
- package/dist/{chunk-74SGKSRQ.js → chunk-M2HKWR25.js} +1 -1
- package/dist/{chunk-BOYQAMGC.js → chunk-M3PPXJU4.js} +1 -1
- package/dist/chunk-PHEX6LU4.js +111 -0
- package/dist/chunk-Q527BPUF.js +2 -0
- package/dist/chunk-R5ECMBIV.js +11 -0
- package/dist/{chunk-X3U3IGYT.js → chunk-TBWWFRL5.js} +1 -1
- package/dist/{chunk-MQIG6SMF.js → chunk-TNVWGPCE.js} +1 -1
- package/dist/chunk-TZDYIPVU.js +521 -0
- package/dist/{chunk-3XGNXXCT.js → chunk-UZ5H7K6Q.js} +1 -1
- package/dist/chunk-VIG5LSGZ.js +2 -0
- package/dist/chunk-VNIX5KBT.js +3 -0
- package/dist/{chunk-AGFPVSX5.js → chunk-VXIIVMTM.js} +1 -1
- package/dist/{chunk-ORDKEGII.js → chunk-WESTEMIM.js} +1 -1
- package/dist/{chunk-DOCDDDTD.js → chunk-YNDPSWOE.js} +5 -5
- package/dist/chunk-Z5QW6USC.js +2 -0
- package/dist/{compliance-D7GD6ZYC.js → compliance-BNFWQPKM.js} +1 -1
- package/dist/config-schema-FLHRVZMI.js +2 -0
- package/dist/{context-audit-XRPT3OU2.js → context-audit-JVCA6GSV.js} +1 -1
- package/dist/{cursorrules-U5O4G5T4.js → cursorrules-ZXPXPZ3P.js} +1 -1
- package/dist/decision-loader-HELL2AMX.js +2 -0
- package/dist/{delete-P5VULXR4.js → delete-2C6ALLYY.js} +1 -1
- package/dist/{diff-YGHBIJY5.js → diff-MF55KQZH.js} +1 -1
- package/dist/{dist-KGRCLBJP-2QAPFYNF.js → dist-GQ42YS5N-4HIJZVBB.js} +10 -10
- package/dist/{docs-USDAF26F.js → docs-O37YLLRN.js} +1 -1
- package/dist/doctor-IG5XM4C4.js +2 -0
- package/dist/{edit-GUU3HBVW.js → edit-P3MDAZLU.js} +1 -1
- package/dist/{flow-FVZR3YJ4.js → flow-BGXOVE2V.js} +1 -1
- package/dist/index.js +6 -6
- package/dist/init-M44SO65G.js +2 -0
- package/dist/{init-XYB62Q3X.js → init-V4KSEKPK.js} +1 -1
- package/dist/{list-YKIQNKGB.js → list-2XIWUEMA.js} +1 -1
- package/dist/list-CFHINXIS.js +12 -0
- package/dist/lore-loader-D2ISOASW.js +2 -0
- package/dist/lore-loader-PXFKMKAN.js +2 -0
- package/dist/mcp.js +4 -4
- package/dist/metrics-UESGUHTA.js +2 -0
- package/dist/migrate-assessments-YSITX7KM.js +4 -0
- package/dist/migrate-decisions-NPLQOEEH.js +6 -0
- package/dist/migrate-plsat-EM2ACIQ3.js +6 -0
- package/dist/{nomination-engine-EALA5MGI.js → nomination-engine-QPZJH6XO.js} +1 -1
- package/dist/{notebook-loader-PXNRBBXD.js → notebook-loader-3J2OFMS3.js} +1 -1
- package/dist/{orchestrate-M5PBZBJQ.js → orchestrate-RID7HHHH.js} +1 -1
- package/dist/{platform-server-DNAMH4YI.js → platform-server-UD45NTGV.js} +1 -1
- package/dist/{portal-check-ZMLVBIGW.js → portal-check-DV2VSJ5E.js} +1 -1
- package/dist/portal-compliance-JONQ4SOP.js +2 -0
- package/dist/{probe-3FTG6LYO.js → probe-5HAXULAD.js} +1 -1
- package/dist/{providers-AWA7WLLM.js → providers-4PXMWA7V.js} +1 -1
- package/dist/quiz-WYIZJG5K.js +10 -0
- package/dist/{record-YXPB34MY.js → record-N3VNYYKJ.js} +1 -1
- package/dist/reindex-FWPD2VGM.js +2 -0
- package/dist/{retag-N5XF3KXP.js → retag-72R2OSZV.js} +1 -1
- package/dist/{review-77QI6VOC.js → review-2INNWLTW.js} +1 -1
- package/dist/{sentinel-HYAZ3CO5.js → sentinel-EFPEX246.js} +1 -1
- package/dist/{sentinel-bridge-VR357PKL.js → sentinel-bridge-UR2MKARY.js} +1 -1
- package/dist/{serve-U47GULB6.js → serve-MO35XIZE.js} +1 -1
- package/dist/serve-OQYUO7CR.js +12 -0
- package/dist/{server-4YNUIK4W.js → server-4D77LCST.js} +1 -1
- package/dist/server-FGUL2FWQ.js +7 -0
- package/dist/session-tracker-KGORN6B5.js +2 -0
- package/dist/{session-work-log-PAKXOFGL.js → session-work-log-4IEVE4KK.js} +1 -1
- package/dist/{session-work-log-ZP45TREI.js → session-work-log-EE4UIZ33.js} +1 -1
- package/dist/{setup-FEWSYS3Y.js → setup-ZSEC72BS.js} +1 -1
- package/dist/{shift-PC6C7NUX.js → shift-TVNY2CQF.js} +6 -6
- package/dist/{show-PJ5LFLIL.js → show-JH7LJ5MT.js} +1 -1
- package/dist/show-WVHAL4VU.js +7 -0
- package/dist/{spawn-M5BAV252.js → spawn-UH5RENSE.js} +1 -1
- package/dist/status-S7Z5FVIE.js +6 -0
- package/dist/{summary-PYTEIJ4U.js → summary-WLI3NF4G.js} +2 -2
- package/dist/{sweep-HU74OPVW.js → sweep-7TZFN5NS.js} +1 -1
- package/dist/sync-55U6QPIA.js +2 -0
- package/dist/{sync-llms-7CAI74QL.js → sync-llms-GF7DDQDI.js} +1 -1
- package/dist/{team-PDK64JXI.js → team-MGT66HZQ.js} +1 -1
- package/dist/{timeline-K3ZFKJ3R.js → timeline-RK7O2SCM.js} +1 -1
- package/dist/tools-QJHAVYI6.js +2 -0
- package/dist/university-content/notes/N-para-001-build-something.md +126 -0
- package/dist/university-content/notes/N-para-001-meet-the-team.md +85 -0
- package/dist/university-content/notes/N-para-001-shift-setup.md +74 -0
- package/dist/university-content/notes/N-para-101-component-types.md +99 -0
- package/dist/university-content/notes/N-para-101-first-steps.md +134 -0
- package/dist/university-content/notes/N-para-101-five-symbols.md +128 -0
- package/dist/university-content/notes/N-para-101-paradigm-logger.md +89 -0
- package/dist/university-content/notes/N-para-101-portal-yaml.md +112 -0
- package/dist/university-content/notes/N-para-101-project-structure.md +143 -0
- package/dist/university-content/notes/N-para-101-purpose-files.md +121 -0
- package/dist/university-content/notes/N-para-101-tags-and-classification.md +93 -0
- package/dist/university-content/notes/N-para-101-welcome.md +51 -0
- package/dist/university-content/notes/N-para-201-architecture-review.md +175 -0
- package/dist/university-content/notes/N-para-201-aspect-graph.md +79 -0
- package/dist/university-content/notes/N-para-201-aspects-and-anchors.md +112 -0
- package/dist/university-content/notes/N-para-201-component-patterns.md +138 -0
- package/dist/university-content/notes/N-para-201-cross-cutting-concerns.md +145 -0
- package/dist/university-content/notes/N-para-201-disciplines.md +187 -0
- package/dist/university-content/notes/N-para-201-flows-deep-dive.md +119 -0
- package/dist/university-content/notes/N-para-201-gates-deep-dive.md +165 -0
- package/dist/university-content/notes/N-para-201-portal-protocol.md +133 -0
- package/dist/university-content/notes/N-para-201-signal-patterns.md +159 -0
- package/dist/university-content/notes/N-para-201-symbol-naming.md +149 -0
- package/dist/university-content/notes/N-para-301-context-management.md +53 -0
- package/dist/university-content/notes/N-para-301-decisions.md +99 -0
- package/dist/university-content/notes/N-para-301-doctor-and-validation.md +70 -0
- package/dist/university-content/notes/N-para-301-enforcement-levels.md +102 -0
- package/dist/university-content/notes/N-para-301-fragility-tracking.md +50 -0
- package/dist/university-content/notes/N-para-301-history-system.md +42 -0
- package/dist/university-content/notes/N-para-301-navigation-system.md +55 -0
- package/dist/university-content/notes/N-para-301-operations-review.md +55 -0
- package/dist/university-content/notes/N-para-301-paradigm-shift.md +93 -0
- package/dist/university-content/notes/N-para-301-protocols.md +113 -0
- package/dist/university-content/notes/N-para-301-ripple-analysis.md +53 -0
- package/dist/university-content/notes/N-para-301-sentinel-observability.md +87 -0
- package/dist/university-content/notes/N-para-301-sync-and-maintenance.md +57 -0
- package/dist/university-content/notes/N-para-301-wisdom-system.md +89 -0
- package/dist/university-content/notes/N-para-401-agent-identity.md +99 -0
- package/dist/university-content/notes/N-para-401-agent-interop.md +87 -0
- package/dist/university-content/notes/N-para-401-agent-roles.md +107 -0
- package/dist/university-content/notes/N-para-401-commit-conventions.md +82 -0
- package/dist/university-content/notes/N-para-401-mastery-review.md +71 -0
- package/dist/university-content/notes/N-para-401-mcp-tools-overview.md +102 -0
- package/dist/university-content/notes/N-para-401-multi-agent-coordination.md +80 -0
- package/dist/university-content/notes/N-para-401-notebooks-permissions.md +66 -0
- package/dist/university-content/notes/N-para-401-orchestration-workflow.md +101 -0
- package/dist/university-content/notes/N-para-401-pm-governance.md +71 -0
- package/dist/university-content/notes/N-para-401-provider-cascade.md +75 -0
- package/dist/university-content/notes/N-para-401-quick-check.md +95 -0
- package/dist/university-content/notes/N-para-501-advanced-workflows.md +122 -0
- package/dist/university-content/notes/N-para-501-aspect-graph-advanced.md +195 -0
- package/dist/university-content/notes/N-para-501-aspect-graph-internals.md +97 -0
- package/dist/university-content/notes/N-para-501-assessment-loops.md +116 -0
- package/dist/university-content/notes/N-para-501-conductor-workspace.md +77 -0
- package/dist/university-content/notes/N-para-501-habits-practice.md +164 -0
- package/dist/university-content/notes/N-para-501-hook-enforcement.md +100 -0
- package/dist/university-content/notes/N-para-501-lore-system.md +155 -0
- package/dist/university-content/notes/N-para-501-platform-agent-ui.md +108 -0
- package/dist/university-content/notes/N-para-501-review-compliance.md +72 -0
- package/dist/university-content/notes/N-para-501-sentinel-deep-dive.md +173 -0
- package/dist/university-content/notes/N-para-501-session-intelligence.md +104 -0
- package/dist/university-content/notes/N-para-501-symphony-a-mail.md +120 -0
- package/dist/university-content/notes/N-para-501-symphony-networking.md +119 -0
- package/dist/university-content/notes/N-para-501-task-management.md +100 -0
- package/dist/university-content/notes/N-para-601-agent-renaissance.md +121 -0
- package/dist/university-content/notes/N-para-601-attention-scoring.md +129 -0
- package/dist/university-content/notes/N-para-601-context-composition.md +146 -0
- package/dist/university-content/notes/N-para-601-data-sovereignty.md +140 -0
- package/dist/university-content/notes/N-para-601-event-stream.md +126 -0
- package/dist/university-content/notes/N-para-601-knowledge-streams.md +144 -0
- package/dist/university-content/notes/N-para-601-learning-loop.md +68 -0
- package/dist/university-content/notes/N-para-601-maestro-team-collab.md +136 -0
- package/dist/university-content/notes/N-para-601-nominations-debates.md +115 -0
- package/dist/university-content/notes/N-para-701-agent-notebooks.md +131 -0
- package/dist/university-content/notes/N-para-701-agent-pods-nevrland.md +182 -0
- package/dist/university-content/notes/N-para-701-agent-profiles.md +197 -0
- package/dist/university-content/notes/N-para-701-agent-roster.md +82 -0
- package/dist/university-content/notes/N-para-701-agent-state.md +180 -0
- package/dist/university-content/notes/N-para-701-learning-feedback-loop.md +188 -0
- package/dist/university-content/notes/N-para-701-model-tier-resolution.md +204 -0
- package/dist/university-content/notes/N-para-701-orchestration-enforcement.md +169 -0
- package/dist/university-content/notes/N-para-701-per-project-rosters.md +198 -0
- package/dist/university-content/notes/N-para-701-symphony-visibility.md +142 -0
- package/dist/university-content/paths/LP-para-001.yaml +29 -0
- package/dist/university-content/paths/LP-para-101.yaml +59 -0
- package/dist/university-content/paths/LP-para-201.yaml +69 -0
- package/dist/university-content/paths/LP-para-301.yaml +84 -0
- package/dist/university-content/paths/LP-para-401.yaml +74 -0
- package/dist/university-content/paths/LP-para-501.yaml +89 -0
- package/dist/university-content/paths/LP-para-601.yaml +59 -0
- package/dist/university-content/paths/LP-para-701.yaml +64 -0
- package/dist/university-content/quizzes/Q-para-001-build-something.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-001-meet-the-team.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-001-shift-setup.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-101-component-types.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-101-first-steps.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-101-five-symbols.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-101-paradigm-logger.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-101-portal-yaml.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-101-project-structure.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-101-purpose-files.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-101-tags-and-classification.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-101-welcome.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-201-architecture-review.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-201-aspect-graph.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-201-aspects-and-anchors.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-201-component-patterns.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-201-cross-cutting-concerns.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-201-disciplines.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-201-flows-deep-dive.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-201-gates-deep-dive.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-201-portal-protocol.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-201-signal-patterns.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-201-symbol-naming.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-301-context-management.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-301-decisions.yaml +76 -0
- package/dist/university-content/quizzes/Q-para-301-doctor-and-validation.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-301-enforcement-levels.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-301-fragility-tracking.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-301-history-system.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-301-navigation-system.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-301-operations-review.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-301-paradigm-shift.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-301-protocols.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-301-ripple-analysis.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-301-sentinel-observability.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-301-sync-and-maintenance.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-301-wisdom-system.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-401-agent-identity.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-401-agent-interop.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-401-agent-roles.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-401-commit-conventions.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-401-mastery-review.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-401-mcp-tools-overview.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-401-multi-agent-coordination.yaml +76 -0
- package/dist/university-content/quizzes/Q-para-401-notebooks-permissions.yaml +61 -0
- package/dist/university-content/quizzes/Q-para-401-orchestration-workflow.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-401-pm-governance.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-401-provider-cascade.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-401-quick-check.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-501-advanced-workflows.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-501-aspect-graph-advanced.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-501-aspect-graph-internals.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-501-assessment-loops.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-501-conductor-workspace.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-501-habits-practice.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-501-hook-enforcement.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-501-lore-system.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-501-platform-agent-ui.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-501-review-compliance.yaml +61 -0
- package/dist/university-content/quizzes/Q-para-501-sentinel-deep-dive.yaml +86 -0
- package/dist/university-content/quizzes/Q-para-501-session-intelligence.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-501-symphony-a-mail.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-501-symphony-networking.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-501-task-management.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-601-agent-renaissance.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-601-attention-scoring.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-601-context-composition.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-601-data-sovereignty.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-601-event-stream.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-601-knowledge-streams.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-601-learning-loop.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-601-maestro-team-collab.yaml +86 -0
- package/dist/university-content/quizzes/Q-para-601-nominations-debates.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-agent-notebooks.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-agent-pods-nevrland.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-agent-profiles.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-agent-roster.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-agent-state.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-learning-feedback-loop.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-model-tier-resolution.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-orchestration-enforcement.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-per-project-rosters.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-symphony-visibility.yaml +66 -0
- package/dist/university-content/quizzes/Q-plsat-v2.yaml +904 -0
- package/dist/university-content/quizzes/Q-plsat-v3.yaml +2909 -0
- package/dist/university-content/reference.json +2 -2
- package/dist/university-ui/assets/{index-CecQrfSn.js → index-nNgzO1il.js} +2 -2
- package/dist/university-ui/assets/{index-CecQrfSn.js.map → index-nNgzO1il.js.map} +1 -1
- package/dist/university-ui/index.html +1 -1
- package/dist/{upgrade-GX56QE3C.js → upgrade-NKN63VTY.js} +2 -2
- package/dist/validate-XUQZTF3H.js +9 -0
- package/dist/{watch-YCODNIET.js → watch-25GJHQYT.js} +1 -1
- package/lore-ui/dist/assets/{index-Bk-K0qgN.js → index-DKhNxgtW.js} +10 -10
- package/lore-ui/dist/index.html +1 -1
- package/package.json +2 -2
- package/platform-ui/dist/assets/{AmbientSection-BYjt75R1.js → AmbientSection-CwatqcBD.js} +1 -1
- package/platform-ui/dist/assets/{CanvasSection-rKvA_vZj.js → CanvasSection-dFAthehN.js} +1 -1
- package/platform-ui/dist/assets/{DocsSection-CI9K73M-.js → DocsSection-BZ2SFJBZ.js} +1 -1
- package/platform-ui/dist/assets/{GitSection-DSGj_c6S.js → GitSection-MNNYU1tO.js} +1 -1
- package/platform-ui/dist/assets/{GraphSection-CawN7pC5.js → GraphSection-COYjb4Pt.js} +1 -1
- package/platform-ui/dist/assets/LoreSection-B0hUbfsJ.js +1 -0
- package/platform-ui/dist/assets/{SentinelSection-DNgoYMH0.js → SentinelSection-BCxW1DCp.js} +1 -1
- package/platform-ui/dist/assets/{SymphonySection-C0zfcqv3.js → SymphonySection-BsucZRqy.js} +1 -1
- package/platform-ui/dist/assets/{TeamSection-Bzd3Dt9Q.js → TeamSection-C0QNTudW.js} +1 -1
- package/platform-ui/dist/assets/{UniversitySection-tBr62R0S.js → UniversitySection-DN1-g9pw.js} +1 -1
- package/platform-ui/dist/assets/{index-BaOmyn11.js → index-DwUT8pju.js} +2 -2
- package/platform-ui/dist/index.html +1 -1
- package/dist/add-P76GEMGF.js +0 -8
- package/dist/chunk-JQKKVAAN.js +0 -2
- package/dist/chunk-NQ47TA6C.js +0 -111
- package/dist/chunk-ODVKPZZ4.js +0 -2
- package/dist/chunk-Q2J542ST.js +0 -2
- package/dist/chunk-RBLK34IA.js +0 -11
- package/dist/chunk-RN4VE6P3.js +0 -521
- package/dist/chunk-WS2N27RX.js +0 -3
- package/dist/config-schema-GUQY2QN7.js +0 -2
- package/dist/decision-loader-2XPZE4EZ.js +0 -2
- package/dist/doctor-WMVULMQD.js +0 -2
- package/dist/list-5IUGP3ZB.js +0 -7
- package/dist/lore-loader-RVQI5GXL.js +0 -2
- package/dist/lore-loader-XY5MZRR2.js +0 -2
- package/dist/migrate-assessments-GEI5WMI2.js +0 -4
- package/dist/portal-compliance-6YR27IQU.js +0 -2
- package/dist/quiz-FE5UGAY2.js +0 -10
- package/dist/reindex-I6LPAKCC.js +0 -2
- package/dist/serve-OY6XYL7F.js +0 -12
- package/dist/server-2MNROHF6.js +0 -7
- package/dist/session-tracker-MWJAJA6Z.js +0 -2
- package/dist/show-BOAVWZPZ.js +0 -7
- package/dist/status-A37ECYNJ.js +0 -6
- package/dist/sync-DLUBV5HQ.js +0 -2
- package/dist/tools-5ITPEPSV.js +0 -2
- package/dist/university-content/courses/.purpose +0 -492
- package/dist/university-content/courses/para-001.json +0 -166
- package/dist/university-content/courses/para-101.json +0 -615
- package/dist/university-content/courses/para-201.json +0 -794
- package/dist/university-content/courses/para-301.json +0 -830
- package/dist/university-content/courses/para-401.json +0 -868
- package/dist/university-content/courses/para-501.json +0 -1166
- package/dist/university-content/courses/para-601.json +0 -719
- package/dist/university-content/courses/para-701.json +0 -807
- package/dist/university-content/plsat/.purpose +0 -162
- package/dist/university-content/plsat/v2.0.json +0 -760
- package/dist/university-content/plsat/v3.0.json +0 -3453
- package/dist/validate-C6SMKGYD.js +0 -9
- package/platform-ui/dist/assets/LoreSection-oO5dCe6O.js +0 -1
- /package/dist/{chunk-BV5PRPLB.js → chunk-IZSBGW6E.js} +0 -0
- /package/templates/paradigm/specs/{scan.md → probe.md} +0 -0
|
@@ -1,719 +0,0 @@
|
|
|
1
|
-
{
|
|
2
|
-
"id": "para-601",
|
|
3
|
-
"title": "PARA 601: Paradigm Ambient",
|
|
4
|
-
"description": "Master Paradigm's ambient coordination system \u2014 knowledge streams, event stream, attention scoring, nominations, data sovereignty, agent manifest renaissance, and context composition. The full loop from observation to adaptation.",
|
|
5
|
-
"lessons": [
|
|
6
|
-
{
|
|
7
|
-
"id": "learning-loop",
|
|
8
|
-
"title": "The Learning Loop",
|
|
9
|
-
"content": "## Why Observation Without Adaptation Is Waste\n\nMost development tools observe extensively but adapt almost never. Your linter sees thousands of issues. Your CI runs hundreds of tests. Your APM collects millions of metrics. All of this observation generates data \u2014 but data without a feedback loop is just noise with a storage bill.\n\nConsider what happens in a typical AI-assisted session today. An agent modifies 8 files, creates a new service, adds routes to portal.yaml, and records a lore entry. The session ends. A week later, a different agent picks up related work and makes the same architectural mistake the first agent corrected mid-session. Why? Because the correction was never captured in a form that feeds back into future sessions. The observation happened (the lore entry recorded what was done), but the adaptation never occurred (the learning was not injected into the next agent's context).\n\nThis is the gap that Paradigm v5.0 closes.\n\n## The DO-RECORD-ASSESS-LEARN-ADAPT-DO Cycle\n\nThe ambient coordination system implements a six-phase loop:\n\n**DO** \u2014 An agent performs work: modifying files, calling tools, making decisions. Every action produces events in the event stream.\n\n**RECORD** \u2014 The work is captured in three knowledge streams, each with a different audience and lifecycle. The work log records what got done (for the team). The learning journal records what the agent learned (for itself). Team decisions record what was decided and why (for the institution).\n\n**ASSESS** \u2014 Events flow through attention filters. Each agent scores every event against its attention patterns \u2014 symbol matches, path matches, concept matches, signal matches. Events that exceed an agent's threshold trigger the next phase.\n\n**LEARN** \u2014 Agents self-nominate contributions based on relevant events. A security agent notices a new route without a gate. A reviewer spots a pattern they have seen fail before. 
A tester sees a new component without test coverage. These nominations capture agent-specific insights triggered by project activity.\n\n**ADAPT** \u2014 Learnings feed back into context. Journal insights from past sessions appear in the next session's prompt enrichment. Recent team decisions are surfaced to agents working on related symbols. Pending nominations are included in the active agent's context. The `paradigm_context_compose` tool assembles all of this into a coherent context section.\n\n**DO** \u2014 The cycle repeats, but now the agent starts with richer context. It knows what was tried before, what the team decided, what the security agent flagged, and what patterns to avoid. Each iteration produces better outcomes because the loop is closed.\n\n## What v5.0 Adds to Close the Loop\n\nBefore v5.0, Paradigm had the DO and RECORD phases (lore entries, .purpose files, portal.yaml). It had partial ASSESS capability through ripple analysis. But the LEARN and ADAPT phases were manual \u2014 a human had to read lore entries and tell the next agent what to watch out for.\n\nv5.0 adds four capabilities that close the loop automatically:\n\n1. **Knowledge Streams** \u2014 Lore is split into three streams with distinct storage, lifecycles, and audiences, enabling targeted adaptation.\n2. **Event Stream** \u2014 Every tool call and file edit produces a structured event that flows through attention filters, enabling real-time assessment.\n3. **Attention Scoring & Nominations** \u2014 Agents evaluate events against their attention patterns and self-nominate contributions, enabling machine-driven learning.\n4. **Context Composition** \u2014 Journal insights, team decisions, and pending nominations are composed into the next session's context, enabling automatic adaptation.\n\n## Context Engineering: Slim CLAUDE.md + On-Demand Guidance\n\nThe learning loop requires efficient context management. 
A 900-line CLAUDE.md that loads every time wastes tokens on content that may not be relevant to the current task. v5.0 implements a context engineering approach:\n\n**Slim CLAUDE.md** \u2014 The base CLAUDE.md was reduced from ~856 lines to ~150 lines. It contains only the orientation information needed for every session: project overview, symbol system, conventions, commit format, and pointers to on-demand resources.\n\n**On-Demand Guidance** \u2014 Twelve guidance resources are available via `paradigm://guidance/{topic}`. Topics include logging, portal protocol, MCP workflow, flows, orchestration, workspaces, university, calibration, checkpoints, navigation, component types, and troubleshooting. An agent loads only the guidance it needs for the current task.\n\n**Agent Contributions** \u2014 Active agents inject their own context sections via `AgentContext.contributions`. A security agent might contribute a section listing recently added gates. A reviewer might contribute a section listing code smells found in the current PR. These contributions compose dynamically based on which agents are active.\n\nThe result is a context window that contains high-signal, task-relevant content rather than a static wall of instructions. The learning loop feeds relevant history into this context, making each session incrementally smarter than the last.",
|
|
10
|
-
"keyConcepts": [
|
|
11
|
-
"Six-phase learning loop: DO, RECORD, ASSESS, LEARN, ADAPT, DO",
|
|
12
|
-
"Observation without adaptation is waste \u2014 data without a feedback loop is noise",
|
|
13
|
-
"v5.0 closes the loop with knowledge streams, event stream, attention scoring, and context composition",
|
|
14
|
-
"Slim CLAUDE.md (~150 lines) plus 12 on-demand guidance resources via paradigm://guidance/{topic}",
|
|
15
|
-
"Agent contributions dynamically inject context sections based on active agents",
|
|
16
|
-
"Each loop iteration produces better outcomes because past learnings feed into future context"
|
|
17
|
-
],
|
|
18
|
-
"quiz": [
|
|
19
|
-
{
|
|
20
|
-
"id": "q1",
|
|
21
|
-
"question": "An agent completes a session where it discovers that Express v5 requires explicit async error wrapping. It records this in a lore entry but no journal entry is created. Three weeks later, a different agent makes the same mistake. Which phase of the learning loop failed?",
|
|
22
|
-
"choices": {
|
|
23
|
-
"A": "DO \u2014 the first agent should not have made the mistake",
|
|
24
|
-
"B": "RECORD \u2014 the lore entry was insufficient",
|
|
25
|
-
"C": "LEARN \u2014 the insight was captured in lore but never extracted into a journal entry that could feed back into future context",
|
|
26
|
-
"D": "ADAPT \u2014 the context composition tool was broken",
|
|
27
|
-
"E": "ASSESS \u2014 the event stream missed the original correction"
|
|
28
|
-
},
|
|
29
|
-
"correct": "C",
|
|
30
|
-
"explanation": "The LEARN phase failed. The observation was recorded (RECORD phase worked \u2014 there is a lore entry), but the insight was never extracted into a learning journal entry with a transferable pattern. Without a journal entry, the ADAPT phase has nothing to inject into future sessions. The fix is recording a journal entry with `transferable: true` and extracting a `LearningPattern` so it feeds into context composition."
|
|
31
|
-
},
|
|
32
|
-
{
|
|
33
|
-
"id": "q2",
|
|
34
|
-
"question": "Why did Paradigm v5.0 reduce CLAUDE.md from ~856 lines to ~150 lines?",
|
|
35
|
-
"choices": {
|
|
36
|
-
"A": "The old content was outdated and no longer relevant",
|
|
37
|
-
"B": "Smaller files load faster from disk",
|
|
38
|
-
"C": "Loading all guidance every session wastes tokens on content irrelevant to the current task \u2014 on-demand resources let agents load only what they need",
|
|
39
|
-
"D": "Claude Code has a strict file size limit for CLAUDE.md",
|
|
40
|
-
"E": "The content was moved to .paradigm/config.yaml instead"
|
|
41
|
-
},
|
|
42
|
-
"correct": "C",
|
|
43
|
-
"explanation": "Context engineering is about putting high-signal, task-relevant content in the context window. A 856-line CLAUDE.md loaded every session means hundreds of tokens spent on logging rules when the task is about testing, or portal conventions when the task is about lore. The slim CLAUDE.md provides universal orientation, and 12 `paradigm://guidance/{topic}` resources provide targeted guidance on demand."
|
|
44
|
-
},
|
|
45
|
-
{
|
|
46
|
-
"id": "q3",
|
|
47
|
-
"question": "Which phases of the learning loop existed before v5.0?",
|
|
48
|
-
"choices": {
|
|
49
|
-
"A": "All six phases existed but were unreliable",
|
|
50
|
-
"B": "Only DO existed \u2014 everything else is new in v5.0",
|
|
51
|
-
"C": "DO, RECORD, and partial ASSESS (via ripple analysis) \u2014 LEARN, ADAPT, and full ASSESS were manual",
|
|
52
|
-
"D": "DO, RECORD, ASSESS, and LEARN \u2014 only ADAPT was missing",
|
|
53
|
-
"E": "DO and ADAPT \u2014 recording and assessment were added in v5.0"
|
|
54
|
-
},
|
|
55
|
-
"correct": "C",
|
|
56
|
-
"explanation": "Before v5.0, Paradigm had DO (agents perform work), RECORD (lore entries, .purpose files, portal.yaml), and partial ASSESS (ripple analysis could identify impact, but there was no event stream or attention filtering). The LEARN phase (journal entries, nominations) and ADAPT phase (context composition from learnings) were entirely manual \u2014 a human had to read lore and brief the next agent."
|
|
57
|
-
},
|
|
58
|
-
{
|
|
59
|
-
"id": "q4",
|
|
60
|
-
"question": "A security agent contributes a context section listing recently added gates. Where is this contribution defined?",
|
|
61
|
-
"choices": {
|
|
62
|
-
"A": "In the security agent's `.agent` file under the `context.contributions` field",
|
|
63
|
-
"B": "In `.paradigm/config.yaml` under a `context_sections` key",
|
|
64
|
-
"C": "In CLAUDE.md as a static section",
|
|
65
|
-
"D": "In `portal.yaml` under each gate definition",
|
|
66
|
-
"E": "In the security agent's learning journal"
|
|
67
|
-
},
|
|
68
|
-
"correct": "A",
|
|
69
|
-
"explanation": "Agent context contributions are defined in the `AgentContext.contributions` array on the agent's profile (the `.agent` file). Each contribution specifies a `section` name, inline `content` or a `content_ref` MCP resource URI, and a `priority` (high, medium, low). High-priority contributions are always included; low-priority ones are loaded on demand. This allows each agent to inject task-relevant context without hardcoding it into CLAUDE.md."
|
|
70
|
-
}
|
|
71
|
-
]
|
|
72
|
-
},
|
|
73
|
-
{
|
|
74
|
-
"id": "knowledge-streams",
|
|
75
|
-
"title": "Knowledge Streams",
|
|
76
|
-
"content": "## The Lore Split\n\nParadigm's original lore system stored everything \u2014 sessions, decisions, incidents, milestones \u2014 in a single stream of date-partitioned YAML entries. This worked well for small projects, but as projects grew, the single stream created problems. A standup bot pulling \"what got done this week\" had to filter through architectural decisions and incident postmortems. An agent looking for \"what did I learn about JWT handling\" had to parse session summaries. A new team member searching for \"why did we choose Redis\" had to wade through hundreds of entries.\n\nv5.0 splits lore into three knowledge streams, each with a distinct audience, lifecycle, and storage location.\n\n## Stream 1: Work Log \u2014 \"What Got Done\"\n\nThe work log is the team-facing record of completed work. It answers the question every standup asks: what did you do, what is left, what is blocking you?\n\n**Storage:** `.paradigm/work-log/{date}/` \u2014 project-scoped, date-partitioned YAML files, one entry per work unit.\n\n**Audience:** The team. Standup bots, sprint boards, and project managers consume work log entries.\n\n**Lifecycle:** Ephemeral. Work log entries are relevant for days to weeks. 
After a sprint ends, they are historical context rather than active reference.\n\n**Entry structure (`WorkLogEntry`):**\n\n| Field | Required | Description |\n|---|---|---|\n| `id` | yes | Unique ID (e.g., `WL-security-001`) |\n| `agent` | yes | Agent that performed the work |\n| `timestamp` | yes | ISO 8601 timestamp |\n| `summary` | yes | What was done |\n| `outcome` | yes | pass, fail, partial, or blocked |\n| `task_ref` | no | Ticket or issue reference (e.g., `ENG-142`) |\n| `files_modified` | no | List of modified files |\n| `symbols_touched` | no | Paradigm symbols touched |\n| `next_steps` | no | What remains to be done |\n| `blockers` | no | What is blocking progress |\n| `duration_minutes` | no | How long the work took |\n| `commit` | no | Git commit hash |\n\n**MCP Tools:**\n- `paradigm_work_log_record` \u2014 Record a work log entry. Requires `agent`, `summary`, and `outcome`. Supports optional `task_ref`, `files_modified`, `symbols_touched`, `next_steps`, `blockers`, `duration_minutes`, and `commit`.\n- `paradigm_work_log_search` \u2014 Search work log entries by `agent`, `outcome`, `task_ref`, `symbol`, `dateFrom`, `dateTo`. Pass `summary: true` to get an aggregate summary instead of individual entries.\n\n## Stream 2: Learning Journal \u2014 \"What I Learned\"\n\nThe learning journal is the agent-private record of insights, corrections, and patterns discovered during work. It answers the question: what should I remember for next time?\n\n**Storage:** `~/.paradigm/agents/{id}/journal/` \u2014 user-scoped, agent-specific. The journal travels with the agent across projects because learning is not project-specific.\n\n**Audience:** The agent itself (and optionally other agents if the insight is marked `transferable`).\n\n**Lifecycle:** Durable. A pattern discovered today about JWT ordering is relevant months from now. 
Journal entries persist until explicitly archived.\n\n**Entry structure (`JournalEntry`):**\n\n| Field | Required | Description |\n|---|---|---|\n| `id` | yes | Unique ID (e.g., `LJ-2026-03-20-001`) |\n| `agent` | yes | Agent who learned this |\n| `timestamp` | yes | ISO 8601 timestamp |\n| `trigger` | yes | What prompted the learning (7 trigger types) |\n| `insight` | yes | The insight itself |\n| `project` | yes | Project where this happened |\n| `transferable` | yes | Whether this applies to other projects |\n| `confidence_before` | no | Agent's confidence before (0.0-1.0) |\n| `confidence_after` | no | Adjusted confidence after (0.0-1.0) |\n| `pattern` | no | Extracted `LearningPattern` (id, applies_when, correct_approach) |\n| `linked_work_log` | no | Work log entry that prompted this learning |\n| `tags` | no | Tags for categorization |\n\nSeven journal triggers capture distinct learning moments: `correction_received` (human corrected the approach), `confidence_miss` (agent was confident but wrong), `pattern_discovered` (new reusable pattern found), `debate_loss` (another agent's approach was chosen), `failure_analysis` (something broke and was analyzed), `human_feedback` (direct human assessment), and `self_reflection` (agent proactively recorded an insight).\n\n**MCP Tools:**\n- `paradigm_journal_record` \u2014 Record a journal entry. Requires `agent`, `trigger`, `insight`, `project`, and `transferable`. Supports optional `confidence_before`, `confidence_after`, `pattern`, `linked_work_log`, and `tags`.\n- `paradigm_journal_search` \u2014 Search journal entries by `agent`, `trigger`, `project`, `transferable`, `tag`, `dateFrom`, `dateTo`. Pass `stats: true` (with `agent`) to get aggregate statistics.\n\n## Stream 3: Team Decisions \u2014 \"What We Decided\"\n\nTeam decisions are the institutional record of choices made, rationale given, and alternatives rejected. 
They answer the question: why did we do it this way?\n\n**Storage:** `.paradigm/decisions/` \u2014 project-scoped, not date-partitioned (decisions are referenced by topic, not by when they were made).\n\n**Audience:** The entire team \u2014 current and future. New team members benefit most from decision records.\n\n**Lifecycle:** Institutional. Decisions persist until explicitly superseded or deprecated. A decision made in month one remains authoritative until a newer decision replaces it.\n\n**Entry structure (`TeamDecision`):**\n\n| Field | Required | Description |\n|---|---|---|\n| `id` | yes | Unique ID (e.g., `TD-2026-03-20-001`) |\n| `title` | yes | Decision title |\n| `timestamp` | yes | ISO 8601 timestamp |\n| `participants` | yes | Who participated (id, role, stance) |\n| `decision` | yes | The decision itself |\n| `rationale` | yes | Why this was chosen |\n| `alternatives_considered` | no | What else was considered and why it was rejected |\n| `symbols_affected` | no | Paradigm symbols affected |\n| `status` | yes | active, proposed, superseded, deprecated, rejected |\n| `tags` | no | Tags for categorization |\n\nParticipant stances capture the human dynamics: `proposed`, `supported`, `dissented`, `abstained`, `neutral`. Recording dissent is especially important \u2014 when a decision is revisited later, knowing who dissented and why saves time.\n\n**MCP Tools:**\n- `paradigm_decision_record` \u2014 Record a decision. Requires `title`, `decision`, `rationale`, and `participants`. Supports optional `alternatives_considered`, `symbols_affected`, `status`, and `tags`.\n- `paradigm_decision_search` \u2014 Search decisions by `status`, `participant`, `symbol`, `tag`, `dateFrom`, `dateTo`. Pass `summary: true` for an aggregate view.\n\n## Auto-Classification\n\nWhen recording via the original `paradigm_lore_record` tool, the `stream` parameter routes the entry to the correct knowledge stream. 
Setting `stream: 'auto'` triggers auto-classification based on the entry type. The `LORE_TYPE_TO_STREAM` mapping defines how existing lore types map to streams:\n\n- `agent-session` splits into work-log (what was done) and journal (what was learned)\n- `decision` routes to the decisions stream\n- `incident` splits across all three: work-log (what happened), journal (what we learned), decision (prevention strategy)\n- `human-note` routes to decisions (institutional context)",
|
|
77
|
-
"keyConcepts": [
|
|
78
|
-
"Three knowledge streams: Work Log (what got done), Learning Journal (what I learned), Team Decisions (what we decided)",
|
|
79
|
-
"Work Log: .paradigm/work-log/{date}/ \u2014 team-facing, ephemeral, project-scoped",
|
|
80
|
-
"Learning Journal: ~/.paradigm/agents/{id}/journal/ \u2014 agent-private, durable, user-scoped",
|
|
81
|
-
"Team Decisions: .paradigm/decisions/ \u2014 institutional, persistent, project-scoped",
|
|
82
|
-
"Seven journal triggers: correction_received, confidence_miss, pattern_discovered, debate_loss, failure_analysis, human_feedback, self_reflection",
|
|
83
|
-
"Six MCP tools: paradigm_work_log_record, paradigm_work_log_search, paradigm_journal_record, paradigm_journal_search, paradigm_decision_record, paradigm_decision_search"
|
|
84
|
-
],
|
|
85
|
-
"quiz": [
|
|
86
|
-
{
|
|
87
|
-
"id": "q1",
|
|
88
|
-
"question": "A security agent discovers during a session that JWT refresh tokens must be rotated on every use (not just on expiry). This insight applies to every project that uses JWTs. Where should this be recorded?",
|
|
89
|
-
"choices": {
|
|
90
|
-
"A": "Work log \u2014 it is work the agent performed",
|
|
91
|
-
"B": "Team decisions \u2014 the team needs to know about JWT rotation",
|
|
92
|
-
"C": "Learning journal with `transferable: true` \u2014 it is an agent-learned insight that applies across projects",
|
|
93
|
-
"D": "Lore entry with type `agent-session`",
|
|
94
|
-
"E": "CLAUDE.md as a permanent instruction"
|
|
95
|
-
},
|
|
96
|
-
"correct": "C",
|
|
97
|
-
"explanation": "This is a learning moment \u2014 the agent discovered a pattern (`pattern_discovered` or `correction_received` trigger) that applies beyond the current project. Recording it in the learning journal with `transferable: true` ensures it travels with the agent to future projects via `~/.paradigm/agents/{id}/journal/`. A work log entry would capture what was done but not the reusable insight. A team decision would capture a project-specific choice but not the cross-project pattern."
|
|
98
|
-
},
|
|
99
|
-
{
|
|
100
|
-
"id": "q2",
|
|
101
|
-
"question": "Where are learning journal entries stored, and why?",
|
|
102
|
-
"choices": {
|
|
103
|
-
"A": "`.paradigm/journal/` \u2014 project-scoped because learnings are project-specific",
|
|
104
|
-
"B": "`~/.paradigm/agents/{id}/journal/` \u2014 user-scoped because learnings travel with the agent across projects",
|
|
105
|
-
"C": "`.paradigm/lore/entries/` \u2014 in the same location as all other lore entries",
|
|
106
|
-
"D": "`~/.paradigm/journal/` \u2014 global but not agent-specific",
|
|
107
|
-
"E": "In memory only \u2014 journal entries are ephemeral"
|
|
108
|
-
},
|
|
109
|
-
"correct": "B",
|
|
110
|
-
"explanation": "Journal entries are stored at `~/.paradigm/agents/{id}/journal/` \u2014 in the user's home directory, scoped to the specific agent. This location is user-scoped (ring 2) rather than project-locked (ring 1) because learning is not project-specific. An insight about JWT handling discovered in project A should be available when the same agent works on project B."
|
|
111
|
-
},
|
|
112
|
-
{
|
|
113
|
-
"id": "q3",
|
|
114
|
-
"question": "A lore entry of type `incident` is recorded with `stream: 'auto'`. Which knowledge streams receive data?",
|
|
115
|
-
"choices": {
|
|
116
|
-
"A": "Only the work log \u2014 incidents are about what happened",
|
|
117
|
-
"B": "Only the learning journal \u2014 incidents are about what was learned",
|
|
118
|
-
"C": "Work log, learning journal, and team decisions \u2014 incidents split across all three streams",
|
|
119
|
-
"D": "Only team decisions \u2014 incidents result in policy changes",
|
|
120
|
-
"E": "The entry is rejected \u2014 incidents cannot use auto-classification"
|
|
121
|
-
},
|
|
122
|
-
"correct": "C",
|
|
123
|
-
"explanation": "The `LORE_TYPE_TO_STREAM` mapping routes `incident` to all three streams: the work log captures what happened (the incident timeline), the learning journal captures what was learned (failure analysis), and team decisions capture the prevention strategy. This is because incidents naturally contain all three kinds of knowledge \u2014 facts about the event, insights from investigation, and decisions about prevention."
|
|
124
|
-
},
|
|
125
|
-
{
|
|
126
|
-
"id": "q4",
|
|
127
|
-
"question": "A team decision has two participants: `human/matt` with stance `proposed` and `a-paradigm/security` with stance `dissented`. Six months later, the team revisits this decision. Why is the recorded dissent valuable?",
|
|
128
|
-
"choices": {
|
|
129
|
-
"A": "It proves the human was wrong and the agent was right",
|
|
130
|
-
"B": "It provides an audit trail for compliance purposes",
|
|
131
|
-
"C": "It immediately surfaces the security agent's original concerns, saving time re-analyzing the tradeoffs",
|
|
132
|
-
"D": "It triggers an automatic alert to the security agent",
|
|
133
|
-
"E": "It prevents the same decision from being proposed again"
|
|
134
|
-
},
|
|
135
|
-
"correct": "C",
|
|
136
|
-
"explanation": "Recording dissent captures the counter-arguments at the time of the decision. When the decision is revisited, the team can immediately see what the security agent objected to rather than re-analyzing from scratch. This is especially valuable when the original participants have moved on or forgotten the context. The `dissented` stance does not imply the decision was wrong \u2014 it preserves the full deliberation for future reference."
|
|
137
|
-
},
|
|
138
|
-
{
|
|
139
|
-
"id": "q5",
|
|
140
|
-
"question": "You want to find all work done by the builder agent this sprint that resulted in a `blocked` outcome. Which tool and parameters do you use?",
|
|
141
|
-
"choices": {
|
|
142
|
-
"A": "`paradigm_lore_search` with `author: 'builder'` and `type: 'agent-session'`",
|
|
143
|
-
"B": "`paradigm_work_log_search` with `agent: 'builder'`, `outcome: 'blocked'`, and `dateFrom` set to sprint start",
|
|
144
|
-
"C": "`paradigm_journal_search` with `agent: 'builder'` and `trigger: 'failure_analysis'`",
|
|
145
|
-
"D": "`paradigm_decision_search` with `participant: 'builder'`",
|
|
146
|
-
"E": "`paradigm_work_log_search` with `summary: true` only"
|
|
147
|
-
},
|
|
148
|
-
"correct": "B",
|
|
149
|
-
"explanation": "`paradigm_work_log_search` is purpose-built for answering \"what got done\" questions. Passing `agent: 'builder'`, `outcome: 'blocked'`, and `dateFrom` set to the sprint start date filters to exactly the entries needed. The journal would show what the agent learned, not what work was blocked. The lore search would work but returns the old unified format rather than the structured work log fields like `outcome`, `blockers`, and `next_steps`."
|
|
150
|
-
}
|
|
151
|
-
]
|
|
152
|
-
},
|
|
153
|
-
{
|
|
154
|
-
"id": "event-stream",
|
|
155
|
-
"title": "The Event Stream",
|
|
156
|
-
"content": "## How Events Are Produced\n\nEvery meaningful action in a Paradigm session produces an event. When an agent modifies a file, the post-write hook emits a `file-modified` event. When an MCP tool is called, a `symbol-queried` or `gate-checked` event fires depending on the tool. When compliance issues are detected, a `compliance-violation` event captures the details. These events flow into a shared stream that all agents can observe.\n\nThe event stream is the nervous system of ambient coordination. Without it, agents are blind to each other's actions. With it, a security agent can notice that a new route was created without a gate, a tester can see that a new component was added without test coverage, and a reviewer can observe that a complex flow was modified without documentation updates.\n\n## StreamEvent Anatomy\n\nEvery event follows the `StreamEvent` interface with 12 fields:\n\n```typescript\ninterface StreamEvent {\n id: string; // Unique ID (e.g., \"ev-1710937200000-4821\")\n type: EventType; // Classification (12 types)\n source: EventSource; // Origin (6 sources)\n timestamp: string; // ISO 8601\n path?: string; // File path (if applicable)\n symbols?: string[]; // Paradigm symbols referenced\n keywords?: string[]; // Semantic keywords extracted\n context?: string; // Brief context snippet\n agent?: string; // Agent that produced this event\n tool?: string; // MCP tool name (if from tool call)\n severity?: string; // info, warning, error, critical\n data?: Record<string, unknown>; // Structured metadata\n}\n```\n\n**Event IDs** are generated from the current timestamp plus a random 4-digit suffix: `ev-{timestamp}-{rand}`. 
This ensures uniqueness without coordination.\n\n**Event Sources** identify where the event originated:\n- `post-write-hook` \u2014 File was written, hook detected the change\n- `mcp-tool-call` \u2014 An MCP tool was invoked\n- `stop-hook` \u2014 Session end triggered an event\n- `conversation` \u2014 Event derived from conversation context\n- `agent-action` \u2014 Agent explicitly emitted an event\n- `error` \u2014 An error occurred during processing\n\n**Event Types** classify what happened. Twelve types cover the full range of project activity:\n\n| Type | When It Fires |\n|---|---|\n| `file-modified` | A source file was changed |\n| `symbol-queried` | A symbol was looked up via search/navigate/ripple |\n| `gate-checked` | A gate (^) was evaluated or referenced |\n| `compliance-violation` | A habit, hook, or policy check failed |\n| `concept-mentioned` | A semantic concept appeared in context |\n| `work-completed` | A unit of work finished (pass/fail/partial) |\n| `decision-made` | A team decision was recorded |\n| `error-encountered` | An error was caught during processing |\n| `route-created` | A new API route was added |\n| `gate-added` | A new gate was added to portal.yaml |\n| `flow-modified` | A flow definition was changed |\n| `test-result` | A test suite reported results |\n\n## JSONL Storage\n\nEvents are stored as append-only JSONL (one JSON object per line) at `.paradigm/events/stream.jsonl`. The append-only format is chosen for performance \u2014 writing one line is cheaper than reading, modifying, and rewriting a file.\n\nThe stream is bounded by `DEFAULT_MAX_EVENTS` (1000). When the file exceeds ~500KB (a rough proxy for 1000 events), the `pruneIfNeeded` function reads the file, keeps only the most recent 1000 lines, and rewrites it. This ensures the stream does not grow unbounded while preserving recent history.\n\nEvents are also held in a memory buffer (`memoryStream`) for fast access during the current session. 
The memory buffer is independently bounded to 1000 events. Even if file I/O fails, the memory stream continues to function \u2014 file write failure is non-fatal.\n\n## emitEvent\n\nThe `emitEvent` function is the single entry point for producing events:\n\n```typescript\nemitEvent(rootDir, {\n type: 'file-modified',\n source: 'post-write-hook',\n path: 'src/auth/middleware.ts',\n symbols: ['#auth-middleware', '^authenticated'],\n keywords: ['authentication', 'JWT'],\n context: 'Modified JWT validation logic',\n});\n```\n\nThe function auto-generates the `id` and `timestamp`, appends to both the memory buffer and the JSONL file, and prunes if the file is oversized. It returns the complete `StreamEvent` with all fields populated.\n\n## queryEvents\n\nThe `queryEvents` function reads events from disk (falling back to the memory buffer on read failure) and supports six filters:\n\n- `type` \u2014 Filter by event type (e.g., `'file-modified'`)\n- `source` \u2014 Filter by event source (e.g., `'mcp-tool-call'`)\n- `symbol` \u2014 Filter by a specific symbol in the event's `symbols` array\n- `agent` \u2014 Filter by the agent that produced the event\n- `since` \u2014 Only events after this ISO timestamp\n- `limit` \u2014 Maximum number of events to return\n\nResults are sorted by timestamp descending (most recent first). 
This ordering is intentional \u2014 in ambient coordination, recent events are almost always more relevant than older ones.\n\n## Event Stream Configuration\n\nThe `EventStreamConfig` interface allows fine-grained control:\n\n- `enabled` \u2014 Master switch for ambient coordination (default: true in v5.0 projects)\n- `max_events` \u2014 Maximum events retained (default: 1000)\n- `event_ttl_seconds` \u2014 Time-to-live for events (default: 3600 = 1 hour)\n- `emit` \u2014 Whitelist of event types to produce (if set, only these types fire)\n- `suppress` \u2014 Blacklist of event types to silence (overrides emit)\n- `storage` \u2014 `'memory'` (in-process only) or `'file'` (JSONL persistence)\n\nFor projects that do not need ambient coordination, setting `enabled: false` turns off event emission entirely. For projects that need it but want to reduce noise, the `suppress` list can silence high-frequency event types like `symbol-queried` while preserving important ones like `compliance-violation` and `gate-added`.",
|
|
157
|
-
"keyConcepts": [
|
|
158
|
-
"StreamEvent has 12 fields: id, type, source, timestamp, path, symbols, keywords, context, agent, tool, severity, data",
|
|
159
|
-
"12 event types: file-modified, symbol-queried, gate-checked, compliance-violation, concept-mentioned, work-completed, decision-made, error-encountered, route-created, gate-added, flow-modified, test-result",
|
|
160
|
-
"6 event sources: post-write-hook, mcp-tool-call, stop-hook, conversation, agent-action, error",
|
|
161
|
-
"JSONL append-only storage at .paradigm/events/stream.jsonl with 1000-event bounded pruning",
|
|
162
|
-
"emitEvent auto-generates id and timestamp, writes to both memory buffer and file",
|
|
163
|
-
"queryEvents supports filtering by type, source, symbol, agent, since, and limit"
|
|
164
|
-
],
|
|
165
|
-
"quiz": [
|
|
166
|
-
{
|
|
167
|
-
"id": "q1",
|
|
168
|
-
"question": "An agent adds a new POST route to `src/routes/payments.ts` and updates `portal.yaml` with a `^authenticated` gate. How many events are emitted, and of what types?",
|
|
169
|
-
"choices": {
|
|
170
|
-
"A": "One event: `file-modified` for the route file",
|
|
171
|
-
"B": "Two events: `file-modified` for the route file and `file-modified` for portal.yaml",
|
|
172
|
-
"C": "At minimum two events: `route-created` (or `file-modified`) for the new route, and `gate-added` for the portal.yaml change \u2014 exact count depends on which hooks and tools fired",
|
|
173
|
-
"D": "No events \u2014 events are only emitted by MCP tool calls, not file edits",
|
|
174
|
-
"E": "One event: `gate-added` \u2014 only portal.yaml changes produce events"
|
|
175
|
-
},
|
|
176
|
-
"correct": "C",
|
|
177
|
-
"explanation": "Adding a route and a gate are distinct actions that produce distinct events. The route change produces at least a `file-modified` or `route-created` event (depending on whether the post-write hook recognizes it as a route file). The portal.yaml change produces a `gate-added` event. Additional events like `symbol-queried` may fire if the agent used ripple or navigate tools during the process. The exact count depends on the session's tool usage and hook configuration."
|
|
178
|
-
},
|
|
179
|
-
{
|
|
180
|
-
"id": "q2",
|
|
181
|
-
"question": "The event stream file at `.paradigm/events/stream.jsonl` grows to 600KB. What happens on the next `emitEvent` call?",
|
|
182
|
-
"choices": {
|
|
183
|
-
"A": "The event is rejected \u2014 the file is too large",
|
|
184
|
-
"B": "The file is deleted and a fresh one is started",
|
|
185
|
-
"C": "The file is read, only the most recent 1000 lines are kept, and it is rewritten \u2014 then the new event is appended",
|
|
186
|
-
"D": "A second file (`stream-2.jsonl`) is created for overflow",
|
|
187
|
-
"E": "Nothing special \u2014 pruning only happens at startup"
|
|
188
|
-
},
|
|
189
|
-
"correct": "C",
|
|
190
|
-
"explanation": "The `pruneIfNeeded` function runs after every `emitEvent` call. When the file exceeds ~500KB (512 * 1024 bytes), it reads all lines, keeps only the most recent 1000 (`lines.slice(-DEFAULT_MAX_EVENTS)`), and rewrites the file. The new event has already been appended before pruning runs, so it is included in the kept lines. This bounded pruning ensures the stream never grows unbounded."
|
|
191
|
-
},
|
|
192
|
-
{
|
|
193
|
-
"id": "q3",
|
|
194
|
-
"question": "You want to see all compliance violations from the last hour. Which `queryEvents` call is correct?",
|
|
195
|
-
"choices": {
|
|
196
|
-
"A": "`queryEvents(rootDir, { type: 'compliance-violation', since: oneHourAgo.toISOString() })`",
|
|
197
|
-
"B": "`queryEvents(rootDir, { source: 'compliance', limit: 100 })`",
|
|
198
|
-
"C": "`queryEvents(rootDir, { symbol: 'compliance-violation' })`",
|
|
199
|
-
"D": "`queryEvents(rootDir, { type: 'error-encountered', agent: 'compliance' })`",
|
|
200
|
-
"E": "`queryEvents(rootDir)` and filter in application code"
|
|
201
|
-
},
|
|
202
|
-
"correct": "A",
|
|
203
|
-
"explanation": "The `type` filter matches the event classification (`'compliance-violation'` is one of the 12 event types), and the `since` filter restricts to events after the given ISO timestamp. Source `'compliance'` does not exist \u2014 valid sources are `post-write-hook`, `mcp-tool-call`, `stop-hook`, `conversation`, `agent-action`, and `error`. The `symbol` filter matches against the event's `symbols` array, not the event type."
|
|
204
|
-
},
|
|
205
|
-
{
|
|
206
|
-
"id": "q4",
|
|
207
|
-
"question": "File I/O fails when `emitEvent` tries to append to the JSONL file. What happens to the event?",
|
|
208
|
-
"choices": {
|
|
209
|
-
"A": "The event is lost \u2014 it was not written anywhere",
|
|
210
|
-
"B": "An exception is thrown and the calling tool fails",
|
|
211
|
-
"C": "The event is still stored in the in-memory buffer \u2014 file write failure is non-fatal",
|
|
212
|
-
"D": "The event is written to a fallback file at `/tmp/paradigm-events.jsonl`",
|
|
213
|
-
"E": "The event is queued and retried on the next `emitEvent` call"
|
|
214
|
-
},
|
|
215
|
-
"correct": "C",
|
|
216
|
-
"explanation": "Events are always pushed to the memory buffer (`memoryStream`) before attempting file I/O. The file write is wrapped in a try/catch that silently absorbs failures. This design ensures that ambient coordination continues functioning even if the filesystem is temporarily unavailable. The memory buffer is independently bounded to 1000 events, providing the same capacity as the file-based stream."
|
|
217
|
-
},
|
|
218
|
-
{
|
|
219
|
-
"id": "q5",
|
|
220
|
-
"question": "A project wants ambient coordination but finds `symbol-queried` events too noisy. How should they configure the event stream?",
|
|
221
|
-
"choices": {
|
|
222
|
-
"A": "Set `enabled: false` to disable the entire event stream",
|
|
223
|
-
"B": "Set `max_events: 100` to reduce the buffer size",
|
|
224
|
-
"C": "Add `suppress: ['symbol-queried']` to the EventStreamConfig to silence that event type while keeping all others",
|
|
225
|
-
"D": "Remove all `paradigm_search` and `paradigm_navigate` tools from the MCP server",
|
|
226
|
-
"E": "Set `storage: 'memory'` so noisy events are not persisted"
|
|
227
|
-
},
|
|
228
|
-
"correct": "C",
|
|
229
|
-
"explanation": "The `suppress` array in `EventStreamConfig` blacklists specific event types. Adding `'symbol-queried'` to this list prevents those events from being emitted while preserving all other event types. This is the targeted solution \u2014 `enabled: false` would kill the entire ambient system, `max_events: 100` would reduce history but not noise, and `storage: 'memory'` would affect persistence but not which events fire."
|
|
230
|
-
}
|
|
231
|
-
]
|
|
232
|
-
},
|
|
233
|
-
{
|
|
234
|
-
"id": "attention-scoring",
|
|
235
|
-
"title": "Attention & Scoring",
|
|
236
|
-
"content": "## AgentAttention Patterns\n\nEvery agent in the ambient system has an attention configuration \u2014 a set of patterns that define what the agent notices. Think of it as a personalized filter: events that match the agent's attention patterns are potentially relevant; events that do not match are noise.\n\nThe `AgentAttention` interface has five fields:\n\n```typescript\ninterface AgentAttention {\n symbols?: string[]; // Symbol patterns (e.g., [\"^*\", \"#*-middleware\"])\n paths?: string[]; // File path patterns (e.g., [\"auth/**\", \"middleware/**\"])\n concepts?: string[]; // Semantic triggers (e.g., [\"JWT\", \"RBAC\", \"injection\"])\n signals?: AttentionSignal[]; // Event type triggers (e.g., [{ type: \"gate-added\" }])\n threshold?: number; // Confidence threshold (default 0.6)\n}\n```\n\n**Symbols** use glob patterns to match against the `symbols` array on events. A security agent watching `[\"^*\", \"#*-auth\", \"#*-middleware\"]` will match any gate symbol and any component whose name ends in `-auth` or `-middleware`.\n\n**Paths** use glob patterns to match against the `path` field on events. A builder watching `[\"src/**\", \"lib/**\", \"packages/**\"]` matches any source file change.\n\n**Concepts** are semantic keywords matched against the event's `context`, `keywords`, and `type` fields (all lowercased). A tester watching `[\"test\", \"coverage\", \"assertion\"]` will match events mentioning those terms.\n\n**Signals** match against the event's `type` field. A security agent with `signals: [{ type: 'gate-added' }, { type: 'route-created' }]` will match whenever a new gate or route appears in the stream.\n\n## Four Scoring Dimensions\n\nWhen an event enters the stream, each agent scores it against their attention patterns across four dimensions:\n\n**symbolMatch (0.0-1.0):** For each pattern in the agent's `symbols` array, check if any symbol in the event matches (using glob). If any match is found, `symbolMatch = 1.0`. 
If no agent symbols are defined or no event symbols exist, `symbolMatch = 0.0`.\n\n**pathMatch (0.0-1.0):** For each pattern in the agent's `paths` array, check if the event's `path` matches. If any match is found, `pathMatch = 1.0`. Binary: either a path matches or it does not.\n\n**conceptMatch (0.0-1.0):** The event's `context`, `keywords`, and `type` are joined into a lowercased text. Each concept in the agent's `concepts` array is checked for inclusion. The score is `matched / total_concepts`. If the agent watches 5 concepts and 3 appear in the event text, `conceptMatch = 0.6`.\n\n**signalMatch (0.0-1.0):** For each signal in the agent's `signals` array, check if the event's `type` matches. If any match, `signalMatch = 1.0`. Binary.\n\n## Max-Based Score\n\nThe overall score is the **maximum** of the four dimensions:\n\n```typescript\nconst score = Math.max(symbolMatch, pathMatch, conceptMatch, signalMatch);\n```\n\nThis is a deliberate design choice. Using max (rather than average or weighted sum) means a single strong match is enough to trigger attention. A security agent does not need to match on all four dimensions \u2014 if the event mentions a gate symbol (`symbolMatch = 1.0`), that alone is sufficient even if the file path, concepts, and signals do not match.\n\nThe alternative (averaging) would dilute strong signals: a perfect symbol match (1.0) with no other matches would average to 0.25, likely falling below the threshold. Max scoring ensures that domain-specific expertise in any single dimension is respected.\n\n## Threshold-Based Self-Nomination\n\nAfter scoring, the agent checks its threshold:\n\n```typescript\nconst threshold = attention.threshold ?? 0.6;\nconst shouldNominate = score >= threshold;\n```\n\nIf the score meets or exceeds the threshold, the agent should self-nominate a contribution. 
If not, the agent stays quiet, and the `quietReason` field records why: `'below-threshold'`.\n\nThe default threshold is 0.6, but different agent roles have different defaults based on their domain:\n\n| Role | Default Threshold | Rationale |\n|---|---|---|\n| architect | 0.5 | Broad awareness \u2014 should notice most structural changes |\n| builder | 0.7 | Focused \u2014 only speaks when directly relevant to implementation |\n| reviewer | 0.6 | Balanced \u2014 watches code quality and patterns |\n| tester | 0.5 | Broad \u2014 testing intersects all areas |\n| security | 0.4 | Most sensitive \u2014 should speak up early and often |\n\nA lower threshold means the agent speaks up more often (more false positives, fewer missed issues). A higher threshold means the agent speaks up less often (fewer false positives, more missed issues). Security uses 0.4 because the cost of missing a security issue far outweighs the cost of a false alarm.\n\n## scoreEventForAgent\n\nThe `scoreEventForAgent` function ties everything together:\n\n```typescript\nfunction scoreEventForAgent(\n event: StreamEvent,\n agentId: string,\n attention: AgentAttention\n): AttentionScore\n```\n\nIt returns an `AttentionScore` with five fields:\n- `agentId` \u2014 Which agent evaluated this event\n- `score` \u2014 Overall relevance (0.0-1.0)\n- `breakdown` \u2014 The four dimension scores (`symbolMatch`, `pathMatch`, `conceptMatch`, `signalMatch`)\n- `shouldNominate` \u2014 Whether the score exceeds the agent's threshold\n- `quietReason` \u2014 Why the agent stayed quiet (if it did)\n\nThe breakdown is valuable for debugging attention patterns. If an agent is not speaking up when expected, the breakdown shows which dimension scored low. 
If `symbolMatch = 0.0` but the event clearly involves the agent's domain, the agent's symbol patterns may need expanding.\n\n## DEFAULT_ATTENTION for Standard Roles\n\nParadigm provides default attention configurations for five standard roles:\n\n**Architect** \u2014 Watches all flows (`$*`) and components (`#*`), concepts like `architecture`, `design`, `pattern`, `refactor`. Threshold 0.5.\n\n**Builder** \u2014 Watches source paths (`src/**`, `lib/**`, `packages/**`). Threshold 0.7.\n\n**Reviewer** \u2014 Watches concepts like `code quality`, `bug`, `smell`, `convention`. Threshold 0.6.\n\n**Tester** \u2014 Watches test paths (`**/*.test.*`, `**/*.spec.*`), concepts like `test`, `coverage`, `assertion`, and `error-encountered` signals. Threshold 0.5.\n\n**Security** \u2014 Watches gate symbols (`^*`), auth components (`#*-auth`, `#*-middleware`), auth paths (`auth/**`, `middleware/**`, `guards/**`), concepts like `permission`, `JWT`, `session`, `RBAC`, `XSS`, `injection`, and signals `gate-added` and `route-created`. Threshold 0.4.\n\nThese defaults are overridable in the agent's `.agent` file via the `attention` field. A security agent working on a project with no web routes might raise its threshold to 0.6 and remove the path patterns.",
|
|
237
|
-
"keyConcepts": [
|
|
238
|
-
"AgentAttention has four pattern types: symbols, paths, concepts, signals \u2014 plus a threshold",
|
|
239
|
-
"Four scoring dimensions: symbolMatch, pathMatch, conceptMatch, signalMatch \u2014 each 0.0-1.0",
|
|
240
|
-
"Score is max-based (Math.max of all four) \u2014 a single strong match is sufficient",
|
|
241
|
-
"Default threshold is 0.6 \u2014 agent self-nominates when score >= threshold",
|
|
242
|
-
"Five default attention profiles: architect (0.5), builder (0.7), reviewer (0.6), tester (0.5), security (0.4)",
|
|
243
|
-
"scoreEventForAgent returns AttentionScore with score, breakdown, shouldNominate, and quietReason"
|
|
244
|
-
],
|
|
245
|
-
"quiz": [
|
|
246
|
-
{
|
|
247
|
-
"id": "q1",
|
|
248
|
-
"question": "A security agent (threshold 0.4) evaluates an event where a new route was created (`type: 'route-created'`). The agent's attention includes `signals: [{ type: 'route-created' }]` but the event has no matching symbols, paths, or concepts. What is the overall score and does the agent nominate?",
|
|
249
|
-
"choices": {
|
|
250
|
-
"A": "Score 0.25 (average of 0, 0, 0, 1) \u2014 does not nominate (below 0.4)",
|
|
251
|
-
"B": "Score 1.0 (max of 0, 0, 0, 1) \u2014 nominates (1.0 >= 0.4)",
|
|
252
|
-
"C": "Score 0.0 \u2014 signal matching requires at least one other dimension to also match",
|
|
253
|
-
"D": "Score 0.4 \u2014 signals are weighted at 0.4 for security agents",
|
|
254
|
-
"E": "Score 1.0 but does not nominate \u2014 signal matches are informational only"
|
|
255
|
-
},
|
|
256
|
-
"correct": "B",
|
|
257
|
-
"explanation": "The score is `Math.max(0, 0, 0, 1.0) = 1.0`. Signal match is binary \u2014 the event type `'route-created'` matches the agent's signal pattern, so `signalMatch = 1.0`. The overall score uses max, not average, so a single dimension scoring 1.0 gives an overall score of 1.0. Since 1.0 >= 0.4 (the security agent's threshold), the agent self-nominates."
|
|
258
|
-
},
|
|
259
|
-
{
|
|
260
|
-
"id": "q2",
|
|
261
|
-
"question": "A reviewer agent watches 4 concepts: `['code quality', 'bug', 'smell', 'convention']`. An event has `keywords: ['code', 'quality']` and `context: 'Fixed a bug in the validation logic'`. What is the conceptMatch score?",
|
|
262
|
-
"choices": {
|
|
263
|
-
"A": "0.25 \u2014 only 'bug' matches (1 out of 4)",
|
|
264
|
-
"B": "0.50 \u2014 'code quality' and 'bug' both match (2 out of 4)",
|
|
265
|
-
"C": "0.75 \u2014 'code quality', 'bug', and partial 'convention' match",
|
|
266
|
-
"D": "1.0 \u2014 all keywords are present in the combined text",
|
|
267
|
-
"E": "0.0 \u2014 concepts must be exact matches against the keywords array only"
|
|
268
|
-
},
|
|
269
|
-
"correct": "B",
|
|
270
|
-
"explanation": "The event's `context`, `keywords`, and `type` are joined into a lowercased text: `'code quality fixed a bug in the validation logic'` (keywords `['code', 'quality']` become part of the joined text). The concept `'code quality'` appears in this text (substring match). The concept `'bug'` appears. The concepts `'smell'` and `'convention'` do not. So `matched = 2`, `total = 4`, `conceptMatch = 2/4 = 0.5`."
|
|
271
|
-
},
|
|
272
|
-
{
|
|
273
|
-
"id": "q3",
|
|
274
|
-
"question": "Why does scoring use `Math.max` rather than an average of the four dimensions?",
|
|
275
|
-
"choices": {
|
|
276
|
-
"A": "Max is faster to compute than average",
|
|
277
|
-
"B": "Average would require all four dimensions to have non-zero values, which is too restrictive",
|
|
278
|
-
"C": "Max ensures a single strong match in any domain-specific dimension is sufficient \u2014 averaging would dilute a perfect 1.0 match to 0.25",
|
|
279
|
-
"D": "Max produces higher scores, making agents more talkative which is always desirable",
|
|
280
|
-
"E": "The four dimensions are redundant so only the best one matters"
|
|
281
|
-
},
|
|
282
|
-
"correct": "C",
|
|
283
|
-
"explanation": "Max-based scoring respects domain-specific expertise. A security agent that matches a gate symbol perfectly (`symbolMatch = 1.0`) should absolutely speak up, even if the file path, concepts, and signals do not match. Averaging would reduce that 1.0 to 0.25, likely below the agent's threshold. Max scoring means that expertise in any single dimension is sufficient to trigger attention."
|
|
284
|
-
},
|
|
285
|
-
{
|
|
286
|
-
"id": "q4",
|
|
287
|
-
"question": "The security agent's default threshold is 0.4 while the builder's is 0.7. What is the practical effect of this difference?",
|
|
288
|
-
"choices": {
|
|
289
|
-
"A": "The security agent's nominations are ranked higher in the UI",
|
|
290
|
-
"B": "The security agent self-nominates on weaker matches because the cost of missing a security issue outweighs false alarms \u2014 the builder stays quiet unless directly relevant",
|
|
291
|
-
"C": "The builder processes events faster because it rejects more of them",
|
|
292
|
-
"D": "The security agent receives more events from the stream",
|
|
293
|
-
"E": "There is no practical difference \u2014 thresholds only affect logging verbosity"
|
|
294
|
-
},
|
|
295
|
-
"correct": "B",
|
|
296
|
-
"explanation": "A lower threshold means the agent speaks up more often \u2014 even on partial matches. For security, this is the right tradeoff: a false alarm about a potential security issue costs little, but missing a real vulnerability can be catastrophic. For the builder, a higher threshold (0.7) means it only self-nominates when the event is strongly relevant to implementation work, avoiding noise from events that are merely tangentially related."
|
|
297
|
-
}
|
|
298
|
-
]
|
|
299
|
-
},
|
|
300
|
-
{
|
|
301
|
-
"id": "nominations-debates",
|
|
302
|
-
"title": "Nominations & Debates",
|
|
303
|
-
"content": "## Agents Self-Nominate Contributions\n\nIn the ambient model, agents do not push messages at each other. Instead, when an event exceeds an agent's attention threshold, the agent creates a **nomination** \u2014 a structured contribution that may or may not be surfaced to the human. Nominations are the bridge between passive observation and active participation.\n\nThe key insight is that not every observation deserves immediate attention. A nomination captures the agent's contribution in a structured format, and surfacing rules determine when and how to present it. This prevents the \"every agent shouts at once\" problem that plagues naive multi-agent systems.\n\n## Nomination Anatomy\n\nA nomination has 14 fields:\n\n```typescript\ninterface Nomination {\n id: string; // Unique ID\n agent: string; // Nominating agent\n relevance: number; // Attention score (0.0-1.0)\n urgency: NominationUrgencyLevel; // critical, high, medium, low\n type: NominationType; // warning, suggestion, question, offer, observation\n brief: string; // 1-line summary\n detail?: string; // Full contribution (shown on engage)\n action_offered?: string; // Action the agent offers to take\n evidence?: NominationEvidence[]; // Supporting evidence\n triggered_by: string[]; // Event ID(s) that triggered this\n timestamp: string; // ISO 8601\n surfaced: boolean; // Whether shown to human\n engaged?: boolean; // Whether human interacted\n response?: string; // accepted, dismissed, deferred\n}\n```\n\nThe `brief` field is critical \u2014 it is the first (and possibly only) thing the human sees. A good brief is actionable and specific: \"New POST /api/payments route lacks ^payment-authorized gate\" rather than \"Security concern detected.\"\n\nThe `detail` field expands on the brief with full reasoning, code references, and recommendations. 
It is shown only if the human engages with the nomination, saving context window space when the human dismisses or defers.\n\n## Urgency Levels\n\nFour urgency levels determine how aggressively a nomination is surfaced:\n\n| Level | Meaning | Surfacing Rule |\n|---|---|---|\n| `critical` | Immediate action required \u2014 security vulnerability, data loss risk | Always surfaced immediately, interrupts if necessary |\n| `high` | Should be addressed before session ends \u2014 missing gate, broken flow | Surfaced in the current batch, highlighted |\n| `medium` | Worth knowing but not blocking \u2014 code smell, missing test | Surfaced if the human has not dismissed similar nominations recently |\n| `low` | FYI \u2014 style suggestion, minor optimization opportunity | Batched and shown only if the human asks or at session end |\n\nThe surfacing rules are configurable via `SurfacingConfig`. A user who finds security nominations too frequent can set the security agent's `min_urgency` to `high`, silencing `medium` and `low` nominations from that agent.\n\n## Nomination Types\n\nFive types classify the nature of the contribution:\n\n- **warning** \u2014 Something is wrong or risky (e.g., \"Route without gate\", \"Aspect anchor drift detected\")\n- **suggestion** \u2014 An improvement opportunity (e.g., \"Consider extracting this into a shared utility\")\n- **question** \u2014 The agent needs clarification (e.g., \"Should this endpoint be public or require authentication?\")\n- **offer** \u2014 The agent volunteers to do something (e.g., \"I can write the test suite for this component\")\n- **observation** \u2014 A neutral factual note (e.g., \"This is the third time this pattern has been refactored\")\n\nThe `action_offered` field is used with `offer` type nominations. When the human accepts an offer, the agent can proceed to take the offered action.\n\n## Evidence\n\nNominations can include evidence to support their claims. 
Each `NominationEvidence` item can reference a file, a symbol, a pattern from the agent's notebook, specific line numbers, or a textual description.\n\nEvidence transforms a nomination from opinion to argument. \"This route needs a gate\" is a suggestion. \"This route needs a gate \u2014 see portal.yaml line 42 where all /api/payments routes require ^payment-authorized, and `#payment-service` has a documented aspect ~pci-compliance-required\" is a compelling argument backed by project facts.\n\n## Storage\n\nNominations and debates are stored as JSONL files alongside the event stream:\n\n- `.paradigm/events/nominations.jsonl` \u2014 All nominations, append-only\n- `.paradigm/events/debates.jsonl` \u2014 Detected debates (conflicting/complementary nomination groups)\n\n## Debate Detection\n\nWhen multiple agents nominate on the same event or overlapping symbols, a **debate** may form. Paradigm detects debates by checking for overlapping `triggered_by` event IDs or overlapping symbols across nominations within a time window.\n\nA `Debate` has two types:\n- **conflicting** \u2014 The nominations disagree (e.g., architect says \"use SQL\" while builder says \"use NoSQL\")\n- **complementary** \u2014 The nominations agree but add different perspectives (e.g., security says \"add gate\" and tester says \"add test for gate\")\n\nDebates are surfaced as a group rather than individual nominations, so the human sees the full picture. A debate includes:\n- `topic` \u2014 What the debate is about (derived from overlapping symbols/events)\n- `nominations` \u2014 IDs of the participating nominations\n- `overlap_symbols` \u2014 Symbols that triggered grouping\n- `overlap_events` \u2014 Events that triggered grouping\n- `resolution` \u2014 How it was resolved (chosen nomination, reason, resolved by human or consensus)\n\n## MCP Tools\n\n**`paradigm_ambient_nominations`** \u2014 View pending nominations. 
Supports filtering by agent, urgency, type, and whether nominations have been surfaced. Returns nominations sorted by urgency (critical first) then by relevance score.\n\n**`paradigm_ambient_engage`** \u2014 Engage with a nomination. Pass the nomination ID and a response (`accepted`, `dismissed`, `deferred`). If accepted, the nomination's `detail` and `evidence` are returned for the agent to act on. If dismissed, the nomination is marked as seen but not acted upon. If deferred, it is re-queued for later surfacing.\n\nThe engage tool creates a feedback signal \u2014 over time, the pattern of accepted vs dismissed nominations helps calibrate attention thresholds. An agent whose nominations are consistently dismissed may need a higher threshold.",
|
|
304
|
-
"keyConcepts": [
|
|
305
|
-
"Nominations are structured contributions with 14 fields including brief, detail, evidence, urgency, and type",
|
|
306
|
-
"Four urgency levels: critical (immediate), high (before session ends), medium (batched), low (on-demand)",
|
|
307
|
-
"Five nomination types: warning, suggestion, question, offer, observation",
|
|
308
|
-
"Debate detection groups overlapping nominations from different agents as conflicting or complementary",
|
|
309
|
-
"Storage: .paradigm/events/nominations.jsonl and .paradigm/events/debates.jsonl",
|
|
310
|
-
"Two MCP tools: paradigm_ambient_nominations (view) and paradigm_ambient_engage (respond)"
|
|
311
|
-
],
|
|
312
|
-
"quiz": [
|
|
313
|
-
{
|
|
314
|
-
"id": "q1",
|
|
315
|
-
"question": "A security agent creates a nomination with `urgency: 'medium'` and `type: 'warning'` about a missing gate. The user has configured the security agent's `min_urgency` to `'high'`. What happens?",
|
|
316
|
-
"choices": {
|
|
317
|
-
"A": "The nomination is deleted \u2014 it does not meet the urgency threshold",
|
|
318
|
-
"B": "The nomination is recorded but not surfaced \u2014 it falls below the user's configured minimum urgency for that agent",
|
|
319
|
-
"C": "The nomination is surfaced anyway because warnings always override urgency settings",
|
|
320
|
-
"D": "The nomination is upgraded to `high` urgency automatically",
|
|
321
|
-
"E": "The user's setting is ignored because security nominations are always shown"
|
|
322
|
-
},
|
|
323
|
-
"correct": "B",
|
|
324
|
-
"explanation": "The nomination is still recorded in `.paradigm/events/nominations.jsonl` (all nominations are persisted), but surfacing rules respect the user's configuration. Since `min_urgency: 'high'` means only `high` and `critical` nominations from the security agent are shown, a `medium` nomination is suppressed. The nomination remains available if the user later queries all nominations or lowers the threshold."
|
|
325
|
-
},
|
|
326
|
-
{
|
|
327
|
-
"id": "q2",
|
|
328
|
-
"question": "An architect nominates \"Use a message queue for async processing\" while a builder nominates \"Use direct HTTP calls for simplicity\" \u2014 both triggered by the same `route-created` event. How does Paradigm classify this?",
|
|
329
|
-
"choices": {
|
|
330
|
-
"A": "Two independent nominations \u2014 no debate is detected because different agents created them",
|
|
331
|
-
"B": "A complementary debate \u2014 both are responding to the same event",
|
|
332
|
-
"C": "A conflicting debate \u2014 the nominations share a `triggered_by` event but propose different approaches",
|
|
333
|
-
"D": "An error \u2014 only one agent should nominate per event",
|
|
334
|
-
"E": "The nomination with the higher relevance score wins automatically"
|
|
335
|
-
},
|
|
336
|
-
"correct": "C",
|
|
337
|
-
"explanation": "Debate detection checks for overlapping `triggered_by` event IDs. Both nominations reference the same `route-created` event, so they are grouped. Since they propose different approaches (message queue vs HTTP calls), the debate is classified as `conflicting`. The debate is surfaced as a group so the human sees both perspectives together rather than individual nominations. Resolution requires a human choice or consensus."
|
|
338
|
-
},
|
|
339
|
-
{
|
|
340
|
-
"id": "q3",
|
|
341
|
-
"question": "A nomination has `evidence: [{ symbol: '^payment-authorized', file: 'portal.yaml', lines: { start: 42, end: 45 }, description: 'All /api/payments routes require this gate' }]`. Why is this better than a nomination without evidence?",
|
|
342
|
-
"choices": {
|
|
343
|
-
"A": "Evidence makes the nomination sort higher in the UI",
|
|
344
|
-
"B": "Nominations without evidence are automatically dismissed",
|
|
345
|
-
"C": "Evidence transforms the nomination from opinion to argument \u2014 the human can verify the claim against specific code locations without investigating from scratch",
|
|
346
|
-
"D": "Evidence is required for all nomination types",
|
|
347
|
-
"E": "Evidence triggers automatic remediation"
|
|
348
|
-
},
|
|
349
|
-
"correct": "C",
|
|
350
|
-
"explanation": "Evidence gives the nomination credibility and actionability. Without evidence, \"This route needs a gate\" requires the human to verify the claim manually. With evidence pointing to portal.yaml line 42 and the specific gate symbol, the human can quickly confirm the pattern and act. Evidence is optional (the `evidence` field is nullable), but nominations with evidence are more likely to be accepted rather than dismissed."
|
|
351
|
-
},
|
|
352
|
-
{
|
|
353
|
-
"id": "q4",
|
|
354
|
-
"question": "A tester creates a nomination with `type: 'offer'` and `action_offered: 'Write integration tests for the new payment flow'`. The human responds via `paradigm_ambient_engage` with `response: 'accepted'`. What happens next?",
|
|
355
|
-
"choices": {
|
|
356
|
-
"A": "Paradigm automatically generates the test files",
|
|
357
|
-
"B": "The nomination's `detail` and `evidence` are returned so the tester agent can proceed with the offered action",
|
|
358
|
-
"C": "The nomination is moved to a task queue for later execution",
|
|
359
|
-
"D": "The tester agent is immediately spawned in a new session",
|
|
360
|
-
"E": "Nothing \u2014 acceptance is recorded but has no effect"
|
|
361
|
-
},
|
|
362
|
-
"correct": "B",
|
|
363
|
-
"explanation": "When a nomination is accepted via `paradigm_ambient_engage`, the full nomination including `detail` and `evidence` is returned. For `offer` type nominations, this signals that the agent should proceed with the offered action. The actual execution depends on the orchestration context \u2014 in a multi-agent session, the tester may act immediately; in a single-agent session, the offer acceptance is recorded for the next session. Paradigm does not auto-generate code; it surfaces the offer for the agent to execute."
|
|
364
|
-
},
|
|
365
|
-
{
|
|
366
|
-
"id": "q5",
|
|
367
|
-
"question": "Over 30 sessions, a reviewer agent's nominations are dismissed 80% of the time. What does this pattern suggest?",
|
|
368
|
-
"choices": {
|
|
369
|
-
"A": "The reviewer agent should be removed from the project",
|
|
370
|
-
"B": "The reviewer's attention threshold may be too low \u2014 it is nominating on weak matches, and raising the threshold would reduce false positives",
|
|
371
|
-
"C": "The human is ignoring valid feedback and should be trained",
|
|
372
|
-
"D": "Dismissed nominations indicate the system is working correctly \u2014 not all observations are actionable",
|
|
373
|
-
"E": "The nomination storage file is corrupted"
|
|
374
|
-
},
|
|
375
|
-
"correct": "B",
|
|
376
|
-
"explanation": "An 80% dismissal rate is a strong signal that the agent is speaking up too often on weak matches. The most likely fix is raising the agent's attention threshold (e.g., from 0.6 to 0.75) so it only nominates on stronger matches. The `paradigm_ambient_engage` feedback loop exists precisely for this calibration \u2014 over time, the pattern of accepted vs dismissed nominations reveals whether an agent's threshold is well-tuned."
|
|
377
|
-
}
|
|
378
|
-
]
|
|
379
|
-
},
|
|
380
|
-
{
|
|
381
|
-
"id": "data-sovereignty",
|
|
382
|
-
"title": "Data Sovereignty",
|
|
383
|
-
"content": "## The Local Brain Principle\n\nAll data produced during agent work is project-locked by default. This is not a policy choice that requires opt-in \u2014 it is the architectural default. No data leaves the project unless the user explicitly configures it to do so. This principle is called the \"local brain\" \u2014 every project has its own self-contained intelligence that never leaks.\n\nThe reason is trust. When an agent records a learning journal entry about your payment processing logic, that entry should not appear in another user's project. When a work log captures which files were modified, those file paths should not flow to an analytics dashboard without consent. Data sovereignty means the user controls every boundary their data crosses.\n\n## Trust Rings\n\nData classification uses four concentric trust rings, each expanding the boundary of who can see the data:\n\n**Ring 1: Project-Locked** \u2014 Data never leaves the project directory. Work log entries, event stream, nominations, and team decisions are project-locked by default. Storage lives in `.paradigm/` within the project.\n\n**Ring 2: User-Scoped** \u2014 Data travels across the user's own projects but not beyond. Learning journal entries are user-scoped \u2014 an agent's insights from project A are available when working on project B, but only for the same user. Storage lives in `~/.paradigm/`.\n\n**Ring 3: Creator-Upstream** \u2014 Anonymized, aggregate data flows to agent creators (for agents installed from a marketplace). Only high-level metrics like task type, outcome, and helpfulness rating \u2014 never code, file paths, symbol names, or conversation content. This ring requires explicit opt-in.\n\n**Ring 4: Network-Public** \u2014 Fully anonymized, aggregated statistics shared publicly. Only `aggregated_task_success_rates` and `anonymized_pattern_frequency`. Requires explicit opt-in. 
Nothing in this ring can identify a project, user, or agent.\n\nThe rings are ordered: data at ring 1 can never reach ring 3 without passing through ring 2 first. Each ring expansion requires a policy declaration.\n\n## data-policy.yaml Format\n\nThe data policy is configured in `.paradigm/data-policy.yaml`:\n\n```yaml\nversion: \"1.0\"\ndefault_ring: project-locked\n\nobservation:\n allow:\n - \"src/**\"\n - \".paradigm/**\"\n - \"portal.yaml\"\n deny:\n - \".env*\"\n - \"**/*.key\"\n - \"**/*.pem\"\n - \"**/secrets/**\"\n\nstreams:\n work_log:\n ring: project-locked\n allow_content: [file_paths, symbol_names, outcome]\n deny_content: [code_snippets, file_contents, diff_content]\n learning_journal:\n ring: user-scoped\n allow_content: [pattern_descriptions, confidence_adjustments, approach_descriptions]\n deny_content: [code_snippets, file_contents, symbol_names_with_context]\n redaction:\n - pattern: \"\\\\b[A-Z_]{2,}_KEY\\\\b\"\n - pattern: \"password|secret|token\"\n team_decisions:\n ring: project-locked\n allow_content: [rationale, alternatives, symbol_references]\n deny_content: [implementation_details]\n\nupstream:\n ring: creator-upstream\n allowed: [task_type, outcome, helpfulness, duration_bucket, error_category]\n denied: [code_of_any_kind, file_paths, symbol_names, conversation_content, user_identity]\n\nnetwork:\n ring: network-public\n opt_in: false\n if_opted_in: [aggregated_task_success_rates, anonymized_pattern_frequency]\n```\n\n## Observation Rules\n\nObservation rules control which files agents can observe. The `allow` list defines glob patterns for permitted paths. The `deny` list defines patterns that are always blocked \u2014 deny overrides allow.\n\nThe default denies `.env*`, `*.key`, `*.pem`, and `**/secrets/**`. These patterns catch common secret file locations. 
The deny list is **additive** when merging user policy over defaults \u2014 you can add deny patterns but never remove the built-in ones.\n\n## Stream Content Rules\n\nEach knowledge stream has its own content rules defining what categories of content are allowed or denied:\n\n**15 content categories:** `file_paths`, `symbol_names`, `symbol_names_with_context`, `outcome`, `pattern_descriptions`, `confidence_adjustments`, `approach_descriptions`, `rationale`, `alternatives`, `symbol_references`, `code_snippets`, `file_contents`, `diff_content`, `implementation_details`, `architectural_decisions`.\n\nThe work log allows `file_paths` and `symbol_names` (needed for standup context) but denies `code_snippets` (no raw code in work logs). The learning journal allows `pattern_descriptions` (abstract learnings) but denies `symbol_names_with_context` (no project-specific details in the cross-project journal).\n\n**Redaction patterns** use regex to scrub sensitive content before it is stored. The default learning journal redaction catches environment variable names (`\\b[A-Z_]{2,}_KEY\\b`) and common secret terms (`password|secret|token`). Matches are replaced with `[REDACTED]`.\n\n## Per-Agent Overrides\n\nThe `agent_overrides` section allows per-agent policy customization:\n\n```yaml\nagent_overrides:\n security:\n observation:\n allow: [\"src/**\", \".paradigm/**\", \"portal.yaml\", \".env.example\"]\n learning_journal:\n deny_content: [code_snippets, file_contents, diff_content, implementation_details]\n upstream:\n opt_in: false\n```\n\nThis gives the security agent slightly broader observation (it can read `.env.example` to verify it matches the template) while restricting its journal content and disabling upstream feedback.\n\n## Enforcement Boundaries\n\nThe data policy is enforced at eight boundaries:\n\n1. **event-emission** \u2014 Before an event enters the stream, check if the path is observable\n2. 
**attention-filtering** \u2014 Agents only score events for paths they are allowed to observe\n3. **work-log-recording** \u2014 Content is filtered and redacted before writing to the work log\n4. **journal-recording** \u2014 Content is filtered and redacted before writing to the journal\n5. **cross-project-transfer** \u2014 Journal entries marked `transferable` are checked against ring 2 rules\n6. **upstream-feedback** \u2014 Data flowing to agent creators is checked against ring 3 rules\n7. **network-aggregation** \u2014 Data flowing to the network is checked against ring 4 rules\n8. **notebook-promotion** \u2014 Journal entries promoted to notebook are checked against content rules\n\nEvery enforcement action is auditable. The `AuditEntry` interface captures who, when, what boundary, what data category, and what action was taken (allowed, filtered, redacted, or blocked).\n\n## The Merge Rule\n\nWhen a user provides a `data-policy.yaml`, it is merged over the `DEFAULT_DATA_POLICY` with a critical rule: **deny lists are additive, never replacing**. If the default denies `.env*` and the user's policy does not mention `.env*`, the deny still applies. The user can add more deny patterns but cannot remove built-in protections. Allow lists, by contrast, can be fully replaced.",
|
|
384
|
-
"keyConcepts": [
|
|
385
|
-
"Four trust rings: project-locked (ring 1), user-scoped (ring 2), creator-upstream (ring 3), network-public (ring 4)",
|
|
386
|
-
"All data is project-locked by default \u2014 the local brain principle",
|
|
387
|
-
"data-policy.yaml configures observation rules, stream content rules, upstream rules, and network rules",
|
|
388
|
-
"Deny lists are additive on merge \u2014 users can add deny patterns but never remove built-in protections",
|
|
389
|
-
"Eight enforcement boundaries from event-emission to notebook-promotion",
|
|
390
|
-
"Per-agent overrides allow customized observation and content rules per agent role"
|
|
391
|
-
],
|
|
392
|
-
"quiz": [
|
|
393
|
-
{
|
|
394
|
-
"id": "q1",
|
|
395
|
-
"question": "An agent's learning journal entry about JWT refresh token rotation in project A should be available when the same agent works on project B. Which trust ring enables this?",
|
|
396
|
-
"choices": {
|
|
397
|
-
"A": "Ring 1: project-locked \u2014 journal entries stay in the project",
|
|
398
|
-
"B": "Ring 2: user-scoped \u2014 the journal travels with the agent across the user's projects",
|
|
399
|
-
"C": "Ring 3: creator-upstream \u2014 the agent creator's infrastructure bridges projects",
|
|
400
|
-
"D": "Ring 4: network-public \u2014 cross-project sharing requires public access",
|
|
401
|
-
"E": "No ring \u2014 cross-project sharing is not supported"
|
|
402
|
-
},
|
|
403
|
-
"correct": "B",
|
|
404
|
-
"explanation": "Learning journal entries are stored at `~/.paradigm/agents/{id}/journal/` (user-scoped, ring 2). This location is in the user's home directory, not the project directory, so it persists across projects. The same agent identity (`id`) is used across projects, giving the agent access to its own journal entries regardless of which project it is currently working in. Ring 2 is the minimum trust level needed for cross-project travel within the same user's scope."
|
|
405
|
-
},
|
|
406
|
-
{
|
|
407
|
-
"id": "q2",
|
|
408
|
-
"question": "A user creates a `data-policy.yaml` with `observation.deny: ['**/logs/**']` but does NOT include `.env*` in their deny list. Can agents observe `.env` files?",
|
|
409
|
-
"choices": {
|
|
410
|
-
"A": "Yes \u2014 the user's policy replaces the default, and `.env*` is not in the user's deny list",
|
|
411
|
-
"B": "No \u2014 deny lists are additive on merge. The default `.env*` deny is always present, and the user's `**/logs/**` is added to it",
|
|
412
|
-
"C": "It depends on whether the agent has an override in `agent_overrides`",
|
|
413
|
-
"D": "Yes, but the content will be redacted before storage",
|
|
414
|
-
"E": "No \u2014 `.env` files are hardcoded as blocked regardless of policy"
|
|
415
|
-
},
|
|
416
|
-
"correct": "B",
|
|
417
|
-
"explanation": "The merge rule states that deny lists are additive, never replacing. The default policy denies `.env*`, `**/*.key`, `**/*.pem`, and `**/secrets/**`. The user's policy adds `**/logs/**` to this list. The merged deny list contains all five patterns. This design prevents users from accidentally exposing secret files by omitting them from their custom policy."
|
|
418
|
-
},
|
|
419
|
-
{
|
|
420
|
-
"id": "q3",
|
|
421
|
-
"question": "A work log entry is recorded with `summary: 'Fixed the API_SECRET_KEY rotation logic in auth.ts'`. The work log stream has `deny_content: [code_snippets]` but allows `file_paths` and `symbol_names`. What happens to the summary?",
|
|
422
|
-
"choices": {
|
|
423
|
-
"A": "The summary is stored as-is \u2014 `API_SECRET_KEY` is a symbol name, not a code snippet",
|
|
424
|
-
"B": "The entire summary is blocked \u2014 it contains a reference to a secret",
|
|
425
|
-
"C": "The summary is stored as-is because content category filtering only applies to structured fields, not free-text summaries",
|
|
426
|
-
"D": "If the learning journal's redaction patterns are applied to the work log, `API_SECRET_KEY` would match `\\b[A-Z_]{2,}_KEY\\b` and be replaced with `[REDACTED]`",
|
|
427
|
-
"E": "The summary is rejected and the work log entry fails to record"
|
|
428
|
-
},
|
|
429
|
-
"correct": "D",
|
|
430
|
-
"explanation": "Redaction patterns are regex-based and applied to content before storage. The pattern `\\b[A-Z_]{2,}_KEY\\b` matches `API_SECRET_KEY`. If this pattern is configured on the work log stream (or if the filtering function applies cross-stream redaction), the summary would become `'Fixed the [REDACTED] rotation logic in auth.ts'`. In the default policy, this specific redaction is configured on `learning_journal`, not `work_log` \u2014 but the `filterContent` function applies whatever redaction patterns are configured for the target stream."
|
|
431
|
-
},
|
|
432
|
-
{
|
|
433
|
-
"id": "q4",
|
|
434
|
-
"question": "What upstream data is allowed by default in the DEFAULT_DATA_POLICY?",
|
|
435
|
-
"choices": {
|
|
436
|
-
"A": "Everything is allowed upstream \u2014 creators need full data for improvement",
|
|
437
|
-
"B": "Only `task_type`, `outcome`, `helpfulness`, `duration_bucket`, and `error_category` \u2014 code, file paths, symbol names, conversation content, and user identity are explicitly denied",
|
|
438
|
-
"C": "Nothing is allowed \u2014 upstream sharing is disabled by default",
|
|
439
|
-
"D": "File paths and symbol names are allowed, but code content is denied",
|
|
440
|
-
"E": "Only aggregated statistics, no individual session data"
|
|
441
|
-
},
|
|
442
|
-
"correct": "B",
|
|
443
|
-
"explanation": "The default upstream rules allow five high-level fields: `task_type` (what kind of work), `outcome` (success/failure), `helpfulness` (user rating), `duration_bucket` (how long it took, bucketed), and `error_category` (what went wrong, categorized). Five categories are explicitly denied: `code_of_any_kind`, `file_paths`, `symbol_names`, `conversation_content`, and `user_identity`. This ensures agent creators get enough signal to improve their agents without receiving any sensitive project data."
|
|
444
|
-
}
|
|
445
|
-
]
|
|
446
|
-
},
|
|
447
|
-
{
|
|
448
|
-
"id": "agent-renaissance",
|
|
449
|
-
"title": "Agent Manifest Renaissance",
|
|
450
|
-
"content": "## Six New Dimensions\n\nBefore v5.0, an agent profile (`AgentProfile`) had six core fields: `id`, `role`, `description`, `personality`, `expertise`, and `transferable` patterns. These covered identity and knowledge but said nothing about how agents observe, learn, contribute context, report work, collaborate, or decide when to speak up.\n\nv5.0 adds six new dimensions to the agent manifest, transforming it from a static identity card into a living behavioral specification:\n\n1. **Attention** (`AgentAttention`) \u2014 What this agent notices in the event stream\n2. **Learning** (`AgentLearning`) \u2014 How this agent improves over time\n3. **Context** (`AgentContext`) \u2014 What this agent contributes to shared context\n4. **Reporting** (`AgentReporting`) \u2014 How this agent logs work and learnings\n5. **Collaboration** (`AgentCollaboration`) \u2014 How this agent interacts with others\n6. **Nomination** (`AgentNomination`) \u2014 When this agent speaks up in ambient mode\n\nAll six are optional on the `AgentProfile` interface for backward compatibility. Agents without these fields use sensible defaults or are treated as non-ambient (they do not participate in the observation-nomination loop).\n\n## Attention (AgentAttention)\n\nCovered in detail in the Attention & Scoring lesson. The attention dimension defines what the agent watches for: symbol patterns, file paths, semantic concepts, and signal types. The threshold determines sensitivity.\n\nDefault configs exist for five standard roles via `DEFAULT_ATTENTION`. For example, the security agent defaults to watching all gate symbols (`^*`), auth-related components and paths, security concepts, and gate/route signals with a low threshold of 0.4.\n\n## Learning (AgentLearning)\n\nThe learning dimension has two layers:\n\n**Intrinsic Learning** (`IntrinsicLearning`) \u2014 The agent's own drive to improve. This is optional for downloaded agents (they may or may not want to learn from user feedback). 
Four sub-sections:\n\n- **feedback** \u2014 When to ask for assessment: after work, after recommendations, from which agents, from humans. A security agent might configure `from_agents: ['architect', 'reviewer']` to weight peer feedback.\n- **adaptation** \u2014 How to adjust: `confidence_ema_alpha` (default 0.3) controls how quickly confidence scores move. `notebook_auto_promote` auto-promotes high-value journal entries. `pattern_extraction` extracts reusable patterns from learnings.\n- **reflection** \u2014 When to self-reflect: on failure, on correction, on debate loss. Each trigger records a journal entry with the relevant trigger type.\n- **calibration** \u2014 Accuracy targets: `target_accuracy` (default 0.85) is the goal. `overconfidence_alert` (default 0.15) triggers when estimated confidence exceeds actual accuracy by more than 15 points.\n\n**Platform Learning** (`PlatformLearning`) \u2014 Mandated for all marketplace agents. `feedback_required: true` is always set. Collects `work_outcome`, `helpfulness`, and `would_use_again` metrics. Feedback flows upstream anonymized. Aggregation is configurable per-offering, per-session, or per-project.\n\n## Context (AgentContext)\n\nThe context dimension defines what the agent contributes to the composed context and what it requires to be loaded.\n\n**Contributions** \u2014 An array of `ContextContribution` items. Each specifies a `section` name (e.g., \"Security Warnings\"), inline `content` or a `content_ref` MCP resource URI, and a `priority` (high, medium, low). High-priority contributions are always included in composed context. Medium-priority contributions are included if token budget allows. Low-priority contributions are loaded on demand.\n\n**Requirements** \u2014 An array of `ContextRequirement` items specifying files or sections the agent needs loaded before it can work effectively. 
A security agent might require `portal.yaml` and the \"gates\" section of CLAUDE.md.\n\n## Reporting (AgentReporting)\n\nThe reporting dimension controls how the agent captures its work and learnings in the knowledge streams.\n\n**Work Log Config** (`WorkLogConfig`):\n- `auto_record` \u2014 Automatically create work log entries when work completes\n- `structure` \u2014 Which structured fields to include: `task_ref`, `files_modified`, `symbols_touched`, `next_steps`, `blockers`\n- `destination` \u2014 Always `'work-log'`\n\n**Learning Journal Config** (`LearningJournalConfig`):\n- `auto_record` \u2014 Automatically record learning moments\n- `triggers` \u2014 Which events trigger journal entries: `correction_received`, `confidence_miss`, `pattern_discovered`\n- `destination` \u2014 Always `'journal'` (agent-private)\n\n## Collaboration (AgentCollaboration)\n\nThe collaboration dimension defines how the agent interacts with others in multi-agent contexts.\n\n**Default Stance** (`CollaborationStance`) \u2014 One of five stances:\n- `lead` \u2014 Drives decisions, sets direction (architect default)\n- `advisory` \u2014 Offers guidance but does not drive (reviewer, security defaults)\n- `supportive` \u2014 Follows direction, executes (builder default)\n- `observer` \u2014 Watches but rarely acts\n- `peer` \u2014 Equal footing with no hierarchy\n\n**Per-Agent Relationships** \u2014 The `with` record allows overriding stance per agent. A builder might be `supportive` by default but `peer` with another builder.\n\n**Debate Config** \u2014 Controls debate behavior: `will_challenge` (will push back), `evidence_required` (must cite specific code/patterns), `escalate_to_human` (ask human if debate does not resolve).\n\nDefault configs exist via `DEFAULT_COLLABORATION`. The architect defaults to `lead` stance with evidence-based challenging and human escalation. 
The builder defaults to `supportive` with `can_contradict: false` toward the architect.\n\n## Nomination (AgentNomination)\n\nThe nomination dimension defines behavioral rules for self-nomination beyond the threshold check.\n\n**speak_when** \u2014 Conditions for speaking up:\n- `relevance_above` \u2014 Score threshold (default 0.6, mirrors attention threshold)\n- `urgency` \u2014 Always speak for specific urgency types: `security_risk`, `breaking_change`, `gate_missing`, `test_failure`, `performance_risk`\n- `asked_directly` \u2014 Always respond to direct questions (default: true)\n\n**quiet_when** \u2014 Conditions for staying silent:\n- `relevance_below` \u2014 Hard floor below which the agent never speaks\n- `another_agent_handling` \u2014 Stay quiet if another agent is already addressing this\n- `human_explicitly_excluded` \u2014 Respect human's explicit exclusion\n\n**contribution_style** \u2014 How the agent communicates:\n- `brief_first` \u2014 Start with a short summary, elaborate if asked\n- `cite_sources` \u2014 Reference specific code and patterns\n- `offer_action` \u2014 Offer concrete actions rather than just observations\n\n## buildProfileEnrichment\n\nThe `buildProfileEnrichment` function composes all six dimensions into a prompt enrichment string for orchestration. It takes the agent profile, relevant symbols, optional notebook entries, and optional ambient context (recent decisions, journal insights, pending nominations).\n\nThe output is structured markdown with sections for: Agent Identity, Expertise on Relevant Symbols, Transferable Patterns, Relevant Notebook Entries, Attention patterns, Collaboration stance, Nomination preferences, Recent Team Decisions, Transferable Insights, and Pending Nominations.\n\nThis enrichment is injected into the agent's prompt during orchestration, giving it full awareness of its identity, capabilities, behavioral rules, and ambient context \u2014 all derived from the `.agent` file and the knowledge streams.",
|
|
451
|
-
"keyConcepts": [
|
|
452
|
-
"Six new dimensions: attention, learning, context, reporting, collaboration, nomination",
|
|
453
|
-
"Intrinsic learning (optional, agent-driven) vs platform learning (mandated for marketplace agents)",
|
|
454
|
-
"Five collaboration stances: lead, advisory, supportive, observer, peer",
|
|
455
|
-
"Context contributions have three priorities: high (always included), medium (if budget allows), low (on demand)",
|
|
456
|
-
"DEFAULT_ATTENTION and DEFAULT_COLLABORATION provide sensible defaults for architect, builder, reviewer, tester, security",
|
|
457
|
-
"buildProfileEnrichment composes all dimensions into a structured markdown prompt"
|
|
458
|
-
],
|
|
459
|
-
"quiz": [
|
|
460
|
-
{
|
|
461
|
-
"id": "q1",
|
|
462
|
-
"question": "An agent installed from a marketplace does not define an `intrinsic` learning section but has `platform` learning configured. Is this valid?",
|
|
463
|
-
"choices": {
|
|
464
|
-
"A": "No \u2014 all agents must have intrinsic learning configured",
|
|
465
|
-
"B": "Yes \u2014 intrinsic learning is optional (the agent's choice), but platform learning is mandated for marketplace agents",
|
|
466
|
-
"C": "No \u2014 platform learning requires intrinsic learning as a prerequisite",
|
|
467
|
-
"D": "Yes, but the agent will not be able to participate in ambient coordination",
|
|
468
|
-
"E": "It depends on whether the agent has attention patterns configured"
|
|
469
|
-
},
|
|
470
|
-
"correct": "B",
|
|
471
|
-
"explanation": "Intrinsic learning is optional \u2014 it represents the agent's own drive to improve and is a design choice by the agent creator. A marketplace agent might deliberately not implement intrinsic learning if it relies on periodic updates from its creator instead. Platform learning (`feedback_required: true`) is mandated for all marketplace agents to ensure quality signals flow back to creators. The two layers are independent."
|
|
472
|
-
},
|
|
473
|
-
{
|
|
474
|
-
"id": "q2",
|
|
475
|
-
"question": "A builder agent is in a multi-agent session with an architect. The builder has `collaboration.with.architect.can_contradict: false`. The builder notices the architect's approach will cause a performance regression. What should happen?",
|
|
476
|
-
"choices": {
|
|
477
|
-
"A": "The builder stays silent \u2014 it cannot contradict the architect",
|
|
478
|
-
"B": "The builder can still nominate an observation or warning \u2014 `can_contradict` affects debate dynamics, not whether the agent can surface concerns",
|
|
479
|
-
"C": "The builder overrides its collaboration stance because performance is critical",
|
|
480
|
-
"D": "The builder files a nomination but it is automatically suppressed",
|
|
481
|
-
"E": "The builder must wait for the reviewer to raise the concern"
|
|
482
|
-
},
|
|
483
|
-
"correct": "B",
|
|
484
|
-
"explanation": "The `can_contradict` field in collaboration affects debate dynamics \u2014 whether the agent will actively argue against the other agent's position. It does not prevent the agent from surfacing observations or warnings via the nomination system. The builder can still create a nomination with `type: 'warning'` or `type: 'observation'` about the performance regression. The nomination is surfaced to the human, who decides how to proceed. Collaboration stance governs interaction style, not whether an agent can report concerns."
|
|
485
|
-
},
|
|
486
|
-
{
|
|
487
|
-
"id": "q3",
|
|
488
|
-
"question": "An agent's context contributions include a section with `priority: 'low'` and `content_ref: 'paradigm://guidance/portal'`. When is this content loaded?",
|
|
489
|
-
"choices": {
|
|
490
|
-
"A": "Always \u2014 all contributions are loaded regardless of priority",
|
|
491
|
-
"B": "Never \u2014 low priority contributions are ignored",
|
|
492
|
-
"C": "On demand \u2014 low priority contributions are loaded only when the agent or human explicitly requests them, not automatically included in composed context",
|
|
493
|
-
"D": "Only during the first session \u2014 subsequent sessions cache it",
|
|
494
|
-
"E": "Only when the agent's attention score exceeds 0.8"
|
|
495
|
-
},
|
|
496
|
-
"correct": "C",
|
|
497
|
-
"explanation": "Context contributions have three priority levels that control inclusion in composed context. High-priority contributions are always included. Medium-priority contributions are included if the token budget allows. Low-priority contributions are loaded only on demand \u2014 when the agent requests the resource or the human explicitly asks for it. The `content_ref` URI (`paradigm://guidance/portal`) means the content is fetched from the MCP resource system rather than stored inline, reducing the base context size."
|
|
498
|
-
},
|
|
499
|
-
{
|
|
500
|
-
"id": "q4",
|
|
501
|
-
"question": "What does `buildProfileEnrichment` produce, and when is it used?",
|
|
502
|
-
"choices": {
|
|
503
|
-
"A": "It produces a YAML file that replaces the agent's `.agent` identity",
|
|
504
|
-
"B": "It produces structured markdown combining all agent dimensions \u2014 identity, expertise, transferable patterns, attention, collaboration, nominations, and ambient context \u2014 injected into the agent's prompt during orchestration",
|
|
505
|
-
"C": "It produces a JSON summary for the Conductor UI dashboard",
|
|
506
|
-
"D": "It produces a diff between the current agent profile and its defaults",
|
|
507
|
-
"E": "It produces a compliance report checking all six dimensions"
|
|
508
|
-
},
|
|
509
|
-
"correct": "B",
|
|
510
|
-
"explanation": "`buildProfileEnrichment` takes the agent profile, relevant symbols, optional notebook entries, and optional ambient context (recent decisions, journal insights, pending nominations). It composes a structured markdown string with sections for Agent Identity, Expertise, Transferable Patterns, Notebook Entries, Attention patterns, Collaboration stance, Nomination preferences, and ambient context. This markdown is injected into the agent's system prompt during orchestration, giving it full self-awareness and situational context."
|
|
511
|
-
},
|
|
512
|
-
{
|
|
513
|
-
"id": "q5",
|
|
514
|
-
"question": "An agent has `learning.intrinsic.calibration.overconfidence_alert: 0.15` and its estimated confidence for `#auth-middleware` is 0.90 while its actual accuracy (from assessment verdicts) is 0.70. What happens?",
|
|
515
|
-
"choices": {
|
|
516
|
-
"A": "Nothing \u2014 the system does not track actual accuracy",
|
|
517
|
-
"B": "The confidence is automatically reduced to 0.70",
|
|
518
|
-
"C": "An overconfidence alert is triggered because the delta (0.90 - 0.70 = 0.20) exceeds the 0.15 threshold",
|
|
519
|
-
"D": "The agent is prevented from working on `#auth-middleware`",
|
|
520
|
-
"E": "The EMA alpha is increased to make confidence converge faster"
|
|
521
|
-
},
|
|
522
|
-
"correct": "C",
|
|
523
|
-
"explanation": "The `overconfidence_alert` threshold triggers when the gap between estimated confidence and actual accuracy exceeds the configured value. Here, the delta is 0.20 (0.90 - 0.70), which exceeds the 0.15 threshold. This alert signals that the agent is more confident than its track record warrants for this symbol. The agent's confidence will naturally adjust over time via the EMA formula as more assessment verdicts come in, but the alert provides an immediate signal that the agent should be more cautious on `#auth-middleware`."
|
|
524
|
-
}
|
|
525
|
-
]
|
|
526
|
-
},
|
|
527
|
-
{
|
|
528
|
-
"id": "context-composition",
|
|
529
|
-
"title": "Context Composition",
|
|
530
|
-
"content": "## From Verbose to Slim\n\nParadigm's CLAUDE.md historically contained everything an agent might need: logging rules, portal conventions, MCP workflow guidance, flow patterns, orchestration instructions, workspace configuration, and more. At its peak, the file was ~856 lines \u2014 loaded in full at the start of every session, consuming thousands of tokens regardless of whether the task involved logging, security, or lore.\n\nThis approach has two problems. First, it wastes context window space. An agent working on test coverage does not need 200 lines of portal gate conventions. Second, it creates staleness \u2014 with all guidance in one file, updates to any topic require reading and understanding the entire file.\n\nv5.0 restructured this into a two-layer architecture: a slim CLAUDE.md (~150 lines) for universal orientation, plus 12 on-demand guidance resources for topic-specific depth.\n\n## The Slim CLAUDE.md\n\nThe reduced CLAUDE.md contains only what every session needs:\n\n1. **Project Overview** \u2014 What this project is and which version of Paradigm it uses\n2. **Symbol System** \u2014 The 5 symbols (#, $, ^, !, ~) and their meanings\n3. **Conventions** \u2014 Naming, commit format, .purpose rules\n4. **Agent Onboarding** \u2014 What to call first (`paradigm_status`), what to check\n5. **Before Implementing** \u2014 Protocol search, ripple, gates check\n6. **Automatic Enforcement** \u2014 What the stop hook blocks\n7. **On-Demand Guidance** \u2014 Table of 12 guidance resources with their MCP URIs\n\nThis provides enough context for any agent to orient itself and know where to find deeper guidance, without spending tokens on content irrelevant to the current task.\n\n## 12 Guidance Resources\n\nGuidance resources are served via MCP at `paradigm://guidance/{topic}`. 
Each resource generates its content on demand \u2014 it is not a static file but a function that produces the latest guidance.\n\nThe 12 topics:\n\n| Topic | What It Covers |\n|---|---|\n| `logging` | Logger usage, symbol-to-method mapping by directory |\n| `portal` | Portal protocol, gate patterns, route declarations |\n| `mcp-workflow` | MCP tool orchestration, token budgets |\n| `flows` | Flow-first development, $flow documentation |\n| `orchestration` | Multi-agent orchestration, agent spawning |\n| `workspaces` | Multi-project symbol awareness |\n| `university` | Knowledge base, courses, PLSAT |\n| `calibration` | Confidence calibration, overconfidence alerts |\n| `checkpoints` | Session checkpoints, crash recovery |\n| `navigation` | Task recipes, navigation patterns |\n| `component-types` | Component hierarchy, type guidelines |\n| `troubleshooting` | Common issues, diagnostic steps |\n\nAn agent working on portal.yaml calls `paradigm://guidance/portal` to get the full portal protocol. An agent setting up multi-project awareness calls `paradigm://guidance/workspaces`. 
This on-demand model means the agent pays the token cost only for the guidance it actually uses.\n\n## Agent Contributions Section\n\nBeyond the static guidance resources, composed context includes a dynamic **Agent Contributions** section built from active agents' `AgentContext.contributions`.\n\nFor example, if a security agent is active and its profile includes:\n\n```yaml\ncontext:\n contributions:\n - section: \"Security Warnings\"\n content: \"New routes added in this session require ^authenticated gate minimum.\"\n priority: high\n - section: \"Portal Conventions\"\n content_ref: \"paradigm://guidance/portal\"\n priority: medium\n```\n\n...the composed context will include a \"Security Warnings\" section (always, because `priority: high`) and may include the full portal guidance (if token budget allows, because `priority: medium`).\n\nContributions with `content_ref` instead of inline `content` are resolved lazily \u2014 the MCP resource is fetched only when the contribution is actually included in the composed context.\n\n## paradigm_context_compose\n\nThe `paradigm_context_compose` tool assembles the full context for a session. It takes:\n\n- The active agent(s) and their profiles\n- The current task or focus area\n- Token budget constraints\n\nIt produces a composed context string that includes:\n\n1. **Base CLAUDE.md content** \u2014 Universal orientation\n2. **Agent identity section** \u2014 From `buildProfileEnrichment`\n3. **High-priority contributions** \u2014 From all active agents' context contributions\n4. **Relevant guidance** \u2014 On-demand resources loaded based on the task\n5. **Ambient context** \u2014 Recent team decisions, transferable journal insights, pending nominations\n6. 
**Medium-priority contributions** \u2014 If token budget allows\n\nLow-priority contributions and unused guidance resources are omitted from the initial context but remain available via MCP resource URIs if the agent needs them mid-session.\n\n## The Full Loop: Journal to Context\n\nHere is where everything connects. Consider this sequence:\n\n1. **Session A**: Builder modifies `#payment-service`, makes a mistake with JWT token ordering, gets corrected by the human. The builder records a journal entry: `trigger: 'correction_received', insight: 'JWT refresh tokens must be validated before access tokens when both are present', transferable: true, pattern: { id: 'jwt-ordering', applies_when: 'validating multiple JWT types', correct_approach: 'Check refresh token first, then access token' }`.\n\n2. **Between sessions**: The journal entry is stored at `~/.paradigm/agents/builder/journal/`. The pattern is extracted as a transferable pattern.\n\n3. **Session B**: A different agent (or the same builder on a different project) starts work on an authentication module. `paradigm_context_compose` runs, loading the builder's profile. The `buildProfileEnrichment` function includes the transferable pattern and the journal insight in the \"Transferable Insights\" section of the composed context.\n\n4. **Result**: The agent in Session B sees the JWT ordering insight before writing any code, preventing the same mistake.\n\nThis is the closed loop: DO (Session A work) -> RECORD (journal entry) -> ASSESS (attention scoring recognizes auth work in Session B) -> LEARN (pattern extracted) -> ADAPT (context composition includes the pattern) -> DO (Session B starts with the insight).\n\n## emitAndProcess\n\nThe `emitAndProcess` function unifies event emission with nomination processing. When an event is emitted, it is simultaneously:\n\n1. Written to the event stream (JSONL file + memory buffer)\n2. Scored against all active agents' attention patterns\n3. 
For any agent exceeding its threshold, a nomination opportunity is created\n\nThis single-call pattern ensures that no event is emitted without being evaluated for nominations. It prevents the race condition where an event is emitted but attention scoring happens too late to catch it.\n\nThe function returns both the emitted event and any nominations that were generated, giving the caller full visibility into what happened.\n\n## Putting It All Together\n\nAmbient coordination is not a single feature \u2014 it is a system of interconnected capabilities:\n\n- **Knowledge streams** split lore into purpose-specific channels\n- **Events** capture every meaningful action in a structured format\n- **Attention** filters events to the right agents\n- **Nominations** let agents contribute without being asked\n- **Data sovereignty** ensures data stays where it belongs\n- **Agent renaissance** gives agents the behavioral vocabulary to participate\n- **Context composition** closes the loop by feeding learnings back into future sessions\n\nEach piece is independently useful, but together they create a system that gets smarter with every session \u2014 not because any single component is intelligent, but because the loop never breaks.",
|
|
531
|
-
"keyConcepts": [
|
|
532
|
-
"Slim CLAUDE.md (~150 lines) plus 12 on-demand guidance resources via paradigm://guidance/{topic}",
|
|
533
|
-
"paradigm_context_compose assembles base content, agent identity, contributions, guidance, and ambient context",
|
|
534
|
-
"Agent contributions use three priorities: high (always), medium (if budget), low (on demand)",
|
|
535
|
-
"Journal insights feed back into next session's context via buildProfileEnrichment \u2014 closing the learning loop",
|
|
536
|
-
"emitAndProcess unifies event emission with nomination processing in a single call",
|
|
537
|
-
"The full system: knowledge streams + events + attention + nominations + sovereignty + renaissance + composition"
|
|
538
|
-
],
|
|
539
|
-
"quiz": [
|
|
540
|
-
{
|
|
541
|
-
"id": "q1",
|
|
542
|
-
"question": "An agent is working on test coverage and calls `paradigm_context_compose`. Which guidance resources are most likely loaded into the composed context?",
|
|
543
|
-
"choices": {
|
|
544
|
-
"A": "All 12 guidance resources \u2014 the compose tool always includes everything",
|
|
545
|
-
"B": "None \u2014 guidance resources are never included in composed context",
|
|
546
|
-
"C": "Only the guidance resources relevant to the task \u2014 likely testing-related topics, not portal or orchestration guidance",
|
|
547
|
-
"D": "Only `paradigm://guidance/troubleshooting` \u2014 it is always included",
|
|
548
|
-
"E": "Whichever resources the agent loaded in the previous session"
|
|
549
|
-
},
|
|
550
|
-
"correct": "C",
|
|
551
|
-
"explanation": "`paradigm_context_compose` selects guidance resources based on the current task and focus area. An agent working on test coverage would benefit from testing-related guidance but not portal conventions or multi-agent orchestration details. The on-demand model means only relevant guidance consumes context window tokens. The agent can always request additional guidance mid-session via the MCP resource URIs if needed."
|
|
552
|
-
},
|
|
553
|
-
{
|
|
554
|
-
"id": "q2",
|
|
555
|
-
"question": "A security agent has a high-priority context contribution with inline content and a medium-priority contribution with `content_ref: 'paradigm://guidance/portal'`. Token budget is tight. What gets included?",
|
|
556
|
-
"choices": {
|
|
557
|
-
"A": "Both are included \u2014 priority does not affect inclusion",
|
|
558
|
-
"B": "Only the high-priority contribution is included \u2014 the medium-priority contribution is omitted due to token budget constraints",
|
|
559
|
-
"C": "Only the medium-priority contribution is included because it references official guidance",
|
|
560
|
-
"D": "Neither is included \u2014 contributions are disabled when budget is tight",
|
|
561
|
-
"E": "Both are included but the medium-priority content is truncated"
|
|
562
|
-
},
|
|
563
|
-
"correct": "B",
|
|
564
|
-
"explanation": "High-priority contributions are always included in composed context regardless of token budget. Medium-priority contributions are included only if the token budget allows. When budget is tight, medium and low priority contributions are omitted. The `content_ref` URI remains available \u2014 the agent can fetch `paradigm://guidance/portal` later if it discovers it needs portal guidance. This tiered inclusion ensures the most important context always fits."
|
|
565
|
-
},
|
|
566
|
-
{
|
|
567
|
-
"id": "q3",
|
|
568
|
-
"question": "In Session A, a builder records a journal entry with `transferable: true` and a `LearningPattern`. In Session B (different project), the same builder starts working on related code. How does the Session A insight reach Session B's context?",
|
|
569
|
-
"choices": {
|
|
570
|
-
"A": "The human manually copies the journal entry to the new project",
|
|
571
|
-
"B": "The journal entry is stored at `~/.paradigm/agents/builder/journal/` (user-scoped), and `buildProfileEnrichment` includes transferable insights in the composed context when the builder's profile is loaded",
|
|
572
|
-
"C": "The insight is stored in `.paradigm/lore/` and shared via Symphony networking",
|
|
573
|
-
"D": "The pattern is automatically added to CLAUDE.md in the new project",
|
|
574
|
-
"E": "Cross-project transfer is not supported \u2014 insights stay in the originating project"
|
|
575
|
-
},
|
|
576
|
-
"correct": "B",
|
|
577
|
-
"explanation": "The journal entry is stored at `~/.paradigm/agents/builder/journal/` \u2014 a user-scoped location (trust ring 2) that persists across projects. When `paradigm_context_compose` runs in Session B, it loads the builder's profile and calls `buildProfileEnrichment`. This function includes transferable journal insights and patterns in the \"Transferable Insights\" section of the composed context. The builder in Session B sees the insight before writing any code. No manual copying or network sharing is needed \u2014 the user-scoped storage location is the mechanism."
|
|
578
|
-
},
|
|
579
|
-
{
|
|
580
|
-
"id": "q4",
|
|
581
|
-
"question": "What does `emitAndProcess` do differently from calling `emitEvent` and `scoreEventForAgent` separately?",
|
|
582
|
-
"choices": {
|
|
583
|
-
"A": "It is faster because it batches file I/O",
|
|
584
|
-
"B": "It unifies event emission with nomination processing in a single call, ensuring no event is emitted without being evaluated \u2014 preventing the race condition where scoring happens too late",
|
|
585
|
-
"C": "It automatically records work log entries for every event",
|
|
586
|
-
"D": "It skips the memory buffer and writes directly to disk",
|
|
587
|
-
"E": "There is no functional difference \u2014 it is a convenience wrapper"
|
|
588
|
-
},
|
|
589
|
-
"correct": "B",
|
|
590
|
-
"explanation": "`emitAndProcess` ensures atomicity between event emission and nomination processing. If you call `emitEvent` and `scoreEventForAgent` separately, there is a window where an event exists in the stream but has not been evaluated for nominations. Another process might read the event and miss the nominations. `emitAndProcess` emits the event, immediately scores it against all active agents' attention patterns, and generates nominations for any agent exceeding its threshold \u2014 all in one call. It returns both the event and any generated nominations."
|
|
591
|
-
},
|
|
592
|
-
{
|
|
593
|
-
"id": "q5",
|
|
594
|
-
"question": "Before v5.0, CLAUDE.md was ~856 lines. After v5.0, it is ~150 lines plus 12 guidance resources. What is the primary benefit?",
|
|
595
|
-
"choices": {
|
|
596
|
-
"A": "The file is easier to edit because it is shorter",
|
|
597
|
-
"B": "Agents load faster because there is less to parse",
|
|
598
|
-
"C": "Context window tokens are spent on task-relevant content rather than a static wall of instructions \u2014 agents pay the token cost only for guidance they actually use",
|
|
599
|
-
"D": "The guidance resources are more accurate because they are generated dynamically",
|
|
600
|
-
"E": "The slim CLAUDE.md can be committed to version control while the old one could not"
|
|
601
|
-
},
|
|
602
|
-
"correct": "C",
|
|
603
|
-
"explanation": "The primary benefit is context window efficiency. A 856-line CLAUDE.md consumes thousands of tokens every session, most of which are irrelevant to the current task. The slim CLAUDE.md (~150 lines) provides universal orientation at minimal token cost, and guidance resources are loaded on demand only when needed. An agent working on test coverage does not pay the token cost for portal conventions, logging rules, or orchestration patterns. This leaves more of the context window available for actual code, tool results, and conversation."
|
|
604
|
-
}
|
|
605
|
-
]
|
|
606
|
-
},
|
|
607
|
-
{
|
|
608
|
-
"id": "maestro-team-collab",
|
|
609
|
-
"title": "Maestro: Visible Team Orchestration",
|
|
610
|
-
"content": "## From Synthesized Summaries to Attributed Conversations\n\nTraditional multi-agent orchestration has a visibility problem. An orchestrator spawns three agents, waits for their responses, synthesizes a summary, and presents it to the human. The human sees one voice \u2014 the orchestrator's \u2014 and loses all nuance from individual agent perspectives. If the architect disagreed with the security agent, you would never know. If the builder had a novel approach, it gets flattened into a consensus view.\n\nThe Maestro model inverts this pattern. Every agent speaks for itself.\n\n## The Maestro Model\n\nMaestro is not a separate system \u2014 it is a behavior pattern for the active Claude Code session. When you ask a complex question that benefits from multiple perspectives, Maestro:\n\n1. **Evaluates expertise** \u2014 Which agents have the highest confidence scores on the relevant symbols?\n2. **Loads ambient context** \u2014 Recent team decisions, journal insights, pending nominations are injected into each agent's prompt via `buildProfileEnrichment()`.\n3. **Spawns subagents** \u2014 Each agent receives its full profile: personality, expertise history, transferable patterns, notebook entries, and the ambient context.\n4. **Presents attributed responses** \u2014 Each agent's response appears with a `[role]` or `[nickname (role)]` prefix. You see exactly who said what.\n5. **Records to Symphony** \u2014 Each contribution is written as a Symphony message, creating a persistent team thread visible in Conductor and the Platform dashboard.\n6. 
**Learns from feedback** \u2014 At session end, `paradigm_ambient_learn` adjusts each agent's attention threshold based on acceptance/dismissal rates.\n\n## Agent Profiles and Nicknames\n\nEach agent has an `.agent` YAML file in `~/.paradigm/agents/` with:\n\n- **personality** \u2014 style (deliberate/rapid/exploratory/methodical), risk tolerance, verbosity\n- **expertise** \u2014 per-symbol confidence scores, exponential moving average from lore\n- **attention** \u2014 threshold, symbol/path/concept/signal subscriptions\n- **collaboration** \u2014 default stance toward other agents, debate behavior\n- **nomination** \u2014 urgency patterns, communication style\n- **nickname** \u2014 optional display name (e.g., \"George\" for the architect)\n- **benched** \u2014 if true, Maestro skips this agent entirely\n\nThe `nickname` field makes agents feel like team members. Terminal output shows `[George (architect)]` instead of the generic `[architect]`.\n\n## Bench and Activate\n\nNot every agent should speak on every task. The bench system lets you silence noisy agents:\n\n- `paradigm agent bench security` \u2014 security agent stops nominating and is excluded from orchestration\n- `paradigm agent activate security` \u2014 restore to active status\n- `paradigm agent roster` \u2014 see who is active vs benched with stats\n\nBenched agents are skipped in both `paradigm_orchestrate_inline` and the nomination engine's `processEvent`. Their profiles remain intact \u2014 bench is a pause, not a delete.\n\n## Symphony Team Threads\n\nEvery orchestration creates a thread prefixed `thr-orch-`. Maestro writes each agent contribution as a Symphony message from the agent's identity (`{project}/{role}`). 
This creates:\n\n- **Persistent record** \u2014 The team conversation survives session restarts\n- **Conductor visibility** \u2014 The TeamThreadView shows messages with colored role prefixes\n- **Platform dashboard** \u2014 The Team section displays the same thread in a browser\n- **Recovery context** \u2014 Next session's handoff includes which agents contributed and what they said\n\n## The Neverland Test\n\nNamed after the validation criteria in the spec, the Neverland test tracks whether agent learning actually works across sessions:\n\n- **Sessions 1-3**: Agents accumulate \u2014 touching symbols, recording lore, discovering patterns\n- **Sessions 4-5**: Maestro routes based on learned confidence scores\n- **Sessions 6-10**: Accepted suggestions lower threshold (agent speaks more). Dismissed suggestions raise it (agent speaks less).\n\nMeasurable targets:\n- By session 10, Maestro routes to the right agent >80% of the time\n- Agent acceptance rate improves from ~50% (cold start) to >70%\n\nTrack progress with `paradigm_ambient_health` \u2014 returns per-agent stats and overall health status (cold-start \u2192 accumulating \u2192 calibrating \u2192 mature).\n\n## Postflight Learning Loop\n\nThe postflight skill closes the feedback loop after every task:\n\n1. **Step 8b** runs `paradigm_ambient_learn` for each contributing agent \u2014 adjusts attention thresholds based on accept/dismiss rates\n2. Runs `paradigm_ambient_promote` \u2014 auto-promotes high-confidence journal patterns to the agent's notebook\n3. Records contributions via Symphony if not already done during execution\n\nThis ensures every session makes agents incrementally smarter. The handoff skill captures agent performance summaries so the next session inherits this knowledge.\n\n## The Teacher Model\n\nThe learning loop has a quality problem: the nomination engine only sees file paths, never content. 
Briefs like \"review for consistency\" get dismissed, which raises the agent's threshold, which silences the agent. The system learns to be *silent* instead of *better*.\n\nThe Teacher Model fixes this. Maestro (the active session) acts as a teacher who observes the full session and writes targeted feedback.\n\n### Session Work Log\n\nDuring each session, a running JSONL log at `.paradigm/events/session-log.jsonl` captures:\n- **Agent contributions**: what each agent was asked to do (from orchestration)\n- **User verdicts**: accepted / dismissed / revised, with the reason why\n\nThis is the data Maestro reads at postflight to write meaningful learning feedback.\n\n### Postflight Learning Pass\n\nAt session end, Step 8b reads the session work log and writes journal entries per agent:\n\n- **Accepted** \u2192 `human_feedback` trigger, confidence 0.85, extract the pattern that was confirmed correct\n- **Dismissed** \u2192 `correction_received` trigger, confidence 0.4, explain what was wrong and what to do differently\n- **Revised** \u2192 `correction_received` trigger, confidence 0.65, include the delta between proposal and actual\n\nThese journal entries include `pattern.applies_when` and `pattern.correct_approach` fields \u2014 the exact knowledge that gets promoted to notebooks.\n\n### Training New Behaviors\n\nThe journal \u2192 notebook \u2192 `buildProfileEnrichment` pipeline is also how you teach agents new skills. If you say \"documentor, also update CHANGELOG from now on,\" Maestro writes a journal entry. It promotes to a notebook entry. Next session, that knowledge is in the agent's context. No configuration needed.\n\n## The Documentor Agent\n\nThe 6th core agent. 
Its sole job: maintain Paradigm metadata files after other agents finish their work.\n\n- Always runs as the **final orchestration stage**\n- Reviews what changed (git diff, session work log)\n- Updates .purpose files, portal.yaml, symbol registrations\n- Uses ONLY `paradigm_purpose_*`, `paradigm_portal_*`, and `paradigm_reindex` MCP tools\n- Never modifies source code\n- Relieves all other agents of Paradigm compliance\n\nThis separation of concerns means architect, builder, security, and reviewer can focus purely on their domain. The documentor handles the bookkeeping.",
|
|
611
|
-
"keyConcepts": [
|
|
612
|
-
"Maestro is a behavior pattern, not a separate system \u2014 the active session decides who to consult",
|
|
613
|
-
"Attributed responses: [nickname (role)] prefix makes agents visible as distinct team members",
|
|
614
|
-
"Agent profiles carry expertise, personality, attention patterns, and ambient context across sessions",
|
|
615
|
-
"Bench/activate controls which agents participate \u2014 bench is a pause, not a delete",
|
|
616
|
-
"Symphony team threads (thr-orch-*) persist the conversation for Conductor and Platform display",
|
|
617
|
-
"Neverland test: measurable learning validation \u2014 >80% routing accuracy by session 10",
|
|
618
|
-
"Postflight learning loop: paradigm_ambient_learn + paradigm_ambient_promote after every task",
|
|
619
|
-
"Context injection: decisions, journal insights, nominations fed to agents via buildProfileEnrichment()",
|
|
620
|
-
"Teacher Model: Maestro observes full session and writes targeted journal entries \u2014 not generic template briefs",
|
|
621
|
-
"Session work log (.paradigm/events/session-log.jsonl) captures contributions + verdicts with reasons",
|
|
622
|
-
"Documentor agent: 6th core agent, always final stage, handles all .purpose and portal.yaml updates via MCP tools only"
|
|
623
|
-
],
|
|
624
|
-
"quiz": [
|
|
625
|
-
{
|
|
626
|
-
"id": "q1",
|
|
627
|
-
"question": "What is the primary difference between Maestro and traditional multi-agent orchestration?",
|
|
628
|
-
"choices": {
|
|
629
|
-
"A": "Maestro uses more agents per task",
|
|
630
|
-
"B": "Maestro presents each agent's response as an attributed message rather than synthesizing a single summary",
|
|
631
|
-
"C": "Maestro runs agents in parallel instead of sequentially",
|
|
632
|
-
"D": "Maestro eliminates the need for human approval",
|
|
633
|
-
"E": "Maestro uses persistent background agents"
|
|
634
|
-
},
|
|
635
|
-
"correct": "B",
|
|
636
|
-
"explanation": "Maestro's key innovation is visibility. Instead of synthesizing agent responses into a single voice, each agent speaks for itself with an attribution prefix like [architect]. This preserves individual perspectives and lets the human see disagreements, novel approaches, and the reasoning behind each contribution."
|
|
637
|
-
},
|
|
638
|
-
{
|
|
639
|
-
"id": "q2",
|
|
640
|
-
"question": "When a security agent is consistently dismissed by the user (>60% dismissal rate), what happens at postflight?",
|
|
641
|
-
"choices": {
|
|
642
|
-
"A": "The agent is automatically deleted",
|
|
643
|
-
"B": "The agent is moved to a different project",
|
|
644
|
-
"C": "paradigm_ambient_learn raises the agent's attention threshold, making it nominate less",
|
|
645
|
-
"D": "The agent's expertise scores are reset to zero",
|
|
646
|
-
"E": "Nothing changes \u2014 threshold adjustment is manual"
|
|
647
|
-
},
|
|
648
|
-
"correct": "C",
|
|
649
|
-
"explanation": "The learning loop is automatic. When dismissal rate exceeds 60%, paradigm_ambient_learn raises the agent's attention threshold by 0.05, meaning it requires higher relevance scores before nominating. This self-tunes the agent to speak less when it is being noisy. Conversely, >80% acceptance lowers the threshold, encouraging the agent to contribute more."
|
|
650
|
-
},
|
|
651
|
-
{
|
|
652
|
-
"id": "q3",
|
|
653
|
-
"question": "What ambient context is injected into agent profiles before Maestro spawns them?",
|
|
654
|
-
"choices": {
|
|
655
|
-
"A": "Only the agent's expertise scores",
|
|
656
|
-
"B": "The full project codebase",
|
|
657
|
-
"C": "Recent team decisions, transferable journal insights, and pending nominations",
|
|
658
|
-
"D": "The complete git history",
|
|
659
|
-
"E": "Nothing \u2014 agents start fresh each session"
|
|
660
|
-
},
|
|
661
|
-
"correct": "C",
|
|
662
|
-
"explanation": "buildProfileEnrichment() accepts an ambientContext parameter containing recent team decisions (from decision-loader), transferable journal insights (from journal-loader), and pending nominations (from nomination-engine). This gives each agent awareness of what the team has decided, what patterns have been discovered, and what issues are outstanding \u2014 creating genuine team intelligence rather than isolated tool execution."
|
|
663
|
-
},
|
|
664
|
-
{
|
|
665
|
-
"id": "q4",
|
|
666
|
-
"question": "What does the Neverland test health status 'calibrating' indicate?",
|
|
667
|
-
"choices": {
|
|
668
|
-
"A": "No agents have been created yet",
|
|
669
|
-
"B": "Agents have fewer than 10 total nominations",
|
|
670
|
-
"C": "Average acceptance rate is between 50% and 70% \u2014 agents are learning but haven't reached target",
|
|
671
|
-
"D": "All agents have been benched",
|
|
672
|
-
"E": "The system has reached maturity and needs no further adjustment"
|
|
673
|
-
},
|
|
674
|
-
"correct": "C",
|
|
675
|
-
"explanation": "The Neverland health statuses are: cold-start (<10 nominations), accumulating (<50% accept rate), calibrating (50-70% accept rate), and mature (>70%). 'Calibrating' means agents are past the initial noise phase and their thresholds are actively being tuned by the learning loop, but haven't yet reached the 70% target. This typically occurs around sessions 4-7."
|
|
676
|
-
},
|
|
677
|
-
{
|
|
678
|
-
"id": "q5",
|
|
679
|
-
"question": "How does benching an agent differ from deleting it?",
|
|
680
|
-
"choices": {
|
|
681
|
-
"A": "There is no difference \u2014 bench removes the agent profile",
|
|
682
|
-
"B": "Benching sets benched=true \u2014 the profile stays intact but Maestro and the nomination engine skip it",
|
|
683
|
-
"C": "Benching only affects CLI commands, not MCP tools",
|
|
684
|
-
"D": "Benching removes expertise but keeps the profile",
|
|
685
|
-
"E": "Benching is permanent while deletion can be undone"
|
|
686
|
-
},
|
|
687
|
-
"correct": "B",
|
|
688
|
-
"explanation": "Benching is a reversible pause. The agent's .agent file remains with all expertise, journal entries, notebooks, and transferable patterns intact. Both paradigm_orchestrate_inline and processEvent check profile.benched and skip the agent if true. paradigm agent activate restores the agent immediately with all its accumulated learning preserved."
|
|
689
|
-
},
|
|
690
|
-
{
|
|
691
|
-
"id": "q6",
|
|
692
|
-
"question": "Why does the Teacher Model write journal entries instead of adjusting thresholds directly?",
|
|
693
|
-
"choices": {
|
|
694
|
-
"A": "Journal entries are cheaper in token cost",
|
|
695
|
-
"B": "Thresholds can only go up, not down",
|
|
696
|
-
"C": "Journal entries carry semantic knowledge (what to look for, what was wrong) while thresholds are just a single number that says 'speak more' or 'speak less'",
|
|
697
|
-
"D": "The threshold system is being removed in favor of journals",
|
|
698
|
-
"E": "Journal entries are visible to the user while thresholds are hidden"
|
|
699
|
-
},
|
|
700
|
-
"correct": "C",
|
|
701
|
-
"explanation": "Thresholds control volume (speak more/less) but not quality. A threshold can't teach the security agent to distinguish audit logging from vulnerabilities. A journal entry can: 'When auth files change with logging additions, recognize these as security-positive \u2014 don't flag as violations.' This knowledge promotes to notebooks and appears in future prompts via buildProfileEnrichment, making the agent genuinely smarter rather than just quieter."
|
|
702
|
-
},
|
|
703
|
-
{
|
|
704
|
-
"id": "q7",
|
|
705
|
-
"question": "What is the documentor agent's relationship to the stop hook?",
|
|
706
|
-
"choices": {
|
|
707
|
-
"A": "The documentor replaces the stop hook entirely",
|
|
708
|
-
"B": "The documentor runs before the stop hook and should prevent it from ever triggering, but the hook remains as a safety net",
|
|
709
|
-
"C": "The stop hook runs the documentor automatically",
|
|
710
|
-
"D": "They are unrelated systems",
|
|
711
|
-
"E": "The documentor disables the stop hook for the session"
|
|
712
|
-
},
|
|
713
|
-
"correct": "B",
|
|
714
|
-
"explanation": "The stop hook blocks when .purpose files are missing for modified source directories. The documentor's job is to update these files as the final orchestration stage, so the stop hook should never need to block. However, the stop hook remains as a safety net \u2014 if the documentor misses something or orchestration wasn't used, the hook catches it. Defense in depth."
|
|
715
|
-
}
|
|
716
|
-
]
|
|
717
|
-
}
|
|
718
|
-
]
|
|
719
|
-
}
|