@a-company/paradigm 5.38.0 → 6.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{accept-orchestration-OATWIRHP.js → accept-orchestration-TIXUQQGR.js} +1 -1
- package/dist/add-UOR4INIV.js +8 -0
- package/dist/agent-MB3H5EZA.js +33 -0
- package/dist/{agent-loader-RIVI6QPP.js → agent-loader-VGBPL3TH.js} +1 -1
- package/dist/{agent-loader-RJRVO5GQ.js → agent-loader-W3RQJVW7.js} +1 -1
- package/dist/{agents-suggest-HYTFMQD3.js → agents-suggest-IKY6VD2R.js} +1 -1
- package/dist/{ambient-WTLYUAQM.js → ambient-AI42BOM5.js} +12 -12
- package/dist/{ambient-76YMUA5Q.js → ambient-FNNFB4AP.js} +1 -1
- package/dist/{assess-UFPYEJKP.js → assess-63WXHWJV.js} +1 -1
- package/dist/authority-FA3HLEOA.js +2 -0
- package/dist/{calibration-OLJYB5HN.js → calibration-BDHGYJOK.js} +1 -1
- package/dist/chunk-23T6UG73.js +605 -0
- package/dist/{chunk-4L7665QV.js → chunk-2AU5L333.js} +1 -1
- package/dist/{chunk-BOYQAMGC.js → chunk-4N56FRNE.js} +1 -1
- package/dist/{chunk-5QOCKWK5.js → chunk-4PSD5R7N.js} +2 -2
- package/dist/{chunk-MQIG6SMF.js → chunk-6QXBXZF6.js} +1 -1
- package/dist/{chunk-ORDKEGII.js → chunk-AMLD7IYC.js} +1 -1
- package/dist/{chunk-3DZK54RU.js → chunk-DBEWOKD6.js} +32 -7
- package/dist/{chunk-AGFPVSX5.js → chunk-F6E3HW45.js} +1 -1
- package/dist/{chunk-X3U3IGYT.js → chunk-GD4F2HC6.js} +2 -2
- package/dist/chunk-GRZQIKST.js +2 -0
- package/dist/{chunk-HOBHJPTL.js → chunk-IOVHF4SR.js} +1 -1
- package/dist/{chunk-RLCH7DXQ.js → chunk-K7X3Z3GL.js} +1 -1
- package/dist/{chunk-74SGKSRQ.js → chunk-KAFQA7HV.js} +2 -2
- package/dist/{chunk-NEJ4ZLCY.js → chunk-LAYBUKMB.js} +1 -1
- package/dist/{chunk-4VKSEOXZ.js → chunk-LPBCQM5Y.js} +3 -3
- package/dist/chunk-Q527BPUF.js +2 -0
- package/dist/{chunk-AO7ZSRME.js → chunk-TQOT2LBO.js} +2 -2
- package/dist/{chunk-3XGNXXCT.js → chunk-UZ5H7K6Q.js} +1 -1
- package/dist/chunk-VIG5LSGZ.js +2 -0
- package/dist/chunk-VNIX5KBT.js +3 -0
- package/dist/chunk-WXF5VFB4.js +111 -0
- package/dist/chunk-XQLO5URP.js +11 -0
- package/dist/{chunk-DOCDDDTD.js → chunk-YNDPSWOE.js} +5 -5
- package/dist/chunk-Z5QW6USC.js +2 -0
- package/dist/{compliance-D7GD6ZYC.js → compliance-J3VOV445.js} +1 -1
- package/dist/config-schema-FLHRVZMI.js +2 -0
- package/dist/{context-audit-XRPT3OU2.js → context-audit-JVCA6GSV.js} +1 -1
- package/dist/{cursorrules-U5O4G5T4.js → cursorrules-ZXPXPZ3P.js} +1 -1
- package/dist/decision-loader-HELL2AMX.js +2 -0
- package/dist/{delete-P5VULXR4.js → delete-2C6ALLYY.js} +1 -1
- package/dist/{diff-YGHBIJY5.js → diff-75MABOSL.js} +1 -1
- package/dist/{dist-KGRCLBJP-2QAPFYNF.js → dist-GQ42YS5N-4HIJZVBB.js} +10 -10
- package/dist/{docs-USDAF26F.js → docs-TSAAS4W3.js} +1 -1
- package/dist/doctor-L5XZENCF.js +2 -0
- package/dist/{edit-GUU3HBVW.js → edit-P3MDAZLU.js} +1 -1
- package/dist/{flow-FVZR3YJ4.js → flow-BGXOVE2V.js} +1 -1
- package/dist/{hooks-TFMMMB2H.js → hooks-KUEE5KMM.js} +1 -1
- package/dist/index.js +6 -6
- package/dist/init-M44SO65G.js +2 -0
- package/dist/{init-XYB62Q3X.js → init-V4KSEKPK.js} +1 -1
- package/dist/{list-YKIQNKGB.js → list-2XIWUEMA.js} +1 -1
- package/dist/list-CFHINXIS.js +12 -0
- package/dist/lore-loader-D2ISOASW.js +2 -0
- package/dist/lore-loader-PXFKMKAN.js +2 -0
- package/dist/mcp.js +4 -4
- package/dist/metrics-UESGUHTA.js +2 -0
- package/dist/{migrate-Z5UQN57G.js → migrate-ZPNYDNM4.js} +1 -1
- package/dist/migrate-assessments-YSITX7KM.js +4 -0
- package/dist/migrate-decisions-NPLQOEEH.js +6 -0
- package/dist/migrate-plsat-EM2ACIQ3.js +6 -0
- package/dist/migration-notices-BHLEYC4T.js +4 -0
- package/dist/{nomination-engine-EALA5MGI.js → nomination-engine-NCLTGMAK.js} +1 -1
- package/dist/{notebook-loader-PXNRBBXD.js → notebook-loader-3J2OFMS3.js} +1 -1
- package/dist/{orchestrate-M5PBZBJQ.js → orchestrate-K4KBTBYK.js} +1 -1
- package/dist/{platform-server-DNAMH4YI.js → platform-server-ANOALDPL.js} +1 -1
- package/dist/{portal-check-ZMLVBIGW.js → portal-check-DV2VSJ5E.js} +1 -1
- package/dist/portal-compliance-JONQ4SOP.js +2 -0
- package/dist/{probe-3FTG6LYO.js → probe-5HAXULAD.js} +1 -1
- package/dist/{providers-AWA7WLLM.js → providers-TBPOE4DI.js} +1 -1
- package/dist/quiz-WYIZJG5K.js +10 -0
- package/dist/{record-YXPB34MY.js → record-N3VNYYKJ.js} +1 -1
- package/dist/registry-OUTA3DXW.js +20 -0
- package/dist/reindex-IZCD2JGD.js +2 -0
- package/dist/{retag-N5XF3KXP.js → retag-72R2OSZV.js} +1 -1
- package/dist/{review-77QI6VOC.js → review-2INNWLTW.js} +1 -1
- package/dist/{sentinel-HYAZ3CO5.js → sentinel-EFPEX246.js} +1 -1
- package/dist/{sentinel-bridge-VR357PKL.js → sentinel-bridge-UR2MKARY.js} +1 -1
- package/dist/{serve-U47GULB6.js → serve-3FMUWW5K.js} +1 -1
- package/dist/serve-OQYUO7CR.js +12 -0
- package/dist/{server-4YNUIK4W.js → server-4D77LCST.js} +1 -1
- package/dist/server-FGUL2FWQ.js +7 -0
- package/dist/session-tracker-HHNY6J4I.js +2 -0
- package/dist/{session-work-log-ZP45TREI.js → session-work-log-MEJ33TYD.js} +1 -1
- package/dist/{session-work-log-PAKXOFGL.js → session-work-log-ZVVJGO7X.js} +1 -1
- package/dist/{setup-FEWSYS3Y.js → setup-ZSEC72BS.js} +1 -1
- package/dist/shift-WGMZGWOC.js +60 -0
- package/dist/{show-PJ5LFLIL.js → show-JH7LJ5MT.js} +1 -1
- package/dist/show-WVHAL4VU.js +7 -0
- package/dist/{spawn-M5BAV252.js → spawn-KKDDR6UR.js} +1 -1
- package/dist/status-S7Z5FVIE.js +6 -0
- package/dist/{summary-PYTEIJ4U.js → summary-WLI3NF4G.js} +2 -2
- package/dist/{sweep-HU74OPVW.js → sweep-7TZFN5NS.js} +1 -1
- package/dist/sync-55U6QPIA.js +2 -0
- package/dist/{sync-llms-7CAI74QL.js → sync-llms-GF7DDQDI.js} +1 -1
- package/dist/{team-PDK64JXI.js → team-2LGZQRP4.js} +1 -1
- package/dist/{timeline-K3ZFKJ3R.js → timeline-RK7O2SCM.js} +1 -1
- package/dist/tools-4RRFTU5H.js +2 -0
- package/dist/university-content/notes/N-para-001-build-something.md +126 -0
- package/dist/university-content/notes/N-para-001-meet-the-team.md +85 -0
- package/dist/university-content/notes/N-para-001-shift-setup.md +74 -0
- package/dist/university-content/notes/N-para-101-component-types.md +99 -0
- package/dist/university-content/notes/N-para-101-first-steps.md +134 -0
- package/dist/university-content/notes/N-para-101-five-symbols.md +128 -0
- package/dist/university-content/notes/N-para-101-paradigm-logger.md +89 -0
- package/dist/university-content/notes/N-para-101-portal-yaml.md +112 -0
- package/dist/university-content/notes/N-para-101-project-structure.md +143 -0
- package/dist/university-content/notes/N-para-101-purpose-files.md +121 -0
- package/dist/university-content/notes/N-para-101-tags-and-classification.md +93 -0
- package/dist/university-content/notes/N-para-101-welcome.md +51 -0
- package/dist/university-content/notes/N-para-201-architecture-review.md +175 -0
- package/dist/university-content/notes/N-para-201-aspect-graph.md +79 -0
- package/dist/university-content/notes/N-para-201-aspects-and-anchors.md +112 -0
- package/dist/university-content/notes/N-para-201-component-patterns.md +138 -0
- package/dist/university-content/notes/N-para-201-cross-cutting-concerns.md +145 -0
- package/dist/university-content/notes/N-para-201-disciplines.md +187 -0
- package/dist/university-content/notes/N-para-201-flows-deep-dive.md +119 -0
- package/dist/university-content/notes/N-para-201-gates-deep-dive.md +165 -0
- package/dist/university-content/notes/N-para-201-portal-protocol.md +133 -0
- package/dist/university-content/notes/N-para-201-signal-patterns.md +159 -0
- package/dist/university-content/notes/N-para-201-symbol-naming.md +149 -0
- package/dist/university-content/notes/N-para-301-context-management.md +53 -0
- package/dist/university-content/notes/N-para-301-decisions.md +99 -0
- package/dist/university-content/notes/N-para-301-doctor-and-validation.md +70 -0
- package/dist/university-content/notes/N-para-301-enforcement-levels.md +102 -0
- package/dist/university-content/notes/N-para-301-fragility-tracking.md +50 -0
- package/dist/university-content/notes/N-para-301-history-system.md +42 -0
- package/dist/university-content/notes/N-para-301-navigation-system.md +55 -0
- package/dist/university-content/notes/N-para-301-operations-review.md +55 -0
- package/dist/university-content/notes/N-para-301-paradigm-shift.md +93 -0
- package/dist/university-content/notes/N-para-301-protocols.md +113 -0
- package/dist/university-content/notes/N-para-301-ripple-analysis.md +53 -0
- package/dist/university-content/notes/N-para-301-sentinel-observability.md +87 -0
- package/dist/university-content/notes/N-para-301-sync-and-maintenance.md +57 -0
- package/dist/university-content/notes/N-para-301-wisdom-system.md +89 -0
- package/dist/university-content/notes/N-para-401-agent-identity.md +99 -0
- package/dist/university-content/notes/N-para-401-agent-interop.md +87 -0
- package/dist/university-content/notes/N-para-401-agent-roles.md +107 -0
- package/dist/university-content/notes/N-para-401-commit-conventions.md +82 -0
- package/dist/university-content/notes/N-para-401-mastery-review.md +71 -0
- package/dist/university-content/notes/N-para-401-mcp-tools-overview.md +102 -0
- package/dist/university-content/notes/N-para-401-multi-agent-coordination.md +80 -0
- package/dist/university-content/notes/N-para-401-notebooks-permissions.md +66 -0
- package/dist/university-content/notes/N-para-401-orchestration-workflow.md +101 -0
- package/dist/university-content/notes/N-para-401-pm-governance.md +71 -0
- package/dist/university-content/notes/N-para-401-provider-cascade.md +75 -0
- package/dist/university-content/notes/N-para-401-quick-check.md +95 -0
- package/dist/university-content/notes/N-para-451-agent-routing.md +117 -0
- package/dist/university-content/notes/N-para-451-archetypes-vs-instances.md +82 -0
- package/dist/university-content/notes/N-para-451-identity-layers.md +76 -0
- package/dist/university-content/notes/N-para-451-orchestration-modes.md +85 -0
- package/dist/university-content/notes/N-para-451-paradigm-shift.md +95 -0
- package/dist/university-content/notes/N-para-451-partners-primitive.md +107 -0
- package/dist/university-content/notes/N-para-451-roster-management.md +132 -0
- package/dist/university-content/notes/N-para-451-roster-reference.md +106 -0
- package/dist/university-content/notes/N-para-451-the-team-pattern.md +87 -0
- package/dist/university-content/notes/N-para-451-tiers.md +81 -0
- package/dist/university-content/notes/N-para-451-welcome.md +55 -0
- package/dist/university-content/notes/N-para-451-what-is-an-agent.md +73 -0
- package/dist/university-content/notes/N-para-501-advanced-workflows.md +122 -0
- package/dist/university-content/notes/N-para-501-aspect-graph-advanced.md +195 -0
- package/dist/university-content/notes/N-para-501-aspect-graph-internals.md +97 -0
- package/dist/university-content/notes/N-para-501-assessment-loops.md +116 -0
- package/dist/university-content/notes/N-para-501-conductor-workspace.md +77 -0
- package/dist/university-content/notes/N-para-501-habits-practice.md +164 -0
- package/dist/university-content/notes/N-para-501-hook-enforcement.md +100 -0
- package/dist/university-content/notes/N-para-501-lore-system.md +155 -0
- package/dist/university-content/notes/N-para-501-platform-agent-ui.md +108 -0
- package/dist/university-content/notes/N-para-501-review-compliance.md +72 -0
- package/dist/university-content/notes/N-para-501-sentinel-deep-dive.md +173 -0
- package/dist/university-content/notes/N-para-501-session-intelligence.md +104 -0
- package/dist/university-content/notes/N-para-501-symphony-a-mail.md +120 -0
- package/dist/university-content/notes/N-para-501-symphony-networking.md +119 -0
- package/dist/university-content/notes/N-para-501-task-management.md +100 -0
- package/dist/university-content/notes/N-para-601-agent-renaissance.md +121 -0
- package/dist/university-content/notes/N-para-601-attention-scoring.md +129 -0
- package/dist/university-content/notes/N-para-601-context-composition.md +146 -0
- package/dist/university-content/notes/N-para-601-data-sovereignty.md +140 -0
- package/dist/university-content/notes/N-para-601-event-stream.md +126 -0
- package/dist/university-content/notes/N-para-601-knowledge-streams.md +144 -0
- package/dist/university-content/notes/N-para-601-learning-loop.md +68 -0
- package/dist/university-content/notes/N-para-601-maestro-team-collab.md +136 -0
- package/dist/university-content/notes/N-para-601-nominations-debates.md +115 -0
- package/dist/university-content/notes/N-para-701-agent-notebooks.md +131 -0
- package/dist/university-content/notes/N-para-701-agent-pods-nevrland.md +182 -0
- package/dist/university-content/notes/N-para-701-agent-profiles.md +197 -0
- package/dist/university-content/notes/N-para-701-agent-roster.md +82 -0
- package/dist/university-content/notes/N-para-701-agent-state.md +180 -0
- package/dist/university-content/notes/N-para-701-learning-feedback-loop.md +188 -0
- package/dist/university-content/notes/N-para-701-model-tier-resolution.md +204 -0
- package/dist/university-content/notes/N-para-701-orchestration-enforcement.md +169 -0
- package/dist/university-content/notes/N-para-701-per-project-rosters.md +198 -0
- package/dist/university-content/notes/N-para-701-symphony-visibility.md +142 -0
- package/dist/university-content/paths/LP-para-001.yaml +29 -0
- package/dist/university-content/paths/LP-para-101.yaml +59 -0
- package/dist/university-content/paths/LP-para-201.yaml +69 -0
- package/dist/university-content/paths/LP-para-301.yaml +84 -0
- package/dist/university-content/paths/LP-para-401.yaml +74 -0
- package/dist/university-content/paths/LP-para-451.yaml +69 -0
- package/dist/university-content/paths/LP-para-501.yaml +89 -0
- package/dist/university-content/paths/LP-para-601.yaml +59 -0
- package/dist/university-content/paths/LP-para-701.yaml +64 -0
- package/dist/university-content/quizzes/Q-para-001-build-something.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-001-meet-the-team.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-001-shift-setup.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-101-component-types.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-101-first-steps.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-101-five-symbols.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-101-paradigm-logger.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-101-portal-yaml.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-101-project-structure.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-101-purpose-files.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-101-tags-and-classification.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-101-welcome.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-201-architecture-review.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-201-aspect-graph.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-201-aspects-and-anchors.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-201-component-patterns.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-201-cross-cutting-concerns.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-201-disciplines.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-201-flows-deep-dive.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-201-gates-deep-dive.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-201-portal-protocol.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-201-signal-patterns.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-201-symbol-naming.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-301-context-management.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-301-decisions.yaml +76 -0
- package/dist/university-content/quizzes/Q-para-301-doctor-and-validation.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-301-enforcement-levels.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-301-fragility-tracking.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-301-history-system.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-301-navigation-system.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-301-operations-review.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-301-paradigm-shift.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-301-protocols.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-301-ripple-analysis.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-301-sentinel-observability.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-301-sync-and-maintenance.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-301-wisdom-system.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-401-agent-identity.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-401-agent-interop.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-401-agent-roles.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-401-commit-conventions.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-401-mastery-review.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-401-mcp-tools-overview.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-401-multi-agent-coordination.yaml +76 -0
- package/dist/university-content/quizzes/Q-para-401-notebooks-permissions.yaml +61 -0
- package/dist/university-content/quizzes/Q-para-401-orchestration-workflow.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-401-pm-governance.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-401-provider-cascade.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-401-quick-check.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-451-foundations.yaml +154 -0
- package/dist/university-content/quizzes/Q-para-451-when-to-invoke.yaml +182 -0
- package/dist/university-content/quizzes/Q-para-501-advanced-workflows.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-501-aspect-graph-advanced.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-501-aspect-graph-internals.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-501-assessment-loops.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-501-conductor-workspace.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-501-habits-practice.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-501-hook-enforcement.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-501-lore-system.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-501-platform-agent-ui.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-501-review-compliance.yaml +61 -0
- package/dist/university-content/quizzes/Q-para-501-sentinel-deep-dive.yaml +86 -0
- package/dist/university-content/quizzes/Q-para-501-session-intelligence.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-501-symphony-a-mail.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-501-symphony-networking.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-501-task-management.yaml +46 -0
- package/dist/university-content/quizzes/Q-para-601-agent-renaissance.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-601-attention-scoring.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-601-context-composition.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-601-data-sovereignty.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-601-event-stream.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-601-knowledge-streams.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-601-learning-loop.yaml +56 -0
- package/dist/university-content/quizzes/Q-para-601-maestro-team-collab.yaml +86 -0
- package/dist/university-content/quizzes/Q-para-601-nominations-debates.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-agent-notebooks.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-agent-pods-nevrland.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-agent-profiles.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-agent-roster.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-agent-state.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-learning-feedback-loop.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-model-tier-resolution.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-orchestration-enforcement.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-per-project-rosters.yaml +66 -0
- package/dist/university-content/quizzes/Q-para-701-symphony-visibility.yaml +66 -0
- package/dist/university-content/quizzes/Q-plsat-v2.yaml +904 -0
- package/dist/university-content/quizzes/Q-plsat-v3.yaml +2909 -0
- package/dist/university-content/reference.json +2 -2
- package/dist/university-ui/assets/{index-CecQrfSn.js → index-nNgzO1il.js} +2 -2
- package/dist/university-ui/assets/{index-CecQrfSn.js.map → index-nNgzO1il.js.map} +1 -1
- package/dist/university-ui/index.html +1 -1
- package/dist/{upgrade-GX56QE3C.js → upgrade-NKN63VTY.js} +2 -2
- package/dist/validate-XUQZTF3H.js +9 -0
- package/dist/{watch-YCODNIET.js → watch-25GJHQYT.js} +1 -1
- package/lore-ui/dist/assets/{index-Bk-K0qgN.js → index-DKhNxgtW.js} +10 -10
- package/lore-ui/dist/index.html +1 -1
- package/package.json +2 -2
- package/platform-ui/dist/assets/{AmbientSection-BYjt75R1.js → AmbientSection-CwatqcBD.js} +1 -1
- package/platform-ui/dist/assets/{CanvasSection-rKvA_vZj.js → CanvasSection-dFAthehN.js} +1 -1
- package/platform-ui/dist/assets/{DocsSection-CI9K73M-.js → DocsSection-BZ2SFJBZ.js} +1 -1
- package/platform-ui/dist/assets/{GitSection-DSGj_c6S.js → GitSection-MNNYU1tO.js} +1 -1
- package/platform-ui/dist/assets/{GraphSection-CawN7pC5.js → GraphSection-COYjb4Pt.js} +1 -1
- package/platform-ui/dist/assets/LoreSection-B0hUbfsJ.js +1 -0
- package/platform-ui/dist/assets/{SentinelSection-DNgoYMH0.js → SentinelSection-BCxW1DCp.js} +1 -1
- package/platform-ui/dist/assets/{SymphonySection-C0zfcqv3.js → SymphonySection-BsucZRqy.js} +1 -1
- package/platform-ui/dist/assets/{TeamSection-Bzd3Dt9Q.js → TeamSection-C0QNTudW.js} +1 -1
- package/platform-ui/dist/assets/{UniversitySection-tBr62R0S.js → UniversitySection-DN1-g9pw.js} +1 -1
- package/platform-ui/dist/assets/{index-BaOmyn11.js → index-DwUT8pju.js} +2 -2
- package/platform-ui/dist/index.html +1 -1
- package/dist/add-P76GEMGF.js +0 -8
- package/dist/agent-X6I2YWOB.js +0 -33
- package/dist/chunk-JQKKVAAN.js +0 -2
- package/dist/chunk-NQ47TA6C.js +0 -111
- package/dist/chunk-ODVKPZZ4.js +0 -2
- package/dist/chunk-Q2J542ST.js +0 -2
- package/dist/chunk-RBLK34IA.js +0 -11
- package/dist/chunk-RN4VE6P3.js +0 -521
- package/dist/chunk-WS2N27RX.js +0 -3
- package/dist/config-schema-GUQY2QN7.js +0 -2
- package/dist/decision-loader-2XPZE4EZ.js +0 -2
- package/dist/doctor-WMVULMQD.js +0 -2
- package/dist/list-5IUGP3ZB.js +0 -7
- package/dist/lore-loader-RVQI5GXL.js +0 -2
- package/dist/lore-loader-XY5MZRR2.js +0 -2
- package/dist/migrate-assessments-GEI5WMI2.js +0 -4
- package/dist/portal-compliance-6YR27IQU.js +0 -2
- package/dist/quiz-FE5UGAY2.js +0 -10
- package/dist/registry-KOOKFUWD.js +0 -20
- package/dist/reindex-I6LPAKCC.js +0 -2
- package/dist/serve-OY6XYL7F.js +0 -12
- package/dist/server-2MNROHF6.js +0 -7
- package/dist/session-tracker-MWJAJA6Z.js +0 -2
- package/dist/shift-PC6C7NUX.js +0 -60
- package/dist/show-BOAVWZPZ.js +0 -7
- package/dist/status-A37ECYNJ.js +0 -6
- package/dist/sync-DLUBV5HQ.js +0 -2
- package/dist/tools-5ITPEPSV.js +0 -2
- package/dist/university-content/courses/.purpose +0 -492
- package/dist/university-content/courses/para-001.json +0 -166
- package/dist/university-content/courses/para-101.json +0 -615
- package/dist/university-content/courses/para-201.json +0 -794
- package/dist/university-content/courses/para-301.json +0 -830
- package/dist/university-content/courses/para-401.json +0 -868
- package/dist/university-content/courses/para-501.json +0 -1166
- package/dist/university-content/courses/para-601.json +0 -719
- package/dist/university-content/courses/para-701.json +0 -807
- package/dist/university-content/plsat/.purpose +0 -162
- package/dist/university-content/plsat/v2.0.json +0 -760
- package/dist/university-content/plsat/v3.0.json +0 -3453
- package/dist/validate-C6SMKGYD.js +0 -9
- package/platform-ui/dist/assets/LoreSection-oO5dCe6O.js +0 -1
- /package/dist/{chunk-BV5PRPLB.js → chunk-HXGYVS2N.js} +0 -0
- /package/templates/paradigm/specs/{scan.md → probe.md} +0 -0
|
@@ -0,0 +1,56 @@
+id: Q-para-601-learning-loop
+title: 'PARA 601: Paradigm Ambient — The Learning Loop'
+description: 'Quiz for lesson: The Learning Loop'
+author: paradigm
+created: '2026-04-22'
+updated: '2026-04-22'
+tags:
+- course
+- para-601
+symbols: []
+difficulty: beginner
+passThreshold: 0.7
+category: paradigm-core
+origin: imported
+source: courses/para-601.json
+questions:
+- id: q1
+question: An agent completes a session where it discovers that Express v5 requires explicit async error wrapping. It records this in a lore entry but no journal entry is created. Three weeks later, a different agent makes the same mistake. Which phase of the learning loop failed?
+choices:
+A: DO — the first agent should not have made the mistake
+B: RECORD — the lore entry was insufficient
+C: LEARN — the insight was captured in lore but never extracted into a journal entry that could feed back into future context
+D: ADAPT — the context composition tool was broken
+E: ASSESS — the event stream missed the original correction
+correct: C
+explanation: 'The LEARN phase failed. The observation was recorded (RECORD phase worked — there is a lore entry), but the insight was never extracted into a learning journal entry with a transferable pattern. Without a journal entry, the ADAPT phase has nothing to inject into future sessions. The fix is recording a journal entry with `transferable: true` and extracting a `LearningPattern` so it feeds into context composition.'
+- id: q2
+question: Why did Paradigm v5.0 reduce CLAUDE.md from ~856 lines to ~150 lines?
+choices:
+A: The old content was outdated and no longer relevant
+B: Smaller files load faster from disk
+C: Loading all guidance every session wastes tokens on content irrelevant to the current task — on-demand resources let agents load only what they need
+D: Claude Code has a strict file size limit for CLAUDE.md
+E: The content was moved to .paradigm/config.yaml instead
+correct: C
+explanation: Context engineering is about putting high-signal, task-relevant content in the context window. A 856-line CLAUDE.md loaded every session means hundreds of tokens spent on logging rules when the task is about testing, or portal conventions when the task is about lore. The slim CLAUDE.md provides universal orientation, and 12 `paradigm://guidance/{topic}` resources provide targeted guidance on demand.
+- id: q3
+question: Which phases of the learning loop existed before v5.0?
+choices:
+A: All six phases existed but were unreliable
+B: Only DO existed — everything else is new in v5.0
+C: DO, RECORD, and partial ASSESS (via ripple analysis) — LEARN, ADAPT, and full ASSESS were manual
+D: DO, RECORD, ASSESS, and LEARN — only ADAPT was missing
+E: DO and ADAPT — recording and assessment were added in v5.0
+correct: C
+explanation: Before v5.0, Paradigm had DO (agents perform work), RECORD (lore entries, .purpose files, portal.yaml), and partial ASSESS (ripple analysis could identify impact, but there was no event stream or attention filtering). The LEARN phase (journal entries, nominations) and ADAPT phase (context composition from learnings) were entirely manual — a human had to read lore and brief the next agent.
+- id: q4
+question: A security agent contributes a context section listing recently added gates. Where is this contribution defined?
+choices:
+A: In the security agent's `.agent` file under the `context.contributions` field
+B: In `.paradigm/config.yaml` under a `context_sections` key
+C: In CLAUDE.md as a static section
+D: In `portal.yaml` under each gate definition
+E: In the security agent's learning journal
+correct: A
+explanation: Agent context contributions are defined in the `AgentContext.contributions` array on the agent's profile (the `.agent` file). Each contribution specifies a `section` name, inline `content` or a `content_ref` MCP resource URI, and a `priority` (high, medium, low). High-priority contributions are always included; low-priority ones are loaded on demand. This allows each agent to inject task-relevant context without hardcoding it into CLAUDE.md.
@@ -0,0 +1,86 @@
|
|
|
1
|
+
id: Q-para-601-maestro-team-collab
|
|
2
|
+
title: 'PARA 601: Paradigm Ambient — Maestro: Visible Team Orchestration'
|
|
3
|
+
description: 'Quiz for lesson: Maestro: Visible Team Orchestration'
|
|
4
|
+
author: paradigm
|
|
5
|
+
created: '2026-04-22'
|
|
6
|
+
updated: '2026-04-22'
|
|
7
|
+
tags:
|
|
8
|
+
- course
|
|
9
|
+
- para-601
|
|
10
|
+
symbols: []
|
|
11
|
+
difficulty: beginner
|
|
12
|
+
passThreshold: 0.7
|
|
13
|
+
category: paradigm-core
|
|
14
|
+
origin: imported
|
|
15
|
+
source: courses/para-601.json
|
|
16
|
+
questions:
|
|
17
|
+
- id: q1
|
|
18
|
+
question: What is the primary difference between Maestro and traditional multi-agent orchestration?
|
|
19
|
+
choices:
|
|
20
|
+
A: Maestro uses more agents per task
|
|
21
|
+
B: Maestro presents each agent's response as an attributed message rather than synthesizing a single summary
|
|
22
|
+
C: Maestro runs agents in parallel instead of sequentially
|
|
23
|
+
D: Maestro eliminates the need for human approval
|
|
24
|
+
E: Maestro uses persistent background agents
|
|
25
|
+
correct: B
|
|
26
|
+
explanation: Maestro's key innovation is visibility. Instead of synthesizing agent responses into a single voice, each agent speaks for itself with an attribution prefix like [architect]. This preserves individual perspectives and lets the human see disagreements, novel approaches, and the reasoning behind each contribution.
|
|
27
|
+
- id: q2
|
|
28
|
+
question: When a security agent is consistently dismissed by the user (>60% dismissal rate), what happens at postflight?
|
|
29
|
+
choices:
|
|
30
|
+
A: The agent is automatically deleted
|
|
31
|
+
B: The agent is moved to a different project
|
|
32
|
+
C: paradigm_ambient_learn raises the agent's attention threshold, making it nominate less
|
|
33
|
+
D: The agent's expertise scores are reset to zero
|
|
34
|
+
E: Nothing changes — threshold adjustment is manual
|
|
35
|
+
correct: C
|
|
36
|
+
explanation: The learning loop is automatic. When dismissal rate exceeds 60%, paradigm_ambient_learn raises the agent's attention threshold by 0.05, meaning it requires higher relevance scores before nominating. This self-tunes the agent to speak less when it is being noisy. Conversely, >80% acceptance lowers the threshold, encouraging the agent to contribute more.
|
|
37
|
+
- id: q3
|
|
38
|
+
question: What ambient context is injected into agent profiles before Maestro spawns them?
|
|
39
|
+
choices:
|
|
40
|
+
A: Only the agent's expertise scores
|
|
41
|
+
B: The full project codebase
|
|
42
|
+
C: Recent team decisions, transferable journal insights, and pending nominations
|
|
43
|
+
D: The complete git history
|
|
44
|
+
E: Nothing — agents start fresh each session
|
|
45
|
+
correct: C
|
|
46
|
+
explanation: buildProfileEnrichment() accepts an ambientContext parameter containing recent team decisions (from decision-loader), transferable journal insights (from journal-loader), and pending nominations (from nomination-engine). This gives each agent awareness of what the team has decided, what patterns have been discovered, and what issues are outstanding — creating genuine team intelligence rather than isolated tool execution.
|
|
47
|
+
- id: q4
|
|
48
|
+
question: What does the Neverland test health status 'calibrating' indicate?
|
|
49
|
+
choices:
|
|
50
|
+
A: No agents have been created yet
|
|
51
|
+
B: Agents have fewer than 10 total nominations
|
|
52
|
+
C: Average acceptance rate is between 50% and 70% — agents are learning but haven't reached target
|
|
53
|
+
D: All agents have been benched
|
|
54
|
+
E: The system has reached maturity and needs no further adjustment
|
|
55
|
+
correct: C
|
|
56
|
+
explanation: 'The Neverland health statuses are: cold-start (<10 nominations), accumulating (<50% accept rate), calibrating (50-70% accept rate), and mature (>70%). ''Calibrating'' means agents are past the initial noise phase and their thresholds are actively being tuned by the learning loop, but haven''t yet reached the 70% target. This typically occurs around sessions 4-7.'
|
|
57
|
+
- id: q5
|
|
58
|
+
question: How does benching an agent differ from deleting it?
|
|
59
|
+
choices:
|
|
60
|
+
A: There is no difference — bench removes the agent profile
|
|
61
|
+
B: Benching sets benched=true — the profile stays intact but Maestro and the nomination engine skip it
|
|
62
|
+
C: Benching only affects CLI commands, not MCP tools
|
|
63
|
+
D: Benching removes expertise but keeps the profile
|
|
64
|
+
E: Benching is permanent while deletion can be undone
|
|
65
|
+
correct: B
|
|
66
|
+
explanation: Benching is a reversible pause. The agent's .agent file remains with all expertise, journal entries, notebooks, and transferable patterns intact. Both paradigm_orchestrate_inline and processEvent check profile.benched and skip the agent if true. paradigm agent activate restores the agent immediately with all its accumulated learning preserved.
|
|
67
|
+
- id: q6
|
|
68
|
+
question: Why does the Teacher Model write journal entries instead of adjusting thresholds directly?
|
|
69
|
+
choices:
|
|
70
|
+
A: Journal entries are cheaper in token cost
|
|
71
|
+
B: Thresholds can only go up, not down
|
|
72
|
+
C: Journal entries carry semantic knowledge (what to look for, what was wrong) while thresholds are just a single number that says 'speak more' or 'speak less'
|
|
73
|
+
D: The threshold system is being removed in favor of journals
|
|
74
|
+
E: Journal entries are visible to the user while thresholds are hidden
|
|
75
|
+
correct: C
|
|
76
|
+
explanation: 'Thresholds control volume (speak more/less) but not quality. A threshold can''t teach the security agent to distinguish audit logging from vulnerabilities. A journal entry can: ''When auth files change with logging additions, recognize these as security-positive — don''t flag as violations.'' This knowledge promotes to notebooks and appears in future prompts via buildProfileEnrichment, making the agent genuinely smarter rather than just quieter.'
|
|
77
|
+
- id: q7
|
|
78
|
+
question: What is the documentor agent's relationship to the stop hook?
|
|
79
|
+
choices:
|
|
80
|
+
A: The documentor replaces the stop hook entirely
|
|
81
|
+
B: The documentor runs before the stop hook and should prevent it from ever triggering, but the hook remains as a safety net
|
|
82
|
+
C: The stop hook runs the documentor automatically
|
|
83
|
+
D: They are unrelated systems
|
|
84
|
+
E: The documentor disables the stop hook for the session
|
|
85
|
+
correct: B
|
|
86
|
+
explanation: The stop hook blocks when .purpose files are missing for modified source directories. The documentor's job is to update these files as the final orchestration stage, so the stop hook should never need to block. However, the stop hook remains as a safety net — if the documentor misses something or orchestration wasn't used, the hook catches it. Defense in depth.
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
id: Q-para-601-nominations-debates
|
|
2
|
+
title: 'PARA 601: Paradigm Ambient — Nominations & Debates'
|
|
3
|
+
description: 'Quiz for lesson: Nominations & Debates'
|
|
4
|
+
author: paradigm
|
|
5
|
+
created: '2026-04-22'
|
|
6
|
+
updated: '2026-04-22'
|
|
7
|
+
tags:
|
|
8
|
+
- course
|
|
9
|
+
- para-601
|
|
10
|
+
symbols: []
|
|
11
|
+
difficulty: beginner
|
|
12
|
+
passThreshold: 0.7
|
|
13
|
+
category: paradigm-core
|
|
14
|
+
origin: imported
|
|
15
|
+
source: courses/para-601.json
|
|
16
|
+
questions:
|
|
17
|
+
- id: q1
|
|
18
|
+
question: 'A security agent creates a nomination with `urgency: ''medium''` and `type: ''warning''` about a missing gate. The user has configured the security agent''s `min_urgency` to `''high''`. What happens?'
|
|
19
|
+
choices:
|
|
20
|
+
A: The nomination is deleted — it does not meet the urgency threshold
|
|
21
|
+
B: The nomination is recorded but not surfaced — it falls below the user's configured minimum urgency for that agent
|
|
22
|
+
C: The nomination is surfaced anyway because warnings always override urgency settings
|
|
23
|
+
D: The nomination is upgraded to `high` urgency automatically
|
|
24
|
+
E: The user's setting is ignored because security nominations are always shown
|
|
25
|
+
correct: B
|
|
26
|
+
explanation: 'The nomination is still recorded in `.paradigm/events/nominations.jsonl` (all nominations are persisted), but surfacing rules respect the user''s configuration. Since `min_urgency: ''high''` means only `high` and `critical` nominations from the security agent are shown, a `medium` nomination is suppressed. The nomination remains available if the user later queries all nominations or lowers the threshold.'
|
|
27
|
+
- id: q2
|
|
28
|
+
question: An architect nominates "Use a message queue for async processing" while a builder nominates "Use direct HTTP calls for simplicity" — both triggered by the same `route-created` event. How does Paradigm classify this?
|
|
29
|
+
choices:
|
|
30
|
+
A: Two independent nominations — no debate is detected because different agents created them
|
|
31
|
+
B: A complementary debate — both are responding to the same event
|
|
32
|
+
C: A conflicting debate — the nominations share a `triggered_by` event but propose different approaches
|
|
33
|
+
D: An error — only one agent should nominate per event
|
|
34
|
+
E: The nomination with the higher relevance score wins automatically
|
|
35
|
+
correct: C
|
|
36
|
+
explanation: Debate detection checks for overlapping `triggered_by` event IDs. Both nominations reference the same `route-created` event, so they are grouped. Since they propose different approaches (message queue vs HTTP calls), the debate is classified as `conflicting`. The debate is surfaced as a group so the human sees both perspectives together rather than individual nominations. Resolution requires a human choice or consensus.
|
|
37
|
+
- id: q3
|
|
38
|
+
question: 'A nomination has `evidence: [{ symbol: ''^payment-authorized'', file: ''portal.yaml'', lines: { start: 42, end: 45 }, description: ''All /api/payments routes require this gate'' }]`. Why is this better than a nomination without evidence?'
|
|
39
|
+
choices:
|
|
40
|
+
A: Evidence makes the nomination sort higher in the UI
|
|
41
|
+
B: Nominations without evidence are automatically dismissed
|
|
42
|
+
C: Evidence transforms the nomination from opinion to argument — the human can verify the claim against specific code locations without investigating from scratch
|
|
43
|
+
D: Evidence is required for all nomination types
|
|
44
|
+
E: Evidence triggers automatic remediation
|
|
45
|
+
correct: C
|
|
46
|
+
explanation: Evidence gives the nomination credibility and actionability. Without evidence, "This route needs a gate" requires the human to verify the claim manually. With evidence pointing to portal.yaml line 42 and the specific gate symbol, the human can quickly confirm the pattern and act. Evidence is optional (the `evidence` field is nullable), but nominations with evidence are more likely to be accepted rather than dismissed.
|
|
47
|
+
- id: q4
|
|
48
|
+
question: 'A tester creates a nomination with `type: ''offer''` and `action_offered: ''Write integration tests for the new payment flow''`. The human responds via `paradigm_ambient_engage` with `response: ''accepted''`. What happens next?'
|
|
49
|
+
choices:
|
|
50
|
+
A: Paradigm automatically generates the test files
|
|
51
|
+
B: The nomination's `detail` and `evidence` are returned so the tester agent can proceed with the offered action
|
|
52
|
+
C: The nomination is moved to a task queue for later execution
|
|
53
|
+
D: The tester agent is immediately spawned in a new session
|
|
54
|
+
E: Nothing — acceptance is recorded but has no effect
|
|
55
|
+
correct: B
|
|
56
|
+
explanation: When a nomination is accepted via `paradigm_ambient_engage`, the full nomination including `detail` and `evidence` is returned. For `offer` type nominations, this signals that the agent should proceed with the offered action. The actual execution depends on the orchestration context — in a multi-agent session, the tester may act immediately; in a single-agent session, the offer acceptance is recorded for the next session. Paradigm does not auto-generate code; it surfaces the offer for the agent to execute.
|
|
57
|
+
- id: q5
|
|
58
|
+
question: Over 30 sessions, a reviewer agent's nominations are dismissed 80% of the time. What does this pattern suggest?
|
|
59
|
+
choices:
|
|
60
|
+
A: The reviewer agent should be removed from the project
|
|
61
|
+
B: The reviewer's attention threshold may be too low — it is nominating on weak matches, and raising the threshold would reduce false positives
|
|
62
|
+
C: The human is ignoring valid feedback and should be trained
|
|
63
|
+
D: Dismissed nominations indicate the system is working correctly — not all observations are actionable
|
|
64
|
+
E: The nomination storage file is corrupted
|
|
65
|
+
correct: B
|
|
66
|
+
explanation: An 80% dismissal rate is a strong signal that the agent is speaking up too often on weak matches. The most likely fix is raising the agent's attention threshold (e.g., from 0.6 to 0.75) so it only nominates on stronger matches. The `paradigm_ambient_engage` feedback loop exists precisely for this calibration — over time, the pattern of accepted vs dismissed nominations reveals whether an agent's threshold is well-tuned.
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
id: Q-para-701-agent-notebooks
|
|
2
|
+
title: 'PARA 701: Agent Mastery — Lesson 3: Agent Notebooks'
|
|
3
|
+
description: 'Quiz for lesson: Lesson 3: Agent Notebooks'
|
|
4
|
+
author: paradigm
|
|
5
|
+
created: '2026-04-22'
|
|
6
|
+
updated: '2026-04-22'
|
|
7
|
+
tags:
|
|
8
|
+
- course
|
|
9
|
+
- para-701
|
|
10
|
+
symbols: []
|
|
11
|
+
difficulty: beginner
|
|
12
|
+
passThreshold: 0.7
|
|
13
|
+
category: paradigm-core
|
|
14
|
+
origin: imported
|
|
15
|
+
source: courses/para-701.json
|
|
16
|
+
questions:
|
|
17
|
+
- id: q1
|
|
18
|
+
question: A security agent has 30 notebook entries. During orchestration, how many are injected into its prompt?
|
|
19
|
+
choices:
|
|
20
|
+
A: All 30 — notebooks are always fully injected
|
|
21
|
+
B: The top 5 by concept match against the task's relevant symbols, sorted by appliedCount
|
|
22
|
+
C: The top 10, since 10 high-signal entries is the recommended maximum
|
|
23
|
+
D: Only entries with appliedCount > 0
|
|
24
|
+
E: A random selection of 5 entries
|
|
25
|
+
correct: B
|
|
26
|
+
explanation: 'buildProfileEnrichment() includes `notebookEntries.slice(0, 5)` — a hard limit of 5 entries. These are pre-filtered by concept match against the task''s relevant symbols and sorted by appliedCount descending (most-used first). The 5-entry budget is a deliberate token constraint: each entry consumes 100-300 tokens, so 5 entries use 500-1,500 tokens, which balances value against context budget.'
|
|
27
|
+
- id: q2
|
|
28
|
+
question: The security agent has a global notebook entry `nb-jwt-validation-001` and the current project has a project notebook entry with the same ID. Which one is used?
|
|
29
|
+
choices:
|
|
30
|
+
A: The global entry — global always takes precedence
|
|
31
|
+
B: Both entries are merged into one
|
|
32
|
+
C: The project entry wins — project overrides global on ID collision
|
|
33
|
+
D: An error is thrown for duplicate IDs
|
|
34
|
+
E: The entry with the higher appliedCount is used
|
|
35
|
+
correct: C
|
|
36
|
+
explanation: 'The notebook loader reads global entries first, then project entries. Entries are stored in a Map keyed by ID. When a project entry has the same ID as a global entry, the project entry overwrites the global one: `entries.set(entry.id, entry)`. This allows a project to customize an agent''s generic knowledge for project-specific needs — for example, overriding a generic JWT pattern with the project''s specific token rotation strategy.'
|
|
37
|
+
- id: q3
|
|
38
|
+
question: 'What is the difference between a notebook entry with `provenance.source: ''lore''` and one with `provenance.source: ''manual''`?'
|
|
39
|
+
choices:
|
|
40
|
+
A: Lore entries are read-only; manual entries are editable
|
|
41
|
+
B: Lore entries were promoted from session experience via promoteFromLore() and link to the original lore entry; manual entries were written directly without a session origin
|
|
42
|
+
C: Manual entries have higher confidence scores by default
|
|
43
|
+
D: Lore entries are global; manual entries are project-scoped
|
|
44
|
+
E: There is no functional difference — provenance is informational only
|
|
45
|
+
correct: B
|
|
46
|
+
explanation: 'Provenance tracks the origin of a notebook entry. `source: ''lore''` means the entry was created by `promoteFromLore()` — it links to the original lore entry via `loreEntryId` and was extracted from real session experience. `source: ''manual''` means someone wrote it directly (bootstrapping or direct curation). Lore-promoted entries have a verifiable chain of evidence (the session where the pattern was discovered), while manual entries rely on the author''s judgment. Both are functionally equivalent in how they''re used during enrichment.'
|
|
47
|
+
- id: q4
|
|
48
|
+
question: Why is `appliedCount` the primary sorting key for notebook entries rather than `confidence`?
|
|
49
|
+
choices:
|
|
50
|
+
A: appliedCount is easier to compute than confidence
|
|
51
|
+
B: Confidence is deprecated in favor of appliedCount
|
|
52
|
+
C: appliedCount is an empirical signal — an entry applied 15 times is proven useful in practice, while confidence is an initial estimate that may not reflect actual utility
|
|
53
|
+
D: appliedCount determines the entry's storage priority on disk
|
|
54
|
+
E: Confidence only applies to expertise scores, not notebook entries
|
|
55
|
+
correct: C
|
|
56
|
+
explanation: appliedCount tracks how many times an entry was actually used in orchestration. An entry with appliedCount of 15 has been surfaced and found useful in 15 sessions — this is empirical evidence of value. Confidence is an initial estimate (0.0-1.0) that may be set optimistically when the entry is created. A newly bootstrapped entry might have confidence 0.8 but appliedCount 0, meaning it looks good on paper but has never been validated. The system trusts what has been proven (appliedCount) over what has been estimated (confidence).
|
|
57
|
+
- id: q5
|
|
58
|
+
question: An agent's notebook entry snippet is 800 characters long. How does buildProfileEnrichment() handle this?
|
|
59
|
+
choices:
|
|
60
|
+
A: It injects the full 800-character snippet
|
|
61
|
+
B: It skips the entry entirely
|
|
62
|
+
C: It truncates the snippet to 300 characters and appends '...'
|
|
63
|
+
D: It splits the snippet across multiple prompt sections
|
|
64
|
+
E: It compresses the snippet using summarization
|
|
65
|
+
correct: C
|
|
66
|
+
explanation: 'buildProfileEnrichment() truncates long snippets: `nb.snippet.length > 300 ? nb.snippet.slice(0, 300) + ''...'' : nb.snippet`. The 300-character limit prevents a single large entry from consuming the entire notebook token budget. If the agent needs the full snippet, it can use `paradigm_notebook_search` to retrieve the complete entry on demand. This is the context engineering principle at work — inject enough to be useful, provide a retrieval path for more detail.'
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
id: Q-para-701-agent-pods-nevrland
|
|
2
|
+
title: 'PARA 701: Agent Mastery — Lesson 10: Agent Pods & nevr.land'
|
|
3
|
+
description: 'Quiz for lesson: Lesson 10: Agent Pods & nevr.land'
|
|
4
|
+
author: paradigm
|
|
5
|
+
created: '2026-04-22'
|
|
6
|
+
updated: '2026-04-22'
|
|
7
|
+
tags:
|
|
8
|
+
- course
|
|
9
|
+
- para-701
|
|
10
|
+
symbols: []
|
|
11
|
+
difficulty: beginner
|
|
12
|
+
passThreshold: 0.7
|
|
13
|
+
category: paradigm-core
|
|
14
|
+
origin: imported
|
|
15
|
+
source: courses/para-701.json
|
|
16
|
+
questions:
|
|
17
|
+
- id: q1
|
|
18
|
+
question: A developer activates the Ship Pod and then the Design Pod. How many agents are in the roster?
|
|
19
|
+
choices:
|
|
20
|
+
A: 6 — the Design Pod replaces the Ship Pod
|
|
21
|
+
B: 11 — Ship Pod (6) + Design Pod (5), no overlap
|
|
22
|
+
C: The union of both pods — pods are additive, agents from both are in the roster with no duplicates
|
|
23
|
+
D: Only the Design Pod agents — the last activated pod wins
|
|
24
|
+
E: All 54 agents — activating any pod enables all agents
|
|
25
|
+
correct: C
|
|
26
|
+
explanation: 'Pods are additive. Activating a pod adds its agents to the existing roster. The Ship Pod adds architect, builder, reviewer, tester, security, and documentor. The Design Pod adds designer, copywriter, a11y, creative, and presenter. The roster now contains the union of both: 11 unique agents (no overlap between these two pods). If pods had overlapping agents (e.g., both include reviewer), the agent would appear once in the roster.'
|
|
27
|
+
- id: q2
|
|
28
|
+
question: What is the difference between a pod and a roster?
|
|
29
|
+
choices:
|
|
30
|
+
A: They are the same thing with different names
|
|
31
|
+
B: A pod is a named preset of agents (a template); a roster is the actual list of active agents on a project. Activating a pod modifies the roster.
|
|
32
|
+
C: A pod modifies agent behavior; a roster just lists agent names
|
|
33
|
+
D: A roster can contain pods but not individual agents
|
|
34
|
+
E: Pods are for production; rosters are for development
|
|
35
|
+
correct: B
|
|
36
|
+
explanation: A pod is a template — a named group of agents optimized for a workflow (like "Ship Pod" = architect + builder + reviewer + tester + security + documentor). A roster is the actual configuration file (`.paradigm/roster.yaml`) that lists which agents are active on a project. The command `paradigm agents activate --pod ship-pod` reads the pod template and adds its agents to the roster. The pod is the input; the roster is the output.
|
|
37
|
+
- id: q3
|
|
38
|
+
question: A community-published agent from nevr.land is installed. How does it participate in the local Paradigm system?
|
|
39
|
+
choices:
|
|
40
|
+
A: It runs in a sandboxed mode with limited capabilities
|
|
41
|
+
B: 'It is installed to ~/.paradigm/agents/ and participates identically to built-in agents: same orchestration, attention scoring, learning loop, expertise tracking, and notebook system'
|
|
42
|
+
C: It can only be used manually, not through orchestration
|
|
43
|
+
D: It requires an API key from the publisher to function
|
|
44
|
+
E: It is read-only and cannot accumulate expertise or notebook entries
|
|
45
|
+
correct: B
|
|
46
|
+
explanation: 'An installed agent follows the standard `.agent` schema and is placed in `~/.paradigm/agents/`. The system treats it identically to a built-in agent: it is included in orchestration planning (if in the roster), scores events against its attention patterns, self-nominates contributions, accumulates expertise through verdicts, and builds notebook entries through the learning loop. Trust level affects installation warnings, not runtime capabilities.'
|
|
47
|
+
- id: q4
|
|
48
|
+
question: An agent package on nevr.land includes a `notebooks/` directory with 5 YAML files. Where are these installed?
|
|
49
|
+
choices:
|
|
50
|
+
A: In the project's .paradigm/notebooks/{agent-id}/
|
|
51
|
+
B: In ~/.paradigm/notebooks/{agent-id}/ as global entries that travel across all projects
|
|
52
|
+
C: They are not installed — notebooks must be created manually
|
|
53
|
+
D: In the agent's .agent file as inline snippets
|
|
54
|
+
E: In ~/.paradigm/agents/{agent-id}/notebooks/
|
|
55
|
+
correct: B
|
|
56
|
+
explanation: 'Notebook entries from an agent package are installed into `~/.paradigm/notebooks/{agent-id}/` as global entries. This means they are available on every project the agent joins, providing bootstrapping knowledge from day one. This matches the storage pattern: global notebooks at ~/.paradigm/notebooks/ travel with the agent, while project notebooks at .paradigm/notebooks/ are project-specific. Bootstrapping entries should be global because they represent the agent''s foundational knowledge, not project-specific patterns.'
|
|
57
|
+
- id: q5
|
|
58
|
+
question: Why does the agent package format use YAML files instead of a compiled binary format?
|
|
59
|
+
choices:
|
|
60
|
+
A: YAML is faster to parse than binary formats
|
|
61
|
+
B: Binary formats are not supported on all platforms
|
|
62
|
+
C: YAML is human-readable, enabling inspection, forking, and modification before installation — agents are knowledge, not code, and should be as shareable and composable as possible
|
|
63
|
+
D: YAML is required by the Paradigm schema validator
|
|
64
|
+
E: Binary formats would require code signing
|
|
65
|
+
correct: C
|
|
66
|
+
explanation: The design principle is that agents are knowledge, not compiled code. A human-readable YAML format means you can inspect an agent's personality, expertise, behaviors, and attention patterns before installing it. You can fork a community agent, modify its attention threshold, add a behavior, and republish. You can copy a single transferable pattern from one agent to another. A binary format would make all of this opaque. The package format (agent.yaml + notebooks/ + metadata.yaml) is intentionally the simplest possible structure that captures the complete agent identity.
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
id: Q-para-701-agent-profiles
|
|
2
|
+
title: 'PARA 701: Agent Mastery — Lesson 2: Agent Profiles Deep Dive'
|
|
3
|
+
description: 'Quiz for lesson: Lesson 2: Agent Profiles Deep Dive'
|
|
4
|
+
author: paradigm
|
|
5
|
+
created: '2026-04-22'
|
|
6
|
+
updated: '2026-04-22'
|
|
7
|
+
tags:
|
|
8
|
+
- course
|
|
9
|
+
- para-701
|
|
10
|
+
symbols: []
|
|
11
|
+
difficulty: beginner
|
|
12
|
+
passThreshold: 0.7
|
|
13
|
+
category: paradigm-core
|
|
14
|
+
origin: imported
|
|
15
|
+
source: courses/para-701.json
|
|
16
|
+
questions:
|
|
17
|
+
- id: q1
|
|
18
|
+
question: 'A security agent has `confidence: 0.85` on `#auth-security`. Over the next 5 sessions, its security review contributions are accepted 4 times and dismissed 1 time. What is the approximate new confidence score?'
|
|
19
|
+
choices:
|
|
20
|
+
A: 0.85 — confidence does not change automatically
|
|
21
|
+
B: ~0.95 — 4 accepts at +0.03 each, 1 dismiss at -0.02 = +0.10 net
|
|
22
|
+
C: 1.0 — confidence maxes out after enough acceptances
|
|
23
|
+
D: ~0.80 — the single dismissal outweighs the acceptances
|
|
24
|
+
E: Depends on the reviewer, not the verdicts
|
|
25
|
+
correct: B
|
|
26
|
+
explanation: 'Expertise confidence adjusts per verdict: `+0.03` for accepted, `-0.02` for dismissed, `-0.01` for revised. Four acceptances: `4 * 0.03 = +0.12`. One dismissal: `1 * -0.02 = -0.02`. Net delta: `+0.10`. Starting from 0.85, the new confidence is approximately 0.95. In practice, confidence is clamped to `[0.0, 1.0]`, so it would be `min(1.0, 0.95) = 0.95`.'
|
|
27
|
+
- id: q2
|
|
28
|
+
question: What is the difference between `collaboration.stance` and `collaboration.with.{agent}.stance`?
|
|
29
|
+
choices:
|
|
30
|
+
A: They are the same field with different syntax
|
|
31
|
+
B: '`collaboration.stance` is the default stance toward all agents; `collaboration.with.{agent}.stance` overrides it for a specific agent'
|
|
32
|
+
C: '`collaboration.with` is deprecated in favor of `collaboration.stance`'
|
|
33
|
+
D: '`collaboration.stance` applies to humans; `collaboration.with` applies to agents'
|
|
34
|
+
E: '`collaboration.stance` only applies during orchestration; `collaboration.with` applies in Symphony'
|
|
35
|
+
correct: B
|
|
36
|
+
explanation: 'The top-level `collaboration.stance` (e.g., `advisory`) is the default relationship the agent has with all other agents. The `collaboration.with.{agent}.stance` (e.g., `with.architect.stance: peer`) overrides it for a specific agent. This allows fine-grained relationships: the security agent is `advisory` by default but treats the architect as a `peer` with `can_contradict: true`.'
|
|
37
|
+
- id: q3
|
|
38
|
+
question: 'An agent has a transferable pattern with `successRate: 0.5`. How does this affect its inclusion in orchestration prompts?'
|
|
39
|
+
choices:
|
|
40
|
+
A: It is included with a warning label
|
|
41
|
+
B: It is always included — all patterns are injected regardless of success rate
|
|
42
|
+
C: It is excluded — buildProfileEnrichment() only includes patterns with successRate >= 0.7
|
|
43
|
+
D: It is included but with reduced priority
|
|
44
|
+
E: It triggers a notification to the human to update the pattern
|
|
45
|
+
correct: C
|
|
46
|
+
explanation: 'The `buildProfileEnrichment()` function filters transferable patterns: `(profile.transferable || []).filter(p => p.successRate >= 0.7)`. Patterns with a success rate below 0.7 are excluded from prompt enrichment. A 0.5 success rate means the pattern works only half the time — injecting it into every orchestration prompt would waste context tokens on unreliable guidance. The agent needs to improve the pattern (or it will naturally improve as the system tracks successes) before it gets promoted to prompt enrichment.'
|
|
47
|
+
- id: q4
|
|
48
|
+
question: Why does the security agent's description explicitly state "He flags issues but does NOT implement fixes — that's the Builder's job"?
|
|
49
|
+
choices:
|
|
50
|
+
A: It is a style preference with no functional impact
|
|
51
|
+
B: The description is injected into the agent's prompt during orchestration, so explicit boundaries prevent the security agent from writing implementation code when it should only be reviewing
|
|
52
|
+
C: It is documentation for the human developer only
|
|
53
|
+
D: It prevents the security agent from being activated on implementation tasks
|
|
54
|
+
E: It triggers the orchestrator to always pair security with builder
|
|
55
|
+
correct: B
|
|
56
|
+
explanation: The agent's `description` field is injected into the orchestration prompt. When the LLM receives the security agent's prompt, it reads this boundary statement and constrains its behavior accordingly. Without explicit boundaries in the description, the LLM might generate implementation code when it should only flag issues for the builder. Clear description boundaries are how you enforce separation of concerns between agents that share the same underlying LLM.
|
|
57
|
+
- id: q5
|
|
58
|
+
question: What happens when the orchestrator calls buildProfileEnrichment() for an agent?
|
|
59
|
+
choices:
|
|
60
|
+
A: It writes the agent's profile to disk in a new format
|
|
61
|
+
B: It compiles the agent's .agent file into a binary prompt template
|
|
62
|
+
C: It assembles the agent's personality, relevant expertise, transferable patterns, notebook entries, and agent state into a markdown prompt section that makes the base LLM behave as that specific agent
|
|
63
|
+
D: It validates the agent's profile for schema errors
|
|
64
|
+
E: It sends the profile to the Conductor UI for display
|
|
65
|
+
correct: C
|
|
66
|
+
explanation: buildProfileEnrichment() is the function that transforms a static .agent file into a dynamic prompt enrichment section. It takes the agent's profile, the relevant symbols for the current task, notebook entries matched by concept, ambient context (decisions, journal insights, nominations), and agent state (last session, pending work). It assembles all of this into markdown that includes sections like '## Agent Identity', '## Your Expertise on Relevant Symbols', '## Transferable Patterns', '## Relevant Notebook Entries', and '## Your Recent Work on This Project'. This prompt enrichment is what makes the same base LLM behave differently as each agent.
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
id: Q-para-701-agent-roster
|
|
2
|
+
title: 'PARA 701: Agent Mastery — Lesson 1: The Agent Roster'
|
|
3
|
+
description: 'Quiz for lesson: Lesson 1: The Agent Roster'
|
|
4
|
+
author: paradigm
|
|
5
|
+
created: '2026-04-22'
|
|
6
|
+
updated: '2026-04-22'
|
|
7
|
+
tags:
|
|
8
|
+
- course
|
|
9
|
+
- para-701
|
|
10
|
+
symbols: []
|
|
11
|
+
difficulty: beginner
|
|
12
|
+
passThreshold: 0.7
|
|
13
|
+
category: paradigm-core
|
|
14
|
+
origin: imported
|
|
15
|
+
source: courses/para-701.json
|
|
16
|
+
questions:
|
|
17
|
+
- id: q1
|
|
18
|
+
question: A new SaaS web app project needs to build a payment flow with Stripe integration. The task touches `^authenticated`, `$checkout-flow`, and `#payment-service`. Which agents would the orchestrator most likely include, and why?
|
|
19
|
+
choices:
|
|
20
|
+
A: Only the builder — it is a coding task
|
|
21
|
+
B: The architect (plans the flow), builder (implements), security (gates on ^authenticated), reviewer (quality check), and documentor (.purpose updates) — because their attention patterns match the symbols involved
|
|
22
|
+
C: All 54 agents — payments are critical and need full coverage
|
|
23
|
+
D: The designer and copywriter — it is a user-facing payment page
|
|
24
|
+
E: The devops agent — Stripe is infrastructure
|
|
25
|
+
correct: B
|
|
26
|
+
explanation: The orchestrator matches task symbols against agent attention patterns. The architect's `$*` pattern matches `$checkout-flow`. Security's `^*` pattern matches `^authenticated`. The builder's path patterns match source files. The reviewer watches all symbol types. The documentor has the lowest threshold (0.3) and matches symbol patterns. The designer and copywriter might be included if UI work is required, but the core selection is driven by symbol matching against attention patterns.
|
|
27
|
+
- id: q2
|
|
28
|
+
question: Why does the security agent have an attention threshold of 0.45 while the builder has 0.75?
|
|
29
|
+
choices:
|
|
30
|
+
A: The security agent is more important than the builder
|
|
31
|
+
B: The builder writes more code and needs to be more selective about when it speaks up
|
|
32
|
+
C: The cost of missing a security issue (low threshold = more alerts) far outweighs the cost of a false alarm, while the builder should only engage when directly relevant to implementation
|
|
33
|
+
D: Lower thresholds mean the agent runs faster
|
|
34
|
+
E: The thresholds are arbitrary defaults with no design rationale
|
|
35
|
+
correct: C
|
|
36
|
+
explanation: Threshold values encode the asymmetry of costs. A security agent that stays quiet when a gate is missing creates a vulnerability — the cost of a false negative is high. So it uses a low threshold (0.45) to speak up early and often. The builder speaking up on tasks not directly relevant to implementation just adds noise. The cost of a builder false positive is wasted context tokens. So it uses a higher threshold (0.75) to stay focused.
|
|
37
|
+
- id: q3
|
|
38
|
+
question: What is the purpose of the Meta tier agents (forge, trainer, documentor, mediator)?
|
|
39
|
+
choices:
|
|
40
|
+
A: They manage the codebase directly, writing and reviewing source files
|
|
41
|
+
B: They manage other agents — designing new agents, training existing ones, maintaining Paradigm files, and resolving agent disagreements
|
|
42
|
+
C: They are administrative agents that handle user authentication
|
|
43
|
+
D: They provide backup for the builder when it is unavailable
|
|
44
|
+
E: They are deprecated and no longer used in the roster
|
|
45
|
+
correct: B
|
|
46
|
+
explanation: Meta agents operate on the agent system itself rather than on the codebase directly. Loid (forge) designs and builds new agents by understanding the full .agent profile schema. Sensei (trainer) reviews agent performance and curates notebook entries to improve them. The documentor maintains .purpose and portal.yaml files after other agents finish. Bridge (mediator) resolves disagreements between agents. They are the agents that make other agents better.
|
|
47
|
+
- id: q4
|
|
48
|
+
question: A developer is working on a game project built with Godot. Which of these agents would likely NOT be in the project roster?
|
|
49
|
+
choices:
|
|
50
|
+
A: The gamedev agent (Pixel)
|
|
51
|
+
B: The 3d agent (Neon)
|
|
52
|
+
C: The audio agent (Echo)
|
|
53
|
+
D: The SEO agent (Beacon)
|
|
54
|
+
E: The builder agent
|
|
55
|
+
correct: D
|
|
56
|
+
explanation: The SEO agent (Beacon) specializes in search engine optimization — concepts like meta tags, crawlability, structured data, and organic traffic. A Godot game project has no web pages to optimize for search engines. The game project type suggests a roster including architect, builder, reviewer, tester, documentor, gamedev (Pixel), 3d (Neon), audio (Echo), designer, performance, and debugger. SEO is irrelevant to this domain.
|
|
57
|
+
- id: q5
|
|
58
|
+
question: What distinguishes Jinx (advocate) from the reviewer in the Reviewers tier?
|
|
59
|
+
choices:
|
|
60
|
+
A: Jinx writes code and the reviewer does not
|
|
61
|
+
B: Jinx is a devil's advocate who stress-tests assumptions and challenges approaches, while the reviewer checks code correctness, quality, and Paradigm compliance
|
|
62
|
+
C: Jinx is the reviewer's backup — they do the same work
|
|
63
|
+
D: Jinx only works on security-related code
|
|
64
|
+
E: Jinx has a higher attention threshold than the reviewer
|
|
65
|
+
correct: B
|
|
66
|
+
explanation: 'Jinx and the reviewer serve fundamentally different purposes. The reviewer checks code quality: correctness, naming conventions, .purpose coverage, aspect anchors, test coverage. Jinx''s job is to argue against the current approach entirely — she stress-tests assumptions, finds edge cases nobody considered, and asks uncomfortable questions. Her personality is confrontational and aggressive, while the reviewer is deliberate and conservative. Jinx attacks the design; the reviewer evaluates the implementation.'
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
id: Q-para-701-agent-state
|
|
2
|
+
title: 'PARA 701: Agent Mastery — Lesson 4: Agent State & Continuity'
|
|
3
|
+
description: 'Quiz for lesson: Lesson 4: Agent State & Continuity'
|
|
4
|
+
author: paradigm
|
|
5
|
+
created: '2026-04-22'
|
|
6
|
+
updated: '2026-04-22'
|
|
7
|
+
tags:
|
|
8
|
+
- course
|
|
9
|
+
- para-701
|
|
10
|
+
symbols: []
|
|
11
|
+
difficulty: beginner
|
|
12
|
+
passThreshold: 0.7
|
|
13
|
+
category: paradigm-core
|
|
14
|
+
origin: imported
|
|
15
|
+
source: courses/para-701.json
|
|
16
|
+
questions:
|
|
17
|
+
- id: q1
|
|
18
|
+
question: The security agent reviewed 5 files in session A, deferred 2 items, then in session B completed 1 of those items and deferred 1 new item. How many pending items does the agent see at the start of session C?
|
|
19
|
+
choices:
|
|
20
|
+
A: 0 — pending work resets each session
|
|
21
|
+
B: 1 — only the item deferred in session B
|
|
22
|
+
C: 2 — the 1 remaining from session A plus the 1 new from session B
|
|
23
|
+
D: 3 — all items ever deferred
|
|
24
|
+
E: It depends on the project roster configuration
|
|
25
|
+
correct: C
|
|
26
|
+
explanation: 'Pending work accumulates across sessions and persists until explicitly completed via `completePendingWork()`. Session A deferred 2 items. Session B completed 1 (leaving 1 from A) and added 1 new item. At the start of session C, the agent sees 2 pending items: the 1 remaining from session A and the 1 added in session B. This is the key value of pending work tracking — nothing falls through the cracks across session boundaries.'
|
|
27
|
+
- id: q2
|
|
28
|
+
question: What is the difference between `recentPatterns` in project state and `transferable` patterns in the .agent file?
|
|
29
|
+
choices:
|
|
30
|
+
A: They are the same thing stored in different locations
|
|
31
|
+
B: recentPatterns are project-specific and do not travel; transferable patterns apply across all projects and are included in prompt enrichment when successRate >= 0.7
|
|
32
|
+
C: recentPatterns are more important and always override transferable patterns
|
|
33
|
+
D: transferable patterns are deprecated in favor of recentPatterns
|
|
34
|
+
E: recentPatterns are automatically promoted to transferable after 10 sessions
|
|
35
|
+
correct: B
|
|
36
|
+
explanation: 'recentPatterns live in the project state file and capture knowledge specific to one project (e.g., ''This project uses sliding-window JWT rotation''). They are injected as ''**Project patterns you''ve learned:**'' in the prompt, but only for that project. Transferable patterns live in the .agent file and apply everywhere (e.g., ''always check RLS policies''). They travel across projects and are included in prompt enrichment when successRate >= 0.7. The distinction is scope: project vs universal.'
|
|
37
|
+
- id: q3
|
|
38
|
+
question: 'An agent''s global state shows `totalSessions: 47` with 30 sessions on project A and 2 on project B. The agent is now starting work on project B. How does the orchestrator use this information?'
|
|
39
|
+
choices:
|
|
40
|
+
A: It rejects the agent — 2 sessions is insufficient expertise
|
|
41
|
+
B: It ignores global state — only project state matters
|
|
42
|
+
C: The low session count on project B (relative to the agent's 47 total sessions) may trigger a fresh onboarding pass, and the project state's lastSession age indicates how much context refresh is needed
|
|
43
|
+
D: It assigns the agent to project A instead, where it has more experience
|
|
44
|
+
E: Global state is only used for dashboard display, not orchestration decisions
|
|
45
|
+
correct: C
|
|
46
|
+
explanation: Global state provides experience context. An agent with 47 total sessions is experienced overall, but only 2 sessions on project B means limited project-specific knowledge. The orchestrator can use this to trigger the agent's `onboarding` procedure (defined in the collaboration block), ensuring the agent re-reads the project's .purpose files, config, and portal.yaml before making recommendations. The project state's `lastSession.date` shows how stale the context is — if the 2 sessions were 3 months ago, a full onboarding is warranted.
|
|
47
|
+
- id: q4
|
|
48
|
+
question: Where does project state live and why is it committed to the repository?
|
|
49
|
+
choices:
|
|
50
|
+
A: ~/.paradigm/agent-state/ — it is user-scoped, not committed
|
|
51
|
+
B: .paradigm/agent-state/{id}.yaml — it is committed so that when another team member works on the project, agents remember what was done, not just what one person's agents did
|
|
52
|
+
C: In memory only — state is ephemeral and reconstructed from lore
|
|
53
|
+
D: .paradigm/config.yaml — state is a config value
|
|
54
|
+
E: node_modules/.paradigm/ — it is a build artifact
|
|
55
|
+
correct: B
|
|
56
|
+
explanation: 'Project state at `.paradigm/agent-state/{id}.yaml` is committed to the repository. This is important for team continuity: if developer A''s security agent reviews files and defers items, developer B''s security agent should see those deferred items in the next session. The state is project-scoped (different from global state at ~/.paradigm/agents/ which is user-scoped), so it captures the collective agent experience on the project, not just one user''s sessions.'
|
|
57
|
+
- id: q5
|
|
58
|
+
question: The recentPatterns array in project state has a maximum of 10 entries. An agent learns an 11th pattern. What happens?
|
|
59
|
+
choices:
|
|
60
|
+
A: The 11th pattern is rejected — the agent must manually remove an old one
|
|
61
|
+
B: All patterns are cleared and replaced with the 11th
|
|
62
|
+
C: The oldest pattern is dropped and the new one is added — `recentPatterns.slice(-10)` keeps only the most recent 10
|
|
63
|
+
D: The array expands to 11 — the limit is advisory
|
|
64
|
+
E: The pattern is added to the agent's transferable array instead
|
|
65
|
+
correct: C
|
|
66
|
+
explanation: 'The `addProjectPattern()` function enforces a hard limit of 10 recent patterns: `if (state.recentPatterns.length > 10) { state.recentPatterns = state.recentPatterns.slice(-10); }`. The oldest pattern is dropped and the newest is kept. This bounded window ensures the prompt enrichment section stays within token budgets while always showing the most recent project-specific knowledge. Patterns that prove universally useful should be promoted to the agent''s `transferable` array in the .agent file, which has no such limit.'
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
id: Q-para-701-learning-feedback-loop
|
|
2
|
+
title: 'PARA 701: Agent Mastery — Lesson 9: The Learning Feedback Loop'
|
|
3
|
+
description: 'Quiz for lesson: Lesson 9: The Learning Feedback Loop'
|
|
4
|
+
author: paradigm
|
|
5
|
+
created: '2026-04-22'
|
|
6
|
+
updated: '2026-04-22'
|
|
7
|
+
tags:
|
|
8
|
+
- course
|
|
9
|
+
- para-701
|
|
10
|
+
symbols: []
|
|
11
|
+
difficulty: beginner
|
|
12
|
+
passThreshold: 0.7
|
|
13
|
+
category: paradigm-core
|
|
14
|
+
origin: imported
|
|
15
|
+
source: courses/para-701.json
|
|
16
|
+
questions:
|
|
17
|
+
- id: q1
|
|
18
|
+
question: The security agent's contribution is revised by the human (partially correct). What happens to its expertise confidence on the relevant symbols?
|
|
19
|
+
choices:
|
|
20
|
+
A: No change — revised contributions have no effect
|
|
21
|
+
B: Confidence decreases by 0.02 (same as dismissed)
|
|
22
|
+
C: Confidence decreases by 0.01 — the revised verdict means the agent was partially right, so the penalty is smaller than dismissed (-0.02)
|
|
23
|
+
D: Confidence increases by 0.01 (partially correct is still partially positive)
|
|
24
|
+
E: Confidence is reset to 0.5 (neutral)
|
|
25
|
+
correct: C
|
|
26
|
+
explanation: Revised verdicts trigger a -0.01 adjustment. This is smaller than dismissed (-0.02) because the agent was in the right direction — the human modified the contribution rather than rejecting it entirely. The asymmetric reinforcement scheme (+0.03 accept / -0.02 dismiss / -0.01 revise) is designed so that a mix of mostly-accepted with occasional revisions still trends upward, while consistent dismissals trend downward.
|
|
27
|
+
- id: q2
|
|
28
|
+
question: The Teacher Model runs at postflight and reads the session work log. What is it looking for?
|
|
29
|
+
choices:
|
|
30
|
+
A: Code syntax errors in agent contributions
|
|
31
|
+
B: Patterns in user verdicts — which agents were accepted, dismissed, or revised, and what insights can be extracted for journal entries
|
|
32
|
+
C: Missing .purpose file updates
|
|
33
|
+
D: Whether the orchestrator was called
|
|
34
|
+
E: Token usage metrics for cost optimization
|
|
35
|
+
correct: B
|
|
36
|
+
explanation: 'The Teacher Model''s postflight learning pass reads agent-contribution and user-verdict entries from the session work log. It looks for patterns: which agents were consistently accepted (confirming their expertise), which were revised (indicating partial knowledge gaps), and what specific corrections the human made (revisionDelta). It synthesizes these patterns into journal entries with extracted LearningPatterns that describe when to apply the corrected approach. The Teacher Model does not check code quality or Paradigm compliance — those are the reviewer''s and stop hook''s jobs.'
|
|
37
|
+
- id: q3
|
|
38
|
+
question: 'A journal entry about JWT refresh token rotation has appeared in 4 different sessions with `transferable: true`. Sensei is evaluating whether to promote it to a notebook entry. What criteria does Sensei use?'
|
|
39
|
+
choices:
|
|
40
|
+
A: Only the transferable flag — if true, it is automatically promoted
|
|
41
|
+
B: The number of sessions (4 is enough) — promotion is count-based
|
|
42
|
+
C: Whether the insight is transferable, actionable, confirmed by multiple sessions, and high enough confidence to be reliable
|
|
43
|
+
D: Whether the human explicitly requests promotion
|
|
44
|
+
E: Whether the agent's overall acceptance rate is above 80%
|
|
45
|
+
correct: C
|
|
46
|
+
explanation: 'Sensei evaluates multiple criteria: (1) Is it transferable to other projects? (`transferable: true` confirms this). (2) Is it actionable — specific enough to apply, not just a vague observation? (3) Has it appeared across multiple sessions — 4 appearances confirms the pattern. (4) Is the confidence high enough? A pattern discovered through `correction_received` with `confidence_after: 0.9` is more reliable than one from `self_reflection` with `confidence_after: 0.6`. The promotion is a quality gate, not automatic.'
|
|
47
|
+
- id: q4
|
|
48
|
+
question: Why is the expertise adjustment +0.03 for accepted but only -0.02 for dismissed (asymmetric rather than symmetric)?
|
|
49
|
+
choices:
|
|
50
|
+
A: Positive reinforcement is always stronger than negative in learning theory
|
|
51
|
+
B: The asymmetry prevents a single bad session from collapsing an otherwise reliable agent's confidence — it takes more dismissals than acceptances to significantly change confidence
|
|
52
|
+
C: Symmetric adjustments would cause confidence to oscillate unstably
|
|
53
|
+
D: The specific values are arbitrary and have no design rationale
|
|
54
|
+
E: Accepted contributions are more common, so they need a larger weight
|
|
55
|
+
correct: B
|
|
56
|
+
explanation: The asymmetry is deliberate. If an agent has been consistently good (confidence 0.9) and has one bad session where a contribution is dismissed, symmetric -0.03 would drop it to 0.87. With asymmetric -0.02, it drops to 0.88. This matters because a single bad session should not outweigh multiple good ones. The agent needs more dismissals than acceptances to trend downward, which matches the expectation that occasionally wrong agents are still net-positive contributors.
|
|
57
|
+
- id: q5
|
|
58
|
+
question: An agent's nomination was deferred (not accepted, not dismissed). How does this affect the learning loop?
|
|
59
|
+
choices:
|
|
60
|
+
A: The expertise confidence decreases slightly
|
|
61
|
+
B: The nomination is marked for re-evaluation in the next session
|
|
62
|
+
C: No expertise change occurs — deferred says nothing about correctness, only timing. The journal entry (if written) would use trigger `self_reflection` rather than `human_feedback`.
|
|
63
|
+
D: The agent is temporarily benched until the deferred item is resolved
|
|
64
|
+
E: The Teacher Model treats deferred as a weaker form of dismissal
|
|
65
|
+
correct: C
|
|
66
|
+
explanation: A deferred verdict means the contribution is not relevant right now, but may be valid. The expertise adjustment for deferred is 0 (no change) because deferral carries no signal about whether the agent was right or wrong. The agent's confidence remains unchanged. If the Teacher Model writes a journal entry about the deferred contribution, it would use `self_reflection` as the trigger rather than `human_feedback`, since the human did not evaluate correctness.
|