@a-company/paradigm 3.1.5 → 3.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80) hide show
  1. package/dist/{accept-orchestration-CWZNCGZX.js → accept-orchestration-DIGPJVUR.js} +6 -5
  2. package/dist/{aggregate-W7Q6VIM2.js → aggregate-V4KPR3RW.js} +2 -2
  3. package/dist/{beacon-B47XSTL7.js → beacon-XRXL5KZB.js} +2 -2
  4. package/dist/{chunk-4LGLU2LO.js → chunk-2E2RTBSM.js} +533 -182
  5. package/dist/{chunk-YCLN7WXV.js → chunk-2QNZ6PVD.js} +219 -35
  6. package/dist/{chunk-UM54F7G5.js → chunk-4N6AYEEA.js} +1 -1
  7. package/dist/{chunk-MVXJVRFI.js → chunk-5TUAVVIG.js} +65 -1
  8. package/dist/{chunk-5C4SGQKH.js → chunk-6P4IFIK2.js} +4 -2
  9. package/dist/{chunk-WS5KM7OL.js → chunk-6RNYVBSG.js} +1 -1
  10. package/dist/{chunk-N6PJAPDE.js → chunk-AK5M6KJB.js} +18 -0
  11. package/dist/{chunk-VZ7CXFRZ.js → chunk-CRICL4FQ.js} +1004 -17
  12. package/dist/{chunk-MC7XC7XQ.js → chunk-GZDFVP2N.js} +20 -13
  13. package/dist/chunk-HPC3JAUP.js +42 -0
  14. package/dist/chunk-IRVA7NKV.js +657 -0
  15. package/dist/{chunk-ZPN7MXRA.js → chunk-KFHK6EBI.js} +184 -1
  16. package/dist/{chunk-UUZ2DMG5.js → chunk-KWDTBXP2.js} +1 -1
  17. package/dist/{chunk-DRUDZKIT.js → chunk-M2XMTJHQ.js} +693 -70
  18. package/dist/{chunk-PW2EXJQT.js → chunk-MRENOFTR.js} +24 -1
  19. package/dist/{chunk-QS36NGWV.js → chunk-QHJGB5TV.js} +1 -1
  20. package/dist/chunk-UI3XXVJ6.js +449 -0
  21. package/dist/{chunk-AD2LSCHB.js → chunk-Y4XZWCHK.js} +40 -74
  22. package/dist/{constellation-K3CIQCHI.js → constellation-GNK5DIMH.js} +2 -2
  23. package/dist/{cost-AEK6R7HK.js → cost-AGO5N7DD.js} +1 -1
  24. package/dist/{cursorrules-KI5QWHIX.js → cursorrules-LQFA7M62.js} +2 -2
  25. package/dist/{delete-W67IVTLJ.js → delete-3YXAJ5AA.js} +12 -1
  26. package/dist/{diff-AJJ5H6HV.js → diff-J6C5IHPV.js} +6 -5
  27. package/dist/{dist-2F7NO4H4-KSL6SJIO.js → dist-AG5JNIZU-XSEZ2LLK.js} +28 -3
  28. package/dist/dist-JOHRYQUA.js +7294 -0
  29. package/dist/{dist-NHJQVVUW.js → dist-Q6SAZI7X.js} +2 -2
  30. package/dist/{dist-GPQ4LAY3.js → dist-YP2CO4TG.js} +24 -6
  31. package/dist/{doctor-JBIV5PMN.js → doctor-TQYRF7KK.js} +2 -2
  32. package/dist/{edit-Y7XPYSMK.js → edit-EOMPXOG5.js} +1 -1
  33. package/dist/flow-7JUH6D4H.js +185 -0
  34. package/dist/global-AXILUM5X.js +136 -0
  35. package/dist/{habits-FA65W77Y.js → habits-CHP4EW5H.js} +234 -5
  36. package/dist/{hooks-JKWO44WH.js → hooks-DLZEYHI3.js} +1 -1
  37. package/dist/index.js +125 -100
  38. package/dist/{lint-HXKTWRNO.js → lint-N4LMMEXH.js} +141 -1
  39. package/dist/{list-R3QWW4SC.js → list-JKBJ7ESH.js} +1 -1
  40. package/dist/mcp.js +9273 -6515
  41. package/dist/{orchestrate-4ZH5GUQH.js → orchestrate-FAV64G2R.js} +6 -5
  42. package/dist/{probe-OYCP4JYG.js → probe-X3J2JX62.js} +18 -3
  43. package/dist/{promote-E6NBZ3BK.js → promote-HZH5E5CO.js} +1 -1
  44. package/dist/{providers-4PGPZEWP.js → providers-NQ67LO2Z.js} +1 -1
  45. package/dist/{record-OHQNWOUP.js → record-EECZ3E4I.js} +1 -1
  46. package/dist/{remember-6VZ74B7E.js → remember-3KJZGDUG.js} +1 -1
  47. package/dist/{review-RUHX25A5.js → review-BF26ILZB.js} +1 -1
  48. package/dist/{ripple-SBQOSTZD.js → ripple-JIUAMBLA.js} +2 -2
  49. package/dist/sentinel-ZTL224IG.js +63 -0
  50. package/dist/{server-MV4HNFVF.js → server-MZBYDXJY.js} +4193 -9
  51. package/dist/{setup-DF4F3ICN.js → setup-363IB6MO.js} +1 -1
  52. package/dist/{setup-JHBPZAG7.js → setup-UKJ3VGHI.js} +4 -4
  53. package/dist/{shift-2LQFQP4P.js → shift-KDVYB6CR.js} +16 -13
  54. package/dist/{show-WTOJXUTN.js → show-SAMTXEHG.js} +1 -1
  55. package/dist/{snapshot-GTVPRYZG.js → snapshot-KCMONZAO.js} +2 -2
  56. package/dist/{spawn-BJRQA2NR.js → spawn-EO7B2UM3.js} +2 -2
  57. package/dist/{summary-5SBFO7QK.js → summary-E2PU4UN2.js} +3 -3
  58. package/dist/{switch-6EANJ7O6.js → switch-CC2KACXO.js} +1 -1
  59. package/dist/{sync-5KSTPJ4B.js → sync-5VJPZQNX.js} +2 -2
  60. package/dist/sync-llms-7QDA3ZWC.js +166 -0
  61. package/dist/{team-NWP2KJAB.js → team-6CCNANKE.js} +7 -6
  62. package/dist/{test-MA5TWJQV.js → test-DK2RWLTK.js} +91 -8
  63. package/dist/{thread-JCJVRUQR.js → thread-RNSLADXN.js} +18 -2
  64. package/dist/{timeline-P7BARFLI.js → timeline-TJDVVVA3.js} +1 -1
  65. package/dist/{triage-TBIWJA6R.js → triage-PXMU3RWV.js} +2 -2
  66. package/dist/university-content/courses/para-101.json +2 -1
  67. package/dist/university-content/courses/para-201.json +102 -3
  68. package/dist/university-content/courses/para-301.json +14 -11
  69. package/dist/university-content/courses/para-401.json +57 -3
  70. package/dist/university-content/courses/para-501.json +204 -6
  71. package/dist/university-content/plsat/v3.0.json +808 -3
  72. package/dist/university-content/reference.json +270 -0
  73. package/dist/{upgrade-TIYFQYPO.js → upgrade-RBSE4M6I.js} +1 -1
  74. package/dist/{validate-QEEY6KFS.js → validate-2LTHHORX.js} +1 -1
  75. package/dist/{watch-4LT4O6K7.js → watch-NBPOMOEX.js} +76 -0
  76. package/dist/{watch-2XEYUH43.js → watch-PAEH6MOG.js} +1 -1
  77. package/package.json +1 -1
  78. package/dist/chunk-GWM2WRXL.js +0 -1095
  79. package/dist/sentinel-WB7GIK4V.js +0 -43
  80. package/dist/{chunk-TAP5N3HH.js → chunk-CCG6KYBT.js} +0 -0
@@ -1,10 +1,11 @@
1
1
  {
2
2
  "version": "3.0",
3
3
  "frameworkVersion": "2.0",
4
- "timeLimit": 2700,
4
+ "timeLimit": 5160,
5
+ "totalSlots": 86,
5
6
  "passThreshold": 0.8,
6
7
  "title": "The PLSAT \u2014 Paradigm Licensure Standardized Assessment Test",
7
- "description": "50 questions. 45 minutes. 80% to pass. Good luck, scholar.",
8
+ "description": "86 questions. 86 minutes. 80% to pass. Good luck, scholar.",
8
9
  "items": [
9
10
  {
10
11
  "type": "standalone",
@@ -1326,11 +1327,46 @@
1326
1327
  }
1327
1328
  ]
1328
1329
  },
1330
+ {
1331
+ "type": "standalone",
1332
+ "slot": "slot-059",
1333
+ "course": "para-201",
1334
+ "variants": [
1335
+ {
1336
+ "id": "plsat-059",
1337
+ "scenario": "You run `paradigm flow validate` on your project and receive this output:\n\n```\n⚠ Circular Dependencies (1)\n\n $order-flow → $inventory-flow → $order-flow\n```\n\nBoth flows reference each other via `relatedFlows`.",
1338
+ "question": "What is the best way to resolve this circular dependency?",
1339
+ "choices": {
1340
+ "A": "Delete one of the flows — circular flows are always a design error",
1341
+ "B": "Extract the shared logic into a new `$stock-check-flow` that both flows reference, breaking the cycle",
1342
+ "C": "Ignore it — circular dependencies are just warnings and do not affect anything",
1343
+ "D": "Rename the flows so the validator does not detect the cycle",
1344
+ "E": "Move both flows into the same .purpose file to merge them"
1345
+ },
1346
+ "correct": "B",
1347
+ "explanation": "The recommended resolution for circular flow dependencies is to extract shared logic into a separate flow. If $order-flow and $inventory-flow both need shared behavior, create a third flow (e.g., $stock-check-flow) that both reference unidirectionally. This eliminates the cycle while preserving the relationships. Deletion (A) loses documentation, ignoring (C) hides architectural coupling, and renaming (D) is a workaround that does not fix the underlying issue."
1348
+ },
1349
+ {
1350
+ "id": "plsat-059b",
1351
+ "scenario": "Your project has three flows with these `relatedFlows` references:\n- `$checkout-flow` → `[$payment-flow]`\n- `$payment-flow` → `[$receipt-flow]`\n- `$receipt-flow` → `[$checkout-flow]`\n\nYou run `paradigm_flow_validate({})` to validate all flows.",
1352
+ "question": "What will the circular dependency detection report?",
1353
+ "choices": {
1354
+ "A": "No issues — each flow only references one other flow",
1355
+ "B": "Three separate circular dependencies, one for each flow",
1356
+ "C": "One circular dependency: $checkout-flow → $payment-flow → $receipt-flow → $checkout-flow",
1357
+ "D": "A warning that flows should not have relatedFlows at all",
1358
+ "E": "An error that three-flow cycles are not supported"
1359
+ },
1360
+ "correct": "C",
1361
+ "explanation": "Paradigm's circular dependency detector uses depth-first search to trace the full dependency graph. Starting from $checkout-flow, it follows: $checkout-flow → $payment-flow → $receipt-flow → $checkout-flow, detecting a single 3-node cycle. The cycle is normalized (starting from the lexicographically smallest node) and reported once, not three times."
1362
+ }
1363
+ ]
1364
+ },
1329
1365
  {
1330
1366
  "type": "passage",
1331
1367
  "slot": "passage-habits-review",
1332
1368
  "course": "para-501",
1333
- "passage": "Your team's `.paradigm/habits.yaml` for an e-commerce project:\n\n```yaml\noverrides:\n ripple-before-modify:\n severity: block\n explore-before-implement:\n severity: warn\n test-new-components:\n enabled: false\n record-lore-for-significant:\n severity: block\n\ncustom:\n - id: check-price-validation\n name: Validate Price Calculations\n description: Ensure price calculation tests exist for any payment-related changes\n category: testing\n trigger: postflight\n severity: warn\n check:\n type: tests-exist\n params:\n patterns: [\"**/price*.test.*\", \"**/payment*.test.*\"]\n enabled: true\n```\n\nThe seed habits that are NOT shown in overrides retain their default values. The project has 10 seed habits plus 1 custom habit.",
1369
+ "passage": "Your team's `.paradigm/habits.yaml` for an e-commerce project:\n\n```yaml\noverrides:\n ripple-before-modify:\n severity: block\n explore-before-implement:\n severity: warn\n test-new-components:\n enabled: false\n record-lore-for-significant:\n severity: block\n\ncustom:\n - id: check-price-validation\n name: Validate Price Calculations\n description: Ensure price calculation tests exist for any payment-related changes\n category: testing\n trigger: postflight\n severity: warn\n check:\n type: tests-exist\n params:\n patterns: [\"**/price*.test.*\", \"**/payment*.test.*\"]\n enabled: true\n```\n\nThe seed habits that are NOT shown in overrides retain their default values. The project has 14 seed habits plus 1 custom habit.",
1334
1370
  "questions": [
1335
1371
  {
1336
1372
  "slot": "pg-habits-q1",
@@ -1390,6 +1426,775 @@
1390
1426
  ]
1391
1427
  }
1392
1428
  ]
1429
+ },
1430
+ {
1431
+ "type": "standalone",
1432
+ "slot": "slot-060",
1433
+ "course": "para-501",
1434
+ "variants": [
1435
+ {
1436
+ "id": "plsat-060",
1437
+ "scenario": "A project has these habits enabled:\n- `commit-message-symbols` (on-commit/advisory) — checks commit messages match `type(#symbol):` format and include a `Symbols:` trailer\n- `flow-coverage-for-multi-component` (postflight/advisory) — checks that changes spanning 3+ components have a documented $flow\n\nAn agent modifies `#auth-handler`, `#session-store`, `#login-page`, and `#password-reset` but does not create a $flow. The agent then commits with message: `fix: update auth logic`.",
1438
+ "question": "Which habits are violated?",
1439
+ "choices": {
1440
+ "A": "Only commit-message-symbols — the message lacks type(#symbol): format",
1441
+ "B": "Only flow-coverage — 4 components without a $flow",
1442
+ "C": "Both: commit message lacks #symbol in parens and Symbols: trailer, plus 4 components touched without a flow",
1443
+ "D": "Neither — both are advisory and don't actually check anything",
1444
+ "E": "Only flow-coverage — the commit message format is correct"
1445
+ },
1446
+ "correct": "C",
1447
+ "explanation": "The commit message `fix: update auth logic` matches the conventional prefix `fix:` but lacks a #symbol in parentheses (should be `fix(#auth-handler):`) and has no `Symbols:` trailer. Additionally, 4 components were modified (>= 3 threshold) without a documented $flow. Both habits are violated — the advisory severity means they log notes rather than blocking."
1448
+ },
1449
+ {
1450
+ "id": "plsat-060b",
1451
+ "scenario": "A project enables `context-session-awareness` (preflight/advisory) and `aspect-anchors-valid` (postflight/advisory).\n\nAn agent starts a session, immediately begins modifying `~rate-limited` aspect without calling any context or session recovery tools. After modifying the aspect's anchor locations, the agent calls `paradigm_aspect_check` to verify the anchors are valid.",
1452
+ "question": "What do the habit evaluations show?",
1453
+ "choices": {
1454
+ "A": "Both followed — the agent did check the aspect",
1455
+ "B": "context-session-awareness: skipped (no context tools called); aspect-anchors-valid: followed (paradigm_aspect_check was called)",
1456
+ "C": "Both skipped — advisory habits are always skipped",
1457
+ "D": "context-session-awareness: followed (aspect_check counts as context); aspect-anchors-valid: skipped (anchors were modified)",
1458
+ "E": "Both partial — the agent did some work for each"
1459
+ },
1460
+ "correct": "B",
1461
+ "explanation": "context-session-awareness checks if paradigm_context_check, paradigm_session_recover, or paradigm_session_checkpoint was called — paradigm_aspect_check does not count. aspect-anchors-valid checks if paradigm_aspect_check was called for touched aspects, which it was. So the first is skipped and the second is followed."
1462
+ }
1463
+ ]
1464
+ },
1465
+ {
1466
+ "type": "standalone",
1467
+ "slot": "slot-061",
1468
+ "course": "para-301",
1469
+ "variants": [
1470
+ {
1471
+ "id": "plsat-061",
1472
+ "scenario": "Sentinel groups 8 incidents affecting `#payment-service` with these error messages:\n- 4 incidents: \"Stripe API returned 429: rate limited\"\n- 2 incidents: \"Payment webhook timeout after 30s\"\n- 2 incidents: \"Connection reset by peer during payment callback\"\n\nThe pattern suggester infers a resolution strategy from the grouped incidents.",
1473
+ "question": "What strategy will the suggester infer?",
1474
+ "choices": {
1475
+ "A": "fix-code — the default for any group of incidents",
1476
+ "B": "retry — timeout and network-related errors dominate the group",
1477
+ "C": "scale-up — rate limiting means the service needs more capacity",
1478
+ "D": "rollback — the errors suggest a recent deployment broke something",
1479
+ "E": "config-change — the 429 means the API key needs updating"
1480
+ },
1481
+ "correct": "B",
1482
+ "explanation": "The strategy inference checks error messages for keywords. 'timeout' and 'connection reset' match the retry strategy (timeout, network keywords). While '429: rate limited' could suggest scale-up, the 'timeout' keyword in 2 messages triggers the retry check first in the keyword priority order. The inference returns the first matching strategy, which is retry for timeout/network errors."
1483
+ }
1484
+ ]
1485
+ },
1486
+ {
1487
+ "type": "standalone",
1488
+ "slot": "slot-062",
1489
+ "course": "para-201",
1490
+ "variants": [
1491
+ {
1492
+ "id": "plsat-062a",
1493
+ "scenario": "A portal.yaml defines a gate `^subscription-required` with two locks:\n\n```yaml\nlocks:\n - id: has-user\n keys:\n - expression: \"req.user != null\"\n - id: active-sub\n keys:\n - expression: \"req.user.subscription.status === 'active'\"\n - expression: \"req.user.subscription.plan !== 'free'\"\n```\n\nYou run `paradigm portal test --gate ^subscription-required`.",
1494
+ "question": "How many test cases does the gate lock introspection auto-generate?",
1495
+ "choices": {
1496
+ "A": "2 — one passing case and one failing case",
1497
+ "B": "3 — one passing case, one per-lock failure case for each of the 2 locks, but no empty entity case",
1498
+ "C": "4 — one passing case, one per-lock failure case for each of the 2 locks, and one empty entity case",
1499
+ "D": "5 — one case per key expression plus one empty entity case",
1500
+ "E": "1 — only the passing case with all properties populated"
1501
+ },
1502
+ "correct": "C",
1503
+ "explanation": "Gate lock introspection generates: (1) a passing case with all properties populated from all key expressions, (2) one failure case per lock (omitting that lock's required properties), and (3) an empty entity case that should always fail. With 2 locks, that's 1 + 2 + 1 = 4 test cases."
1504
+ },
1505
+ {
1506
+ "id": "plsat-062b",
1507
+ "scenario": "You need a machine-readable export of your portal configuration for a CI audit pipeline. Your portal.yaml has 5 gates and 12 routes.",
1508
+ "question": "Which command produces a structured export suitable for programmatic consumption in CI?",
1509
+ "choices": {
1510
+ "A": "paradigm portal export --format json",
1511
+ "B": "paradigm portal export --format csv",
1512
+ "C": "paradigm portal export --format markdown",
1513
+ "D": "paradigm doctor --json",
1514
+ "E": "paradigm scan --verbose"
1515
+ },
1516
+ "correct": "A",
1517
+ "explanation": "paradigm portal export --format json produces a structured JSON output with gates and routes arrays, ideal for CI pipelines. CSV is for spreadsheet analysis, markdown for documentation. paradigm doctor --json reports health checks, not portal config."
1518
+ }
1519
+ ]
1520
+ },
1521
+ {
1522
+ "type": "standalone",
1523
+ "slot": "slot-063",
1524
+ "course": "para-301",
1525
+ "variants": [
1526
+ {
1527
+ "id": "plsat-063",
1528
+ "scenario": "You join a team working on a large codebase. Many source directories have code but no `.purpose` files documenting their components. You want to quickly generate draft documentation.",
1529
+ "question": "What is the correct approach using Paradigm's lint tooling?",
1530
+ "choices": {
1531
+ "A": "paradigm lint --fix — automatically creates .purpose files for all undocumented directories",
1532
+ "B": "paradigm lint --auto-populate — scans source directories and suggests .purpose drafts, then paradigm lint --auto-populate --fix to write them",
1533
+ "C": "paradigm scan --fix — rebuilds the index and creates missing .purpose files",
1534
+ "D": "paradigm doctor --fix — finds missing documentation and generates stubs"
1535
+ },
1536
+ "correct": "B",
1537
+ "explanation": "paradigm lint --auto-populate scans source directories (max depth 4) for undocumented components — directories containing source files but no .purpose file. Without --fix it reports suggestions; with --fix it writes draft .purpose files. paradigm lint --fix only fixes lint issues in existing .purpose files, it doesn't create new ones. scan and doctor don't generate .purpose files."
1538
+ }
1539
+ ]
1540
+ },
1541
+ {
1542
+ "type": "standalone",
1543
+ "slot": "slot-064",
1544
+ "course": "para-401",
1545
+ "variants": [
1546
+ {
1547
+ "id": "plsat-064a",
1548
+ "scenario": "A team has both AGENTS.md and llms.txt in their Paradigm project. A new developer asks what each file is for.",
1549
+ "question": "Which statement correctly distinguishes the two files?",
1550
+ "choices": {
1551
+ "A": "AGENTS.md is for Claude, llms.txt is for all other LLMs",
1552
+ "B": "AGENTS.md contains instructions (how to behave), llms.txt contains facts (what exists)",
1553
+ "C": "llms.txt replaces AGENTS.md in Paradigm v2",
1554
+ "D": "They contain the same information in different formats",
1555
+ "E": "AGENTS.md is auto-generated but llms.txt must be hand-written"
1556
+ },
1557
+ "correct": "B",
1558
+ "explanation": "AGENTS.md is prescriptive — it tells agents what tools to use, what conventions to follow, and what workflow to observe. llms.txt is descriptive — it tells agents what symbols exist, what flows are defined, and how the project is structured. Both are auto-generated by Paradigm (sync agents and sync-llms respectively) and serve distinct purposes."
1559
+ },
1560
+ {
1561
+ "id": "plsat-064b",
1562
+ "scenario": "An AI agent spawned in isolation needs to orient itself before working on a task. It has access to AGENTS.md, MCP tools, and the full codebase.",
1563
+ "question": "What is the most token-efficient orientation sequence?",
1564
+ "choices": {
1565
+ "A": "Read all .purpose files, then read portal.yaml",
1566
+ "B": "Read AGENTS.md → paradigm_session_recover → paradigm_navigate with context intent (~500 tokens total)",
1567
+ "C": "paradigm_search for every symbol type → read matching files",
1568
+ "D": "Read every file in .paradigm/ for full context",
1569
+ "E": "Call paradigm_status repeatedly until context is sufficient"
1570
+ },
1571
+ "correct": "B",
1572
+ "explanation": "The Fresh Context Principle: AGENTS.md provides instructions and conventions, paradigm_session_recover provides previous session context, and paradigm_navigate with context intent provides task-relevant files. Total cost: ~500 tokens — compared to thousands of tokens for file-reading approaches."
1573
+ }
1574
+ ]
1575
+ },
1576
+ {
1577
+ "type": "standalone",
1578
+ "slot": "slot-065",
1579
+ "course": "para-201",
1580
+ "variants": [
1581
+ {
1582
+ "id": "plsat-065",
1583
+ "scenario": "You have a `$checkout-flow` with these steps:\n1. ^authenticated (gate)\n2. #validate-cart (action)\n3. #process-payment (action)\n4. !order-placed (signal)\n\nYou run `paradigm flow diagram $checkout-flow`.",
1584
+ "question": "In the generated Mermaid diagram, what shapes represent each step type?",
1585
+ "choices": {
1586
+ "A": "All steps are rectangles with different colors",
1587
+ "B": "Gates are diamonds, actions are rectangles, signals are rounded boxes",
1588
+ "C": "Gates are hexagons, actions are circles, signals are parallelograms",
1589
+ "D": "All steps are circles connected by labeled arrows",
1590
+ "E": "Gates are rounded boxes, actions are diamonds, signals are rectangles"
1591
+ },
1592
+ "correct": "B",
1593
+ "explanation": "Paradigm's Mermaid diagram generator uses conventional flowchart shapes: diamond shapes (decision points) for gates, rectangles for actions, and rounded rectangles for signals. Gates also show deny paths when a failResponse or errorSignal is defined. Steps are color-coded: yellow for gates, blue for actions, green for signals."
1594
+ }
1595
+ ]
1596
+ },
1597
+ {
1598
+ "type": "variant-group",
1599
+ "slot": "slot-066",
1600
+ "course": "para-401",
1601
+ "variants": [
1602
+ {
1603
+ "id": "plsat-066",
1604
+ "scenario": "Your MCP-connected agent calls `paradigm_search` for `#auth` twice within 10 seconds. The project has a ToolCache with a 30-second TTL configured.",
1605
+ "question": "What happens on the second call?",
1606
+ "choices": {
1607
+ "A": "The search runs again because each MCP call is stateless",
1608
+ "B": "The cached result is returned instantly without re-scanning the index",
1609
+ "C": "The cache is checked but always invalidated because search results may change",
1610
+ "D": "The second call is queued until the first cache entry expires",
1611
+ "E": "An error is returned because duplicate calls are rate-limited"
1612
+ },
1613
+ "correct": "B",
1614
+ "explanation": "The ToolCache uses a time-based TTL (default 30 seconds). When the same tool is called with the same arguments within the TTL window, the cached result is returned immediately without re-executing the underlying scan. This saves significant compute for repeated discovery operations like search, status, and navigate."
1615
+ },
1616
+ {
1617
+ "id": "plsat-066b",
1618
+ "scenario": "An agent calls `paradigm_reindex` to rebuild the static index after modifying several .purpose files. The project has ToolCache enabled.",
1619
+ "question": "What happens to the ToolCache when reindex completes?",
1620
+ "choices": {
1621
+ "A": "Nothing — the cache is independent of the index",
1622
+ "B": "Only search-related cache entries are invalidated",
1623
+ "C": "The entire cache is cleared to ensure fresh results from the rebuilt index",
1624
+ "D": "Cache entries are marked stale but still served until they expire naturally",
1625
+ "E": "The cache TTL is doubled to avoid redundant scans after reindex"
1626
+ },
1627
+ "correct": "C",
1628
+ "explanation": "When paradigm_reindex completes successfully, it calls toolCache.clear() to invalidate ALL cached entries. This is critical because the reindex rebuilds the underlying data that search, navigate, and status tools depend on. Serving stale cached results after a reindex would return outdated symbol information."
1629
+ }
1630
+ ]
1631
+ },
1632
+ {
1633
+ "type": "variant-group",
1634
+ "slot": "slot-067",
1635
+ "course": "para-501",
1636
+ "variants": [
1637
+ {
1638
+ "id": "plsat-067",
1639
+ "scenario": "An agent has been working for 45 minutes, modifying 5 source files and touching symbols `#auth-middleware`, `^rate-limited`, and `!login-failed`. The session has 12 breadcrumbs recorded.",
1640
+ "question": "What triggers auto-lore drafting?",
1641
+ "choices": {
1642
+ "A": "Auto-lore drafts after every file modification regardless of count",
1643
+ "B": "Auto-lore drafts when 3+ files are modified, generating a partial LoreEntry from session breadcrumbs",
1644
+ "C": "Auto-lore drafts only when the agent explicitly calls paradigm_lore_record",
1645
+ "D": "Auto-lore drafts at a fixed time interval (every 30 minutes)",
1646
+ "E": "Auto-lore drafts only during the on-stop habit check"
1647
+ },
1648
+ "correct": "B",
1649
+ "explanation": "The draftLoreFromBreadcrumbs() function generates a partial LoreEntry when 3+ files have been modified in a session. It extracts tool usage statistics from breadcrumbs, includes the symbols touched and files modified, and tags the draft with 'auto-draft' for review. The 3-file threshold ensures trivial edits don't generate noise."
1650
+ },
1651
+ {
1652
+ "id": "plsat-067b",
1653
+ "scenario": "After a long coding session, the auto-lore system generates a draft entry. You inspect the draft and notice it has a tag you didn't add.",
1654
+ "question": "What tag does auto-lore always apply to drafted entries?",
1655
+ "choices": {
1656
+ "A": "`auto-generated` — marking it as machine-created",
1657
+ "B": "`auto-draft` — indicating it needs human review before finalization",
1658
+ "C": "`session-log` — categorizing it as a session record",
1659
+ "D": "`pending-review` — flagging it for team approval",
1660
+ "E": "`unverified` — warning that the content may be incomplete"
1661
+ },
1662
+ "correct": "B",
1663
+ "explanation": "Auto-drafted lore entries are always tagged with 'auto-draft' to distinguish them from manually recorded entries. This tag signals that the entry was machine-generated from session breadcrumbs and should be reviewed for accuracy before being treated as authoritative project history."
1664
+ }
1665
+ ]
1666
+ },
1667
+ {
1668
+ "type": "variant-group",
1669
+ "slot": "slot-068",
1670
+ "course": "para-501",
1671
+ "variants": [
1672
+ {
1673
+ "id": "plsat-068",
1674
+ "scenario": "Your project's `.paradigm/config.yaml` contains:\n```yaml\nlimits:\n habitsCacheTtlMs: 60000\n threadTrailMax: 20\n breadcrumbsMax: 100\n```",
1675
+ "question": "What do these configurable limits control?",
1676
+ "choices": {
1677
+ "A": "Maximum file sizes for paradigm-managed files",
1678
+ "B": "Rate limits for MCP tool calls per session",
1679
+ "C": "Tunable parameters for habits cache duration, thread trail depth, and breadcrumb history length",
1680
+ "D": "Hard caps on the number of symbols, flows, and gates allowed",
1681
+ "E": "Timeout durations for CLI commands"
1682
+ },
1683
+ "correct": "C",
1684
+ "explanation": "The LimitsConfig in config.yaml allows projects to tune operational parameters: habitsCacheTtlMs controls how long habit definitions are cached (default 30000ms), threadTrailMax sets the maximum breadcrumbs shown in thread trail output (default 10), and breadcrumbsMax sets the maximum breadcrumbs stored per session. These defaults work for most projects but can be adjusted for larger codebases."
1685
+ },
1686
+ {
1687
+ "id": "plsat-068b",
1688
+ "scenario": "A large monorepo project finds that paradigm_search is running too frequently, consuming unnecessary compute. They want to increase the cache duration for MCP tool results.",
1689
+ "question": "Which config.yaml field controls MCP tool cache duration?",
1690
+ "choices": {
1691
+ "A": "`limits.searchCacheTtlMs` — specific to search operations",
1692
+ "B": "`limits.toolCacheTtlMs` — controls the ToolCache TTL for all cached MCP tools",
1693
+ "C": "`limits.mcpTimeoutMs` — sets the MCP response timeout",
1694
+ "D": "`cache.ttl` — global cache setting for all paradigm operations",
1695
+ "E": "`limits.habitsCacheTtlMs` — since habits and tools share the same cache"
1696
+ },
1697
+ "correct": "B",
1698
+ "explanation": "The limits.toolCacheTtlMs field in config.yaml controls the TTL for the ToolCache that wraps paradigm_search, paradigm_status, and paradigm_navigate. The default is 30000ms (30 seconds). Increasing this value reduces redundant computations but may serve slightly stale results. It's separate from habitsCacheTtlMs which controls the habits definition cache."
1699
+ }
1700
+ ]
1701
+ },
1702
+ {
1703
+ "type": "standalone",
1704
+ "slot": "slot-069",
1705
+ "course": "para-501",
1706
+ "variants": [
1707
+ {
1708
+ "id": "plsat-069",
1709
+ "scenario": "Your team uses Paradigm's Global Brain (`~/.paradigm/`) to share wisdom, lore, and history across projects. After a year, the global directory has grown to contain hundreds of old entries.",
1710
+ "question": "How do you clean up old Global Brain entries?",
1711
+ "choices": {
1712
+ "A": "Manually delete files from `~/.paradigm/` using `rm -rf`",
1713
+ "B": "`paradigm global clean --older-than 90d` removes files older than the specified duration",
1714
+ "C": "`paradigm scan --prune` removes unused global entries",
1715
+ "D": "Global Brain entries are automatically pruned on each `paradigm shift`",
1716
+ "E": "`paradigm doctor --fix` cleans up stale global files"
1717
+ },
1718
+ "correct": "B",
1719
+ "explanation": "The `paradigm global clean` command scans ~/.paradigm/ directories (wisdom, lore, history, cache) for files older than the specified duration. The --older-than flag accepts human-readable durations like 90d, 30d, or 7d. Use --dry-run first to preview what would be deleted. This is safer than manual deletion because it respects directory structure and cleans up empty directories afterward."
1720
+ }
1721
+ ]
1722
+ },
1723
+ {
1724
+ "type": "variant-group",
1725
+ "slot": "slot-070",
1726
+ "course": "para-401",
1727
+ "variants": [
1728
+ {
1729
+ "id": "plsat-070",
1730
+ "scenario": "A Claude Code plugin's `hooks.json` includes:\n```json\n{\n \"compatibleVersions\": {\n \"min\": \"3.0.0\",\n \"max\": \"4.0.0\"\n }\n}\n```\nYour installed Paradigm CLI is version 3.1.6.",
1731
+ "question": "What happens when you run `paradigm hooks install`?",
1732
+ "choices": {
1733
+ "A": "Installation fails because 3.1.6 is not exactly 3.0.0 or 4.0.0",
1734
+ "B": "Installation proceeds normally — 3.1.6 is within the compatible range",
1735
+ "C": "A warning is shown but installation is blocked until you upgrade",
1736
+ "D": "The plugin is downgraded to match version 3.0.0",
1737
+ "E": "The compatibleVersions field is ignored during installation"
1738
+ },
1739
+ "correct": "B",
1740
+ "explanation": "The plugin version compatibility check compares the installed Paradigm version against the min/max range in hooks.json. Since 3.1.6 >= 3.0.0 and 3.1.6 < 4.0.0, installation proceeds normally. If the version were outside the range (e.g., 2.9.0 or 4.1.0), a warning would be displayed advising the user to update their Paradigm version or the plugin."
1741
+ },
1742
+ {
1743
+ "id": "plsat-070b",
1744
+ "scenario": "You're developing a Paradigm plugin and want to ensure it only works with Paradigm versions that support the habits system (introduced in v3.0).",
1745
+ "question": "Where do you declare this version requirement?",
1746
+ "choices": {
1747
+ "A": "In the plugin's `package.json` under `peerDependencies`",
1748
+ "B": "In the plugin's `hooks.json` under the `compatibleVersions` field with `min: \"3.0.0\"`",
1749
+ "C": "In the plugin's `.purpose` file under `dependencies`",
1750
+ "D": "In the project's `.paradigm/config.yaml` under `plugins`",
1751
+ "E": "Version requirements are not enforceable — plugins work with any version"
1752
+ },
1753
+ "correct": "B",
1754
+ "explanation": "Plugin version compatibility is declared in the plugin's hooks.json file using the compatibleVersions field. Setting min to '3.0.0' ensures that paradigm hooks install will warn users running older versions that lack habits support. This check runs at the start of hook installation before any hooks are written."
1755
+ }
1756
+ ]
1757
+ },
1758
+ {
1759
+ "type": "variant-group",
1760
+ "slot": "slot-071",
1761
+ "course": "para-501",
1762
+ "variants": [
1763
+ {
1764
+ "id": "plsat-071",
1765
+ "scenario": "You want to record a lore entry that credits both the human developer and the AI agent that collaborated on a feature. The lore system supports co-authorship tracking.",
1766
+ "question": "Which field on a LoreEntry captures AI collaboration?",
1767
+ "choices": {
1768
+ "A": "`author` — set to the AI agent's name",
1769
+ "B": "`assistedBy` — with type ('agent', 'tool', or 'human'), id, and optional role",
1770
+ "C": "`contributors` — an array of all participant names",
1771
+ "D": "`metadata.aiModel` — storing the model name used",
1772
+ "E": "`tags` — add an 'ai-assisted' tag"
1773
+ },
1774
+ "correct": "B",
1775
+ "explanation": "The assistedBy field on LoreEntry provides structured co-authorship tracking. It records the type of assistant (agent, tool, or human), their identifier (e.g., 'claude-opus-4', 'copilot'), and an optional role description. The author field remains the human developer, while assistedBy captures the AI collaboration context for project history."
1776
+ },
1777
+ {
1778
+ "id": "plsat-071b",
1779
+ "scenario": "Your team reviews lore entries from the past month and wants to understand how much AI assistance was involved in recent changes.",
1780
+ "question": "How does the `assistedBy` field help with this analysis?",
1781
+ "choices": {
1782
+ "A": "It tracks token usage per AI interaction",
1783
+ "B": "It records the AI's confidence score for each change",
1784
+ "C": "It provides structured data (type, id, role) showing which AI tools or agents assisted each recorded session",
1785
+ "D": "It measures the percentage of code written by AI vs human",
1786
+ "E": "It links to the AI conversation transcript"
1787
+ },
1788
+ "correct": "C",
1789
+ "explanation": "The assistedBy field captures three dimensions of AI collaboration: type (was it an agent like Claude, a tool like Copilot, or a human pair-programmer?), id (which specific model or tool?), and role (what was their contribution — implementation, review, planning?). This structured data enables teams to analyze collaboration patterns across their lore timeline."
1790
+ }
1791
+ ]
1792
+ },
1793
+ {
1794
+ "type": "standalone",
1795
+ "slot": "slot-072",
1796
+ "course": "para-501",
1797
+ "variants": [
1798
+ {
1799
+ "id": "plsat-072",
1800
+ "scenario": "A project has no `limits` section in `.paradigm/config.yaml`. An agent calls tools that rely on configurable limits — habits cache, thread trail, and ToolCache.",
1801
+ "question": "What values are used when limits are not configured?",
1802
+ "choices": {
1803
+ "A": "All limits are set to 0 (unlimited)",
1804
+ "B": "An error is thrown requiring explicit configuration",
1805
+ "C": "Sensible defaults: habitsCacheTtlMs=30000, threadTrailMax=10, toolCacheTtlMs=30000, breadcrumbsMax=unlimited",
1806
+ "D": "Limits are inherited from the Global Brain (~/.paradigm/) configuration",
1807
+ "E": "Each tool prompts the user to set a limit on first use"
1808
+ },
1809
+ "correct": "C",
1810
+ "explanation": "All configurable limits have sensible defaults that match the pre-configuration behavior: habits cache refreshes every 30 seconds, thread trail shows the last 10 breadcrumbs, ToolCache entries expire after 30 seconds. These defaults work well for most projects. The limits section in config.yaml is entirely optional — only override when you have a specific need."
1811
+ }
1812
+ ]
1813
+ },
1814
+ {
1815
+ "type": "standalone",
1816
+ "slot": "slot-073",
1817
+ "course": "para-401",
1818
+ "variants": [
1819
+ {
1820
+ "id": "plsat-073",
1821
+ "scenario": "An agent working on a complex feature calls these MCP tools in sequence:\n1. `paradigm_status` (cached)\n2. `paradigm_search` for `#auth` (cached)\n3. Edits 3 files, adds a new component\n4. `paradigm_reindex`\n5. `paradigm_search` for `#auth` again",
1822
+ "question": "Does step 5 return the updated results including the new component?",
1823
+ "choices": {
1824
+ "A": "No — the search cache still has the old results from step 2",
1825
+ "B": "Yes — reindex at step 4 clears all caches, so step 5 runs a fresh search against the rebuilt index",
1826
+ "C": "Only if 30 seconds have passed since step 2",
1827
+ "D": "Only if the agent explicitly called toolCache.clear()",
1828
+ "E": "The search always bypasses cache after a write operation"
1829
+ },
1830
+ "correct": "B",
1831
+ "explanation": "The reindex operation at step 4 has two effects: it rebuilds the static index from .purpose files and clears the entire ToolCache. This means step 5 performs a fresh search against the newly rebuilt index, which includes the new component. This cache-invalidation-on-reindex pattern ensures that discovery tools always reflect the current state after structural changes."
1832
+ }
1833
+ ]
1834
+ },
1835
+ {
1836
+ "type": "variant-group",
1837
+ "slot": "slot-074",
1838
+ "course": "para-501",
1839
+ "variants": [
1840
+ {
1841
+ "id": "plsat-074",
1842
+ "scenario": "An auto-lore draft is generated from session breadcrumbs after modifying 6 files. The breadcrumbs show: 4 Edit tool calls, 2 Write tool calls, 8 Read tool calls, 3 paradigm_navigate calls.",
1843
+ "question": "What information does the auto-lore draft extract from these breadcrumbs?",
1844
+ "choices": {
1845
+ "A": "Only the file paths that were modified",
1846
+ "B": "A complete diff of all code changes",
1847
+ "C": "Tool usage statistics (edit count, write count, read count) plus modified files and symbols touched",
1848
+ "D": "The full text of every tool call and response",
1849
+ "E": "Only the symbols referenced in paradigm_navigate calls"
1850
+ },
1851
+ "correct": "C",
1852
+ "explanation": "The auto-lore drafting function analyzes breadcrumbs to extract tool usage statistics — counting edits, writes, reads, and paradigm tool calls. It combines this with the list of modified files and symbols touched to generate a summary. The draft doesn't include full diffs or response text, keeping the lore entry concise and focused on what happened rather than how."
1853
+ },
1854
+ {
1855
+ "id": "plsat-074b",
1856
+ "scenario": "An agent completes a task that modified only 2 files. The habits system runs the on-stop check.",
1857
+ "question": "Will auto-lore drafting generate an entry?",
1858
+ "choices": {
1859
+ "A": "Yes — any file modification triggers auto-lore",
1860
+ "B": "No — auto-lore requires 3+ modified files to trigger",
1861
+ "C": "Yes — but only if the session lasted longer than 15 minutes",
1862
+ "D": "No — auto-lore only runs during postflight, not on-stop",
1863
+ "E": "It depends on the project's limits.breadcrumbsMax setting"
1864
+ },
1865
+ "correct": "B",
1866
+ "explanation": "Auto-lore drafting has a 3-file minimum threshold. Modifying only 2 files does not trigger a draft because such small changes are typically routine fixes that don't warrant project history entries. This threshold aligns with the lore recording decision tree: 'Did I modify 3+ source files? YES → Record lore.' The threshold prevents noise in project history."
1867
+ }
1868
+ ]
1869
+ },
1870
+ {
1871
+ "type": "standalone",
1872
+ "slot": "slot-075",
1873
+ "course": "para-501",
1874
+ "variants": [
1875
+ {
1876
+ "id": "plsat-075",
1877
+ "scenario": "You run `paradigm global clean --older-than 30d --dry-run` and see:\n```\nWould delete 23 files from wisdom/\nWould delete 45 files from lore/\nWould delete 12 files from history/\nWould delete 0 files from cache/\n```",
1878
+ "question": "What is the safest next step?",
1879
+ "choices": {
1880
+ "A": "Run `paradigm global clean --older-than 30d` to delete all 80 files immediately",
1881
+ "B": "Review the specific files listed, then run without --dry-run if the deletions look correct",
1882
+ "C": "Run `paradigm global clean --older-than 7d` to be more aggressive",
1883
+ "D": "Delete the `~/.paradigm/` directory entirely since most files are old",
1884
+ "E": "Skip cleanup — 80 files is too many to safely remove"
1885
+ },
1886
+ "correct": "B",
1887
+ "explanation": "The --dry-run flag exists specifically to preview destructive operations. The safest workflow is: (1) dry-run to see what would be deleted, (2) review the file list for anything you want to keep, (3) run without --dry-run once satisfied. Global Brain files contain cross-project wisdom and lore that may be valuable — always review before bulk deletion."
1888
+ }
1889
+ ]
1890
+ },
1891
+ {
1892
+ "type": "standalone",
1893
+ "slot": "slot-076",
1894
+ "course": "para-401",
1895
+ "variants": [
1896
+ {
1897
+ "id": "plsat-076",
1898
+ "scenario": "Your project uses both the ToolCache (for MCP tool results) and the habits cache (for habit definitions). Both have configurable TTLs.",
1899
+ "question": "Why are these two separate caches rather than one unified cache?",
1900
+ "choices": {
1901
+ "A": "Historical accident — they were built by different teams",
1902
+ "B": "They cache different data types with different invalidation needs: tool results change on reindex, habit definitions change on file edits",
1903
+ "C": "Performance — two smaller caches are faster than one large cache",
1904
+ "D": "Security — MCP tool results must be isolated from habit definitions",
1905
+ "E": "They are the same cache with different configuration keys"
1906
+ },
1907
+ "correct": "B",
1908
+ "explanation": "The ToolCache caches MCP tool results (search, navigate, status) and is invalidated on reindex when the underlying index changes. The habits cache stores parsed habit definitions from habits.yaml and is invalidated when the file's modification time changes. These fundamentally different invalidation strategies require separate cache implementations — flushing all habit definitions because a .purpose file changed would be wasteful, and vice versa."
1909
+ }
1910
+ ]
1911
+ },
1912
+ {
1913
+ "type": "standalone",
1914
+ "slot": "slot-077",
1915
+ "course": "para-501",
1916
+ "variants": [
1917
+ {
1918
+ "id": "plsat-077",
1919
+ "scenario": "You're configuring a large monorepo with 500+ symbols. Sessions often span 30+ minutes with many breadcrumbs. You want to optimize the Paradigm configuration.",
1920
+ "question": "Which limits configuration would be most appropriate?",
1921
+ "choices": {
1922
+ "A": "Set all limits to maximum values for the largest possible buffers",
1923
+ "B": "Increase threadTrailMax to 25 and toolCacheTtlMs to 60000 for the larger codebase, keep other defaults",
1924
+ "C": "Decrease all TTLs to 5000ms to ensure data is always fresh",
1925
+ "D": "Remove the limits section entirely and rely on defaults",
1926
+ "E": "Set breadcrumbsMax to 10 to save memory"
1927
+ },
1928
+ "correct": "B",
1929
+ "explanation": "For large monorepos, increasing threadTrailMax (from default 10 to 25) provides more session context for complex tasks, and increasing toolCacheTtlMs (from 30s to 60s) reduces redundant index scans across the larger symbol space. Other defaults work well regardless of project size. Setting TTLs too low causes excessive recomputation, while setting breadcrumbsMax too low loses valuable session context."
1930
+ }
1931
+ ]
1932
+ },
1933
+ {
1934
+ "type": "standalone",
1935
+ "slot": "slot-078",
1936
+ "course": "para-501",
1937
+ "variants": [
1938
+ {
1939
+ "id": "plsat-078",
1940
+ "scenario": "Your project has aspects categorized as rules, decisions, constraints, configurations, and invariants. A new aspect states: 'API response payloads must not exceed 5MB.' A developer is unsure which category to assign.",
1941
+ "question": "Which aspect category is correct for this aspect?",
1942
+ "choices": {
1943
+ "A": "`rule` \u2014 because it uses 'must not', indicating a mandatory pattern",
1944
+ "B": "`constraint` \u2014 because it defines a quantitative limit (5MB) on system behavior",
1945
+ "C": "`configuration` \u2014 because the 5MB value could be changed per environment",
1946
+ "D": "`invariant` \u2014 because it must always hold true",
1947
+ "E": "`decision` \u2014 because someone decided 5MB was the right limit"
1948
+ },
1949
+ "correct": "B",
1950
+ "explanation": "The key indicator is the quantitative limit: '5MB'. Constraints define measurable boundaries on system behavior \u2014 file sizes, rate limits, timeouts, quotas. While 'must not exceed' sounds like a rule, the category inference system prioritizes 'limit', 'maximum', 'cannot exceed' keywords for `constraint`. A rule would be a pattern without a specific numeric boundary (e.g., 'all responses must include request IDs'). Configuration would apply if the value explicitly varies by environment."
1951
+ },
1952
+ {
1953
+ "id": "plsat-078b",
1954
+ "scenario": "An aspect definition reads: 'The team decided to use PostgreSQL over MongoDB for the user service due to relational query requirements.' No category field is explicitly set.",
1955
+ "question": "What category will Paradigm's category inference assign?",
1956
+ "choices": {
1957
+ "A": "`rule` \u2014 because it implies PostgreSQL must be used",
1958
+ "B": "`constraint` \u2014 because it limits the database technology",
1959
+ "C": "`decision` \u2014 because the description contains the decision keyword 'decided'",
1960
+ "D": "`configuration` \u2014 because the database choice is a deployment setting",
1961
+ "E": "`invariant` \u2014 because the database choice should never change"
1962
+ },
1963
+ "correct": "C",
1964
+ "explanation": "Category inference uses keyword matching on the description. Words like 'decided', 'chosen', 'selected', 'opted' trigger the `decision` category. The description explicitly says 'The team decided to use PostgreSQL over MongoDB' \u2014 this is a textbook architectural decision with rationale."
1965
+ }
1966
+ ]
1967
+ },
1968
+ {
1969
+ "type": "standalone",
1970
+ "slot": "slot-079",
1971
+ "course": "para-501",
1972
+ "variants": [
1973
+ {
1974
+ "id": "plsat-079",
1975
+ "scenario": "Your aspect graph has the following edges:\n- `~token-expiry-24h` --depends-on--> `~jwt-signing-rs256`\n- `~jwt-signing-rs256` --enforced-by--> `#auth-middleware`\n- `~cache-aggressively` --contradicts--> `~always-fresh-data`\n- `~rate-limit-v2` --supersedes--> `~rate-limit-v1`\n\nYou need to modify `~jwt-signing-rs256` to change the signing algorithm.",
1976
+ "question": "Which aspect will paradigm_ripple surface as impacted through the 'depends-on' edge?",
1977
+ "choices": {
1978
+ "A": "`~cache-aggressively` \u2014 because it has a contradicts edge in the same graph",
1979
+ "B": "`~token-expiry-24h` \u2014 because it depends-on the aspect being modified",
1980
+ "C": "`~rate-limit-v2` \u2014 because it supersedes another aspect",
1981
+ "D": "`#auth-middleware` \u2014 because it enforces the aspect",
1982
+ "E": "All four aspects \u2014 ripple follows all edge types equally"
1983
+ },
1984
+ "correct": "B",
1985
+ "explanation": "Ripple follows dependency edges to discover indirect impacts. `~token-expiry-24h` has a `depends-on` edge to `~jwt-signing-rs256`, meaning changes to the signing algorithm may affect token expiry behavior. `#auth-middleware` has an `enforced-by` edge (reverse direction \u2014 it enforces the aspect, but the aspect doesn't depend on it for correctness). The contradicts and supersedes edges involve unrelated aspects."
1986
+ }
1987
+ ]
1988
+ },
1989
+ {
1990
+ "type": "standalone",
1991
+ "slot": "slot-080",
1992
+ "course": "para-501",
1993
+ "variants": [
1994
+ {
1995
+ "id": "plsat-080",
1996
+ "scenario": "You run `paradigm_aspect_search({ query: 'jwt expiry' })` and get three results:\n- Tier 1 (learned): `~token-expiry-24h` (weight: 3.0)\n- Tier 2 (FTS5): `~session-timeout-30m` (BM25: 0.7)\n- Tier 3 (fuzzy): `~jwt-refresh-rotation` (distance: 2)\n\nThe Tier 1 result is exactly what you need.",
1997
+ "question": "What should you do to reinforce this search mapping?",
1998
+ "choices": {
1999
+ "A": "Nothing \u2014 Tier 1 results are already reinforced by being in the search_weights table",
2000
+ "B": "Call `paradigm_aspect_confirm({ query: 'jwt expiry', aspectId: 'token-expiry-24h' })` to add +1.0 weight and decay the others",
2001
+ "C": "Manually update the search_weights SQLite table to increase the weight",
2002
+ "D": "Call `paradigm_aspect_get({ aspectId: 'token-expiry-24h' })` to register a direct access",
2003
+ "E": "Call `paradigm_reindex` to rebuild the learned mappings"
2004
+ },
2005
+ "correct": "B",
2006
+ "explanation": "paradigm_aspect_confirm is the feedback mechanism for the learning system. Calling it with the query and selected aspect ID adds +1.0 to the confirmed result's weight (3.0 \u2192 4.0) and decays all other results for that query by *0.95. This reinforces the correct mapping. Reindex rebuilds the graph but does not affect search_weights \u2014 those persist across reindexes. Direct access via aspect_get records a heatmap entry but does not affect search learning."
2007
+ }
2008
+ ]
2009
+ },
2010
+ {
2011
+ "type": "standalone",
2012
+ "slot": "slot-081",
2013
+ "course": "para-501",
2014
+ "variants": [
2015
+ {
2016
+ "id": "plsat-081",
2017
+ "scenario": "The aspect graph materialization pipeline runs during `paradigm_reindex`. It processes aspects from .purpose files through a specific sequence of steps.",
2018
+ "question": "What is the correct order of the five-step materialization pipeline?",
2019
+ "choices": {
2020
+ "A": "materialize aspects \u2192 open graph \u2192 materialize lore links \u2192 infer lore edges \u2192 close graph",
2021
+ "B": "open graph \u2192 materialize aspects \u2192 materialize lore links \u2192 infer lore edges \u2192 close graph",
2022
+ "C": "open graph \u2192 infer lore edges \u2192 materialize aspects \u2192 materialize lore links \u2192 close graph",
2023
+ "D": "materialize aspects \u2192 materialize lore links \u2192 open graph \u2192 infer lore edges \u2192 close graph",
2024
+ "E": "open graph \u2192 materialize lore links \u2192 materialize aspects \u2192 close graph \u2192 infer lore edges"
2025
+ },
2026
+ "correct": "B",
2027
+ "explanation": "The materialization pipeline follows a strict order: (1) openAspectGraph opens or creates the SQLite database and clears all tables. (2) materializeAspects reads .purpose files and writes aspects, anchors, and explicit/inferred edges. (3) materializeLoreLinks creates entries connecting aspects to their referenced lore entries. (4) inferLoreEdges scans for shared lore references between aspects and creates learned edges. (5) closeAspectGraph commits changes, runs ANALYZE, and closes the connection."
2028
+ }
2029
+ ]
2030
+ },
2031
+ {
2032
+ "type": "standalone",
2033
+ "slot": "slot-082",
2034
+ "course": "para-501",
2035
+ "variants": [
2036
+ {
2037
+ "id": "plsat-082",
2038
+ "scenario": "You define an aspect with an `applies-to` reference to a component:\n\n```yaml\n~audit-required:\n description: Financial operations must produce audit logs\n applies-to: [\"#payment-service\"]\n edges:\n - target: \"#audit-middleware\"\n relation: enforced-by\n```",
2039
+ "question": "What edges will the materialization pipeline create, and what are their origins and weights?",
2040
+ "choices": {
2041
+ "A": "One edge: `enforced-by` to `#audit-middleware` with origin `explicit` and weight 1.0",
2042
+ "B": "Two edges: `enforced-by` to `#audit-middleware` (origin: explicit, weight: 1.0) and an inferred edge to `#payment-service` (origin: inferred, weight: 0.5)",
2043
+ "C": "Two edges: both with origin `explicit` and weight 1.0",
2044
+ "D": "Three edges: one explicit, one inferred, and one learned",
2045
+ "E": "One edge: `applies-to` is documentation only and does not generate edges"
2046
+ },
2047
+ "correct": "B",
2048
+ "explanation": "The materialization pipeline creates edges from two sources. The explicit `edges` field generates an edge to `#audit-middleware` with origin `explicit` and weight 1.0. The `applies-to` reference generates an inferred edge to `#payment-service` with origin `inferred` and weight 0.5. Inferred edges have lower weight because they represent a weaker relationship than explicitly declared edges."
2049
+ }
2050
+ ]
2051
+ },
2052
+ {
2053
+ "type": "standalone",
2054
+ "slot": "slot-083",
2055
+ "course": "para-501",
2056
+ "variants": [
2057
+ {
2058
+ "id": "plsat-083",
2059
+ "scenario": "An aspect `~session-timeout-30m` was created 3 months ago with an anchor at `src/middleware/session.ts:15-25`. Since then, a developer refactored the file and the session timeout logic is now at lines 40-55. The aspect definition was not updated.\n\nYou run `paradigm_aspect_drift({ aspectId: 'session-timeout-30m' })`.",
2060
+ "question": "What will the drift detection report?",
2061
+ "choices": {
2062
+ "A": "No drift \u2014 the file still exists, so the anchor is valid",
2063
+ "B": "Drift detected: the SHA-256 content hash of lines 15-25 no longer matches the stored hash, indicating the code at the anchored location has changed",
2064
+ "C": "An error \u2014 the anchor line range exceeds the current file length",
2065
+ "D": "Partial drift \u2014 only some lines within the range changed",
2066
+ "E": "No drift \u2014 drift detection only checks whether the file exists, not its contents"
2067
+ },
2068
+ "correct": "B",
2069
+ "explanation": "Drift detection computes a SHA-256 hash of the current code at the anchored line range (15-25) and compares it to the hash stored during materialization. Since the timeout logic moved to different lines, the code at lines 15-25 is now different \u2014 the hashes will not match, and drift is reported. The fix is to update the anchor to `src/middleware/session.ts:40-55` to point to the new location of the timeout logic."
2070
+ }
2071
+ ]
2072
+ },
2073
+ {
2074
+ "type": "standalone",
2075
+ "slot": "slot-084",
2076
+ "course": "para-501",
2077
+ "variants": [
2078
+ {
2079
+ "id": "plsat-084",
2080
+ "scenario": "You run `paradigm_aspect_suggest_scan({ filePath: 'src/auth/jwt.ts' })` on a file containing:\n\n```typescript\nconst TOKEN_EXPIRY = 86400; // 24 hours in seconds\nconst MAX_REFRESH_ATTEMPTS = 3;\nif (process.env.NODE_ENV === 'production') { ... }\nconst EMAIL_REGEX = /^[a-zA-Z0-9+_.-]+@[a-zA-Z0-9.-]+$/;\n```",
2081
+ "question": "Which of the 8 built-in detectors will fire for each pattern?",
2082
+ "choices": {
2083
+ "A": "All four lines trigger the 'magic numbers' detector only",
2084
+ "B": "TOKEN_EXPIRY: time values; MAX_REFRESH_ATTEMPTS: magic numbers; process.env: environment checks; EMAIL_REGEX: regex patterns",
2085
+ "C": "TOKEN_EXPIRY: magic numbers; MAX_REFRESH_ATTEMPTS: rate limits; process.env: feature flags; EMAIL_REGEX: hardcoded strings",
2086
+ "D": "All four lines trigger the 'hardcoded strings' detector",
2087
+ "E": "TOKEN_EXPIRY: configuration; MAX_REFRESH_ATTEMPTS: constraint; process.env: environment checks; EMAIL_REGEX: assertion guards"
2088
+ },
2089
+ "correct": "B",
2090
+ "explanation": "Each pattern matches a specific detector: (1) 86400 with a comment mentioning '24 hours' matches the time values detector (durations, timeouts, TTLs, expiry). (2) MAX_REFRESH_ATTEMPTS = 3 is a numeric literal that is not 0 or 1, matching the magic numbers detector. (3) process.env.NODE_ENV matches the environment checks detector. (4) The regular expression literal matches the regex patterns detector. The detectors are specialized for these exact pattern types."
2091
+ }
2092
+ ]
2093
+ },
2094
+ {
2095
+ "type": "standalone",
2096
+ "slot": "slot-085",
2097
+ "course": "para-501",
2098
+ "variants": [
2099
+ {
2100
+ "id": "plsat-085",
2101
+ "scenario": "Two aspects in your project both reference lore entry `L-2026-01-15-003`:\n- `~token-expiry-24h` has `lore: [L-2026-01-15-003]`\n- `~refresh-token-rotation` has `lore: [L-2026-01-15-003]`\n\nNeither aspect has an explicit edge to the other.",
2102
+ "question": "What happens during the `inferLoreEdges` step of materialization?",
2103
+ "choices": {
2104
+ "A": "Nothing \u2014 edges are only created from explicit YAML definitions",
2105
+ "B": "A learned edge is created between the two aspects with origin 'learned' and weight proportional to shared lore references",
2106
+ "C": "Both aspects are merged into a single aspect",
2107
+ "D": "A lore_links entry is created but no edge is generated",
2108
+ "E": "An explicit edge with weight 1.0 is created between them"
2109
+ },
2110
+ "correct": "B",
2111
+ "explanation": "The inferLoreEdges step scans the lore_links table for aspects that share lore references. When two aspects both reference the same lore entry, a learned edge is created between them with origin 'learned' and a weight proportional to the number of shared references. This discovers implicit relationships \u2014 aspects that were discussed in the same lore context are likely related even without explicit edges."
2112
+ }
2113
+ ]
2114
+ },
2115
+ {
2116
+ "type": "standalone",
2117
+ "slot": "slot-086",
2118
+ "course": "para-501",
2119
+ "variants": [
2120
+ {
2121
+ "id": "plsat-086",
2122
+ "scenario": "Your project has three edge origins in the aspect graph:\n- Explicit edges (weight 1.0) from YAML `edges` fields\n- Inferred edges (weight 0.5) from `applies-to` references\n- Learned edges from shared lore references\n\nDuring recursive ripple, the BFS traverses: an explicit edge (1.0) then an inferred edge (0.5) then another inferred edge (0.5).",
2123
+ "question": "What is the cumulative path weight after these three hops, and will it be pruned by the default minWeight threshold?",
2124
+ "choices": {
2125
+ "A": "Weight: 2.0 (additive) \u2014 well above the 0.1 threshold, not pruned",
2126
+ "B": "Weight: 0.25 (multiplicative: 1.0 * 0.5 * 0.5) \u2014 above 0.1, not pruned",
2127
+ "C": "Weight: 0.5 (only the weakest edge counts) \u2014 above 0.1, not pruned",
2128
+ "D": "Weight: 0.0625 (multiplicative: 1.0 * 0.25 * 0.25) \u2014 below 0.1, pruned",
2129
+ "E": "Weight: 0.167 (average of all three) \u2014 above 0.1, not pruned"
2130
+ },
2131
+ "correct": "B",
2132
+ "explanation": "Recursive ripple uses multiplicative decay: the weight at each hop is multiplied by the edge weight. Starting at 1.0, after an explicit edge (1.0): 1.0 * 1.0 = 1.0. After an inferred edge (0.5): 1.0 * 0.5 = 0.5. After another inferred edge (0.5): 0.5 * 0.5 = 0.25. The cumulative weight 0.25 is above the default minWeight threshold of 0.1, so this path is NOT pruned. One more inferred edge would drop it to 0.125, still above threshold. Two more would reach 0.0625, below threshold and pruned."
2133
+ }
2134
+ ]
2135
+ },
2136
+ {
2137
+ "type": "standalone",
2138
+ "slot": "slot-087",
2139
+ "course": "para-501",
2140
+ "variants": [
2141
+ {
2142
+ "id": "plsat-087",
2143
+ "scenario": "Your project's aspect graph SQLite database at `.paradigm/aspect-graph.db` has six tables. During a governance review, you want to understand which aspects are discovered most frequently and how they are typically found.",
2144
+ "question": "Which table stores this information, and what are its columns?",
2145
+ "choices": {
2146
+ "A": "The `aspects` table with an `access_count` column",
2147
+ "B": "The `edges` table with a `traversal_count` column",
2148
+ "C": "The `heatmap` table with columns: aspect_id, access_type, count, and last_accessed",
2149
+ "D": "The `search_weights` table with a `hit_count` column",
2150
+ "E": "The `anchors` table with a `reference_count` column"
2151
+ },
2152
+ "correct": "C",
2153
+ "explanation": "The `heatmap` table tracks aspect access patterns with four columns: `aspect_id` (which aspect), `access_type` (how it was discovered: search, ripple, navigate, or direct), `count` (frequency), and `last_accessed` (timestamp). This table powers `paradigm_aspect_heatmap` and reveals whether aspects are typically found via search, encountered during ripple analysis, discovered through navigation, or accessed by direct ID lookup."
2154
+ }
2155
+ ]
2156
+ },
2157
+ {
2158
+ "type": "standalone",
2159
+ "slot": "slot-088",
2160
+ "course": "para-501",
2161
+ "variants": [
2162
+ {
2163
+ "id": "plsat-088",
2164
+ "scenario": "You want to extend `paradigm_aspect_suggest_scan` to detect SOC2 compliance annotations specific to your project. The built-in 8 detectors do not cover this pattern.",
2165
+ "question": "How do you add a custom detector?",
2166
+ "choices": {
2167
+ "A": "Edit the Paradigm source code to add a 9th built-in detector",
2168
+ "B": "Define a custom detector in `.paradigm/aspect-detectors.yaml` with regex patterns, language filters, and suggested category/severity",
2169
+ "C": "Create a `.paradigm/plugins/soc2-detector.js` plugin file",
2170
+ "D": "Add a `detectors` section to `.paradigm/config.yaml`",
2171
+ "E": "Custom detectors are not supported \u2014 use paradigm_aspect_search instead"
2172
+ },
2173
+ "correct": "B",
2174
+ "explanation": "Custom detectors are defined in `.paradigm/aspect-detectors.yaml`. Each detector specifies an id, name, description, regex patterns with language filters, and suggestions for category, severity, and tags. Custom detectors are loaded alongside the built-in 8 during `paradigm_aspect_suggest_scan`, extending the detection system without modifying Paradigm's source code."
2175
+ }
2176
+ ]
2177
+ },
2178
+ {
2179
+ "type": "standalone",
2180
+ "slot": "slot-089",
2181
+ "course": "para-501",
2182
+ "variants": [
2183
+ {
2184
+ "id": "plsat-089",
2185
+ "scenario": "During a quarterly governance review of a project with 150 aspects, the heatmap shows 40 aspects with zero access. The drift audit reveals 12 drifted anchors. The category distribution is: 95 rules, 20 constraints, 15 configurations, 12 decisions, 8 invariants.",
2186
+ "question": "What does the category distribution suggest about this project's aspect governance?",
2187
+ "choices": {
2188
+ "A": "The distribution is healthy \u2014 rules should always be the majority",
2189
+ "B": "The project may be over-documenting constraints as rules, and under-documenting strategic decisions \u2014 review whether some 'rules' are actually constraints or decisions",
2190
+ "C": "The project needs more invariants to balance the distribution",
2191
+ "D": "Configuration aspects should equal rules in a well-governed project",
2192
+ "E": "The 40 zero-access aspects indicate the project should reduce to 110 aspects"
2193
+ },
2194
+ "correct": "B",
2195
+ "explanation": "A 63% concentration in the `rule` category (95 out of 150) suggests over-classification. Many numeric limits (which should be constraints) and architectural choices (which should be decisions) may be categorized as rules. The low decision count (12) is a red flag \u2014 a project with 150 aspects likely made more than 12 strategic decisions. The governance review should reclassify mistyped aspects and document missing decisions. Zero-access aspects (40) are a separate concern requiring individual evaluation."
2196
+ }
2197
+ ]
1393
2198
  }
1394
2199
  ]
1395
2200
  }