@a-company/paradigm 3.1.5 → 3.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{accept-orchestration-CWZNCGZX.js → accept-orchestration-DIGPJVUR.js} +6 -5
- package/dist/{aggregate-W7Q6VIM2.js → aggregate-V4KPR3RW.js} +2 -2
- package/dist/{beacon-B47XSTL7.js → beacon-XRXL5KZB.js} +2 -2
- package/dist/{chunk-4LGLU2LO.js → chunk-2E2RTBSM.js} +533 -182
- package/dist/{chunk-YCLN7WXV.js → chunk-2QNZ6PVD.js} +219 -35
- package/dist/{chunk-UM54F7G5.js → chunk-4N6AYEEA.js} +1 -1
- package/dist/{chunk-MVXJVRFI.js → chunk-5TUAVVIG.js} +65 -1
- package/dist/{chunk-5C4SGQKH.js → chunk-6P4IFIK2.js} +4 -2
- package/dist/{chunk-WS5KM7OL.js → chunk-6RNYVBSG.js} +1 -1
- package/dist/{chunk-N6PJAPDE.js → chunk-AK5M6KJB.js} +18 -0
- package/dist/{chunk-VZ7CXFRZ.js → chunk-CRICL4FQ.js} +1004 -17
- package/dist/{chunk-MC7XC7XQ.js → chunk-GZDFVP2N.js} +20 -13
- package/dist/chunk-HPC3JAUP.js +42 -0
- package/dist/chunk-IRVA7NKV.js +657 -0
- package/dist/{chunk-ZPN7MXRA.js → chunk-KFHK6EBI.js} +184 -1
- package/dist/{chunk-UUZ2DMG5.js → chunk-KWDTBXP2.js} +1 -1
- package/dist/{chunk-DRUDZKIT.js → chunk-M2XMTJHQ.js} +693 -70
- package/dist/{chunk-PW2EXJQT.js → chunk-MRENOFTR.js} +24 -1
- package/dist/{chunk-QS36NGWV.js → chunk-QHJGB5TV.js} +1 -1
- package/dist/chunk-UI3XXVJ6.js +449 -0
- package/dist/{chunk-AD2LSCHB.js → chunk-Y4XZWCHK.js} +40 -74
- package/dist/{constellation-K3CIQCHI.js → constellation-GNK5DIMH.js} +2 -2
- package/dist/{cost-AEK6R7HK.js → cost-AGO5N7DD.js} +1 -1
- package/dist/{cursorrules-KI5QWHIX.js → cursorrules-LQFA7M62.js} +2 -2
- package/dist/{delete-W67IVTLJ.js → delete-3YXAJ5AA.js} +12 -1
- package/dist/{diff-AJJ5H6HV.js → diff-J6C5IHPV.js} +6 -5
- package/dist/{dist-2F7NO4H4-KSL6SJIO.js → dist-AG5JNIZU-XSEZ2LLK.js} +28 -3
- package/dist/dist-JOHRYQUA.js +7294 -0
- package/dist/{dist-NHJQVVUW.js → dist-Q6SAZI7X.js} +2 -2
- package/dist/{dist-GPQ4LAY3.js → dist-YP2CO4TG.js} +24 -6
- package/dist/{doctor-JBIV5PMN.js → doctor-TQYRF7KK.js} +2 -2
- package/dist/{edit-Y7XPYSMK.js → edit-EOMPXOG5.js} +1 -1
- package/dist/flow-7JUH6D4H.js +185 -0
- package/dist/global-AXILUM5X.js +136 -0
- package/dist/{habits-FA65W77Y.js → habits-CHP4EW5H.js} +234 -5
- package/dist/{hooks-JKWO44WH.js → hooks-DLZEYHI3.js} +1 -1
- package/dist/index.js +125 -100
- package/dist/{lint-HXKTWRNO.js → lint-N4LMMEXH.js} +141 -1
- package/dist/{list-R3QWW4SC.js → list-JKBJ7ESH.js} +1 -1
- package/dist/mcp.js +9273 -6515
- package/dist/{orchestrate-4ZH5GUQH.js → orchestrate-FAV64G2R.js} +6 -5
- package/dist/{probe-OYCP4JYG.js → probe-X3J2JX62.js} +18 -3
- package/dist/{promote-E6NBZ3BK.js → promote-HZH5E5CO.js} +1 -1
- package/dist/{providers-4PGPZEWP.js → providers-NQ67LO2Z.js} +1 -1
- package/dist/{record-OHQNWOUP.js → record-EECZ3E4I.js} +1 -1
- package/dist/{remember-6VZ74B7E.js → remember-3KJZGDUG.js} +1 -1
- package/dist/{review-RUHX25A5.js → review-BF26ILZB.js} +1 -1
- package/dist/{ripple-SBQOSTZD.js → ripple-JIUAMBLA.js} +2 -2
- package/dist/sentinel-ZTL224IG.js +63 -0
- package/dist/{server-MV4HNFVF.js → server-MZBYDXJY.js} +4193 -9
- package/dist/{setup-DF4F3ICN.js → setup-363IB6MO.js} +1 -1
- package/dist/{setup-JHBPZAG7.js → setup-UKJ3VGHI.js} +4 -4
- package/dist/{shift-2LQFQP4P.js → shift-KDVYB6CR.js} +16 -13
- package/dist/{show-WTOJXUTN.js → show-SAMTXEHG.js} +1 -1
- package/dist/{snapshot-GTVPRYZG.js → snapshot-KCMONZAO.js} +2 -2
- package/dist/{spawn-BJRQA2NR.js → spawn-EO7B2UM3.js} +2 -2
- package/dist/{summary-5SBFO7QK.js → summary-E2PU4UN2.js} +3 -3
- package/dist/{switch-6EANJ7O6.js → switch-CC2KACXO.js} +1 -1
- package/dist/{sync-5KSTPJ4B.js → sync-5VJPZQNX.js} +2 -2
- package/dist/sync-llms-7QDA3ZWC.js +166 -0
- package/dist/{team-NWP2KJAB.js → team-6CCNANKE.js} +7 -6
- package/dist/{test-MA5TWJQV.js → test-DK2RWLTK.js} +91 -8
- package/dist/{thread-JCJVRUQR.js → thread-RNSLADXN.js} +18 -2
- package/dist/{timeline-P7BARFLI.js → timeline-TJDVVVA3.js} +1 -1
- package/dist/{triage-TBIWJA6R.js → triage-PXMU3RWV.js} +2 -2
- package/dist/university-content/courses/para-101.json +2 -1
- package/dist/university-content/courses/para-201.json +102 -3
- package/dist/university-content/courses/para-301.json +14 -11
- package/dist/university-content/courses/para-401.json +57 -3
- package/dist/university-content/courses/para-501.json +204 -6
- package/dist/university-content/plsat/v3.0.json +808 -3
- package/dist/university-content/reference.json +270 -0
- package/dist/{upgrade-TIYFQYPO.js → upgrade-RBSE4M6I.js} +1 -1
- package/dist/{validate-QEEY6KFS.js → validate-2LTHHORX.js} +1 -1
- package/dist/{watch-4LT4O6K7.js → watch-NBPOMOEX.js} +76 -0
- package/dist/{watch-2XEYUH43.js → watch-PAEH6MOG.js} +1 -1
- package/package.json +1 -1
- package/dist/chunk-GWM2WRXL.js +0 -1095
- package/dist/sentinel-WB7GIK4V.js +0 -43
- /package/dist/{chunk-TAP5N3HH.js → chunk-CCG6KYBT.js} +0 -0
|
@@ -6,7 +6,7 @@
|
|
|
6
6
|
{
|
|
7
7
|
"id": "lore-system",
|
|
8
8
|
"title": "The Lore System",
|
|
9
|
-
"content": "## Why Projects Forget\n\nEvery software project accumulates institutional knowledge — why a migration was attempted then rolled back, which approach was chosen for caching and why, what the team learned when the billing system went down at 2 AM. Without a system for capturing this knowledge, it lives only in the heads of the people who were there. When they leave, context-switch, or simply forget, the project loses its memory.\n\nParadigm's Lore system is a structured project timeline. It records sessions, decisions, milestones, incidents, and reviews as date-partitioned YAML entries that both humans and AI agents can search, filter, and learn from.\n\n## Anatomy of a Lore Entry\n\nEvery lore entry follows a consistent structure:\n\n```yaml\nid: L-2026-02-21-001\ntype: agent-session\ntimestamp: \"2026-02-21T14:30:00Z\"\nduration_minutes: 45\nauthor:\n type: agent\n id: claude-opus-4\n model: claude-opus-4-6\ntitle: \"Add JWT authentication to user routes\"\nsummary: \"Implemented RS256 JWT auth middleware, added ^authenticated and ^project-admin gates to portal.yaml, created refresh token rotation.\"\nsymbols_touched: [\"#auth-middleware\", \"^authenticated\", \"^project-admin\"]\nsymbols_created: [\"#refresh-token-handler\"]\nfiles_modified: [\"src/middleware/auth.ts\", \"portal.yaml\"]\nfiles_created: [\"src/handlers/refresh-token.ts\"]\nlines_added: 247\nlines_removed: 12\ncommit: \"a1b2c3d\"\ndecisions:\n - id: jwt-signing\n decision: \"Use RS256 over HS256\"\n rationale: \"Allows public key verification without sharing the signing secret\"\nlearnings:\n - \"Express v5 requires explicit async error wrapping for middleware\"\nverification:\n status: pass\n details: { \"unit-tests\": pass, \"integration\": pass }\ntags: [security, auth]\n```\n\nThe `id` field is auto-generated: `L-{date}-{sequence}`, where the sequence resets daily. 
This creates a natural chronological index.\n\n## Entry Types\n\nLore recognizes six entry types, each capturing a different kind of project event:\n\n| Type | When to Use |\n|---|---|\n| `agent-session` | An AI agent completed a work session (most common) |\n| `human-note` | A human records context, rationale, or tribal knowledge |\n| `decision` | An architectural or design decision with rationale |\n| `review` | A code review, PR review, or post-mortem |\n| `incident` | A production incident or significant failure |\n| `milestone` | A release, launch, migration completion, or major achievement |\n\nThe type drives how the entry appears in timeline views and which filters surface it.\n\n## Storage: Date-Partitioned YAML\n\nLore entries live in `.paradigm/lore/entries/` organized by date:\n\n```\n.paradigm/lore/\n timeline.yaml # Index metadata\n entries/\n 2026-02-19/\n L-2026-02-19-001.yaml\n L-2026-02-19-002.yaml\n 2026-02-20/\n L-2026-02-20-001.yaml\n 2026-02-21/\n L-2026-02-21-001.yaml\n```\n\nThe `timeline.yaml` index tracks total entry count, last updated timestamp, and known authors. 
Date partitioning keeps directories small and makes time-range queries efficient — to find entries from last week, you only read 7 directories.\n\n## CLI Tools\n\nThe CLI provides full lore management:\n\n- `paradigm lore list` — List entries with filters (author, type, symbol, date range, tags)\n- `paradigm lore show <id>` — Full detail view of a single entry\n- `paradigm lore record` — Record a new entry with expanded fields (files-modified, files-created, commit, learnings, duration)\n- `paradigm lore edit <id>` — Edit entry fields (title, summary, type, symbols, tags, learnings)\n- `paradigm lore delete <id>` — Delete an entry (with --yes to skip confirmation)\n- `paradigm lore timeline` — Timeline view grouped by date with hot symbols\n- `paradigm lore review <id>` — Add review scores to an entry\n- `paradigm lore` — Launch the web timeline UI\n\n## MCP Tools\n\nSix MCP tools power the Lore system:\n\n**`paradigm_lore_record`** — Create a new entry. Requires `type`, `title`, `summary`, and `symbols_touched`. Optional fields include files, decisions, learnings, and verification status. The entry is written to the correct date directory with an auto-incremented ID.\n\n**`paradigm_lore_search`** — Query entries with filters: by symbol, author, type, date range, tags, review status, and minimum completeness score. Returns matching entries sorted by recency.\n\n**`paradigm_lore_timeline`** — Get a high-level view: recent entries, active authors, hot symbols (most-referenced in recent entries), and timeline metadata. Use this for orientation — it tells you what has been happening in the project.\n\n**`paradigm_lore_get`** — Fetch a single entry by ID. Returns the full entry with all fields, including decisions, learnings, and review data.\n\n**`paradigm_lore_update`** — Update an existing entry. Pass the entry ID and the fields to change (title, summary, type, symbols, tags, learnings). 
Only specified fields are modified.\n\n**`paradigm_lore_delete`** — Delete an entry by ID. Requires `confirm: true` to prevent accidental deletion.\n\n## Lore Reviews\n\nEntries can be reviewed by humans after the fact. A review adds a `completeness` score (1-5), a `quality` score (1-5), and optional notes. This creates a feedback loop: agents learn which sessions produced high-quality entries and can adjust their recording behavior. You can filter entries by `hasReview` and `minCompleteness` to surface only verified project history.\n\n## When to Record\n\nThe general rule: **record lore when a session modifies 3 or more source files**. This threshold captures significant work sessions while ignoring trivial edits. The stop hook enforces this — if you modified 3+ files without recording a lore entry, it will block your session from completing.\n\nBeyond the threshold, always record lore for: architectural decisions (even if only 1 file changed), production incidents, milestone completions, and any session where you learned something the next developer should know.",
|
|
9
|
+
"content": "## Why Projects Forget\n\nEvery software project accumulates institutional knowledge — why a migration was attempted then rolled back, which approach was chosen for caching and why, what the team learned when the billing system went down at 2 AM. Without a system for capturing this knowledge, it lives only in the heads of the people who were there. When they leave, context-switch, or simply forget, the project loses its memory.\n\nParadigm's Lore system is a structured project timeline. It records sessions, decisions, milestones, incidents, and reviews as date-partitioned YAML entries that both humans and AI agents can search, filter, and learn from.\n\n## Anatomy of a Lore Entry\n\nEvery lore entry follows a consistent structure:\n\n```yaml\nid: L-2026-02-21-001\ntype: agent-session\ntimestamp: \"2026-02-21T14:30:00Z\"\nduration_minutes: 45\nauthor:\n type: agent\n id: claude-opus-4\n model: claude-opus-4-6\ntitle: \"Add JWT authentication to user routes\"\nsummary: \"Implemented RS256 JWT auth middleware, added ^authenticated and ^project-admin gates to portal.yaml, created refresh token rotation.\"\nsymbols_touched: [\"#auth-middleware\", \"^authenticated\", \"^project-admin\"]\nsymbols_created: [\"#refresh-token-handler\"]\nfiles_modified: [\"src/middleware/auth.ts\", \"portal.yaml\"]\nfiles_created: [\"src/handlers/refresh-token.ts\"]\nlines_added: 247\nlines_removed: 12\ncommit: \"a1b2c3d\"\ndecisions:\n - id: jwt-signing\n decision: \"Use RS256 over HS256\"\n rationale: \"Allows public key verification without sharing the signing secret\"\nlearnings:\n - \"Express v5 requires explicit async error wrapping for middleware\"\nverification:\n status: pass\n details: { \"unit-tests\": pass, \"integration\": pass }\ntags: [security, auth]\n```\n\nThe `id` field is auto-generated: `L-{date}-{sequence}`, where the sequence resets daily. 
This creates a natural chronological index.\n\n## Entry Types\n\nLore recognizes six entry types, each capturing a different kind of project event:\n\n| Type | When to Use |\n|---|---|\n| `agent-session` | An AI agent completed a work session (most common) |\n| `human-note` | A human records context, rationale, or tribal knowledge |\n| `decision` | An architectural or design decision with rationale |\n| `review` | A code review, PR review, or post-mortem |\n| `incident` | A production incident or significant failure |\n| `milestone` | A release, launch, migration completion, or major achievement |\n\nThe type drives how the entry appears in timeline views and which filters surface it.\n\n## Storage: Date-Partitioned YAML\n\nLore entries live in `.paradigm/lore/entries/` organized by date:\n\n```\n.paradigm/lore/\n timeline.yaml # Index metadata\n entries/\n 2026-02-19/\n L-2026-02-19-001.yaml\n L-2026-02-19-002.yaml\n 2026-02-20/\n L-2026-02-20-001.yaml\n 2026-02-21/\n L-2026-02-21-001.yaml\n```\n\nThe `timeline.yaml` index tracks total entry count, last updated timestamp, and known authors. 
Date partitioning keeps directories small and makes time-range queries efficient — to find entries from last week, you only read 7 directories.\n\n## CLI Tools\n\nThe CLI provides full lore management:\n\n- `paradigm lore list` — List entries with filters (author, type, symbol, date range, tags)\n- `paradigm lore show <id>` — Full detail view of a single entry\n- `paradigm lore record` — Record a new entry with expanded fields (files-modified, files-created, commit, learnings, duration)\n- `paradigm lore edit <id>` — Edit entry fields (title, summary, type, symbols, tags, learnings)\n- `paradigm lore delete <id>` — Delete an entry (with --yes to skip confirmation)\n- `paradigm lore timeline` — Timeline view grouped by date with hot symbols\n- `paradigm lore review <id>` — Add review scores to an entry\n- `paradigm lore` — Launch the web timeline UI\n\n## MCP Tools\n\nSix MCP tools power the Lore system:\n\n**`paradigm_lore_record`** — Create a new entry. Requires `type`, `title`, `summary`, and `symbols_touched`. Optional fields include files, decisions, learnings, and verification status. The entry is written to the correct date directory with an auto-incremented ID. When `validateSymbols: true` is passed, the tool checks each symbol in `symbols_touched` against registered symbols in `.purpose` files, `flows.yaml`, and `portal.yaml`. Unregistered symbols produce advisory warnings (the entry is always recorded regardless).\n\n**`paradigm_lore_search`** — Query entries with filters: by symbol, author, type, date range, tags, review status, and minimum completeness score. Returns matching entries sorted by recency.\n\n**`paradigm_lore_timeline`** — Get a high-level view: recent entries, active authors, hot symbols (most-referenced in recent entries), and timeline metadata. Use this for orientation — it tells you what has been happening in the project.\n\n**`paradigm_lore_get`** — Fetch a single entry by ID. 
Returns the full entry with all fields, including decisions, learnings, and review data.\n\n**`paradigm_lore_update`** — Update an existing entry. Pass the entry ID and the fields to change (title, summary, type, symbols, tags, learnings). Only specified fields are modified.\n\n**`paradigm_lore_delete`** — Delete an entry by ID. Requires `confirm: true` to prevent accidental deletion.\n\n## Lore Reviews\n\nEntries can be reviewed by humans after the fact. A review adds a `completeness` score (1-5), a `quality` score (1-5), and optional notes. This creates a feedback loop: agents learn which sessions produced high-quality entries and can adjust their recording behavior. You can filter entries by `hasReview` and `minCompleteness` to surface only verified project history.\n\n## When to Record\n\nThe general rule: **record lore when a session modifies 3 or more source files**. This threshold captures significant work sessions while ignoring trivial edits. The stop hook enforces this — if you modified 3+ files without recording a lore entry, it will block your session from completing.\n\nBeyond the threshold, always record lore for: architectural decisions (even if only 1 file changed), production incidents, milestone completions, and any session where you learned something the next developer should know.",
|
|
10
10
|
"keyConcepts": [
|
|
11
11
|
"Lore entries record sessions, decisions, milestones, incidents, and reviews",
|
|
12
12
|
"Six entry types: agent-session, human-note, decision, review, incident, milestone",
|
|
@@ -15,6 +15,7 @@
|
|
|
15
15
|
"Six MCP tools: paradigm_lore_record, paradigm_lore_search, paradigm_lore_timeline, paradigm_lore_get, paradigm_lore_update, paradigm_lore_delete",
|
|
16
16
|
"Review scores (completeness 1-5, quality 1-5) enable feedback loops",
|
|
17
17
|
"Recording trigger: 3+ modified source files = significant session",
|
|
18
|
+
"Optional symbol validation checks symbols_touched against registered .purpose, flows, and portal symbols",
|
|
18
19
|
"timeline.yaml index tracks entry counts, authors, and last-updated"
|
|
19
20
|
],
|
|
20
21
|
"quiz": [
|
|
@@ -88,7 +89,7 @@
|
|
|
88
89
|
{
|
|
89
90
|
"id": "sentinel-deep-dive",
|
|
90
91
|
"title": "Sentinel Deep Dive",
|
|
91
|
-
"content": "## Beyond Stack Traces\n\nTraditional error tracking gives you a stack trace and a count. Paradigm Sentinel gives you *symbolic context* — which component failed, where in a flow it failed, what gate was being evaluated, and which known pattern matches the failure. This transforms incident response from \"read the stack trace and hope\" to \"match against institutional knowledge and follow a resolution strategy.\"\n\n## Symbolic Incident Records\n\nWhen Sentinel records an incident, it captures both technical and symbolic context:\n\n```yaml\nid: INC-042\ntimestamp: \"2026-02-21T02:15:00Z\"\nstatus: open\nerror:\n message: \"Cannot read property 'id' of null\"\n stack: \"at PaymentProcessor.processRefund (payment-processor.ts:142)\"\n type: TypeError\nsymbols:\n component: \"#payment-processor\"\n flow: \"$refund-flow\"\n gate: \"^authenticated\"\nflowPosition:\n flowId: \"$refund-flow\"\n expected: [\"^authenticated\", \"^refund-eligible\", \"#process-refund\", \"!refund-completed\"]\n actual: [\"^authenticated\", \"^refund-eligible\", \"#process-refund\"]\n missing: [\"!refund-completed\"]\n failedAt: \"#process-refund\"\nenvironment: production\n```\n\nThe `flowPosition` field is critical — it tells you exactly where in the defined flow the failure occurred. The refund flow expected 4 steps; only 3 completed. The failure happened at `#process-refund`, and the `!refund-completed` signal never fired. This immediately narrows the investigation to the refund processing logic.\n\n## Incident Grouping\n\nSentinel automatically groups related incidents using symbolic similarity. When two incidents share the same component, flow, and error pattern, they form a group. The grouping algorithm uses a similarity threshold of 0.6 — incidents must share at least 60% of their symbolic context to cluster.\n\nAn `IncidentGroup` tracks the common symbols, error patterns, occurrence count, first/last seen timestamps, and which environments are affected. 
If a group matches a known failure pattern, Sentinel attaches it as a `suggestedPattern`.\n\n## Failure Patterns\n\nPatterns are the institutional knowledge of your error handling. Each pattern defines matching criteria and a resolution strategy:\n\n```yaml\nid: payment-null-ref-001\nname: \"Null reference in payment processing\"\npattern:\n symbols:\n component: \"#payment-processor\"\n errorType: [TypeError]\n errorContains: [\"Cannot read property\", \"null\"]\nresolution:\n description: \"Add null check before accessing refund object properties\"\n strategy: fix-code\n priority: high\n symbolsToModify: [\"#payment-processor\"]\n filesLikelyInvolved: [\"src/services/payment-processor.ts\"]\nconfidence:\n score: 85\n timesMatched: 12\n timesResolved: 10\n timesRecurred: 2\n```\n\nSix resolution strategies exist: `retry` (transient failure), `fallback` (use alternative path), `fix-data` (data issue), `fix-code` (bug), `ignore` (known harmless), and `escalate` (needs human decision). Pattern priority ranges from `low` through `medium` and `high` to `critical`.\n\nPatterns come from four sources: `manual` (team-created), `suggested` (Sentinel auto-generated from groups), `imported` (from another project), and `community` (shared patterns). Paradigm ships 26 seed patterns covering common failures like incomplete flows, gate bypasses, state race conditions, and unhandled signals.\n\n## The Triage Workflow\n\nSentinel follows a defined lifecycle for incidents:\n\n1. **Record** — `paradigm_sentinel_record` creates the incident with error details, symbolic context, and optional flow position. The incident starts as `open`.\n\n2. **Triage** — `paradigm_sentinel_triage` lists incidents filtered by status, symbol, environment, or error text. The matcher automatically suggests patterns that fit each incident.\n\n3. 
**Investigate** — `paradigm_sentinel_show` with `includeTimeline: true` shows the full flow timeline — every gate passed, signal emitted, and state change leading up to the failure. With `includeSimilar: true`, it surfaces related incidents that may share a root cause.\n\n4. **Resolve** — `paradigm_sentinel_resolve` closes the incident with a resolution: which pattern applied (if any), the fix commit hash, PR URL, and notes. Resolved incidents feed back into pattern confidence scores.\n\n5. **Pattern** — `paradigm_sentinel_add_pattern` creates new patterns from resolved incidents. When you fix a novel failure, capture the fix as a pattern so the next occurrence resolves faster.\n\nThe sequence is: **record → triage → show → resolve → add pattern**. This cycle builds institutional knowledge with every incident.\n\n## Stats and Health Metrics\n\n`paradigm_sentinel_stats` provides operational intelligence for a given time period: total incidents, open vs resolved counts, incidents by environment and day, pattern effectiveness (which patterns resolve most incidents vs which recur), symbol hotspots (components with the highest incident rates), and resolution metrics (average time to resolve, pattern vs manual resolution rates).\n\nThe `symbolHealth` view shows per-symbol incident history — use it to identify which components need hardening or refactoring.",
|
|
92
|
+
"content": "## Beyond Stack Traces\n\nTraditional error tracking gives you a stack trace and a count. Paradigm Sentinel gives you *symbolic context* — which component failed, where in a flow it failed, what gate was being evaluated, and which known pattern matches the failure. This transforms incident response from \"read the stack trace and hope\" to \"match against institutional knowledge and follow a resolution strategy.\"\n\n## Symbolic Incident Records\n\nWhen Sentinel records an incident, it captures both technical and symbolic context:\n\n```yaml\nid: INC-042\ntimestamp: \"2026-02-21T02:15:00Z\"\nstatus: open\nerror:\n message: \"Cannot read property 'id' of null\"\n stack: \"at PaymentProcessor.processRefund (payment-processor.ts:142)\"\n type: TypeError\nsymbols:\n component: \"#payment-processor\"\n flow: \"$refund-flow\"\n gate: \"^authenticated\"\nflowPosition:\n flowId: \"$refund-flow\"\n expected: [\"^authenticated\", \"^refund-eligible\", \"#process-refund\", \"!refund-completed\"]\n actual: [\"^authenticated\", \"^refund-eligible\", \"#process-refund\"]\n missing: [\"!refund-completed\"]\n failedAt: \"#process-refund\"\nenvironment: production\n```\n\nThe `flowPosition` field is critical — it tells you exactly where in the defined flow the failure occurred. The refund flow expected 4 steps; only 3 completed. The failure happened at `#process-refund`, and the `!refund-completed` signal never fired. This immediately narrows the investigation to the refund processing logic.\n\n## Incident Grouping\n\nSentinel automatically groups related incidents using symbolic similarity. When two incidents share the same component, flow, and error pattern, they form a group. The grouping algorithm uses a similarity threshold of 0.6 — incidents must share at least 60% of their symbolic context to cluster.\n\nAn `IncidentGroup` tracks the common symbols, error patterns, occurrence count, first/last seen timestamps, and which environments are affected. 
If a group matches a known failure pattern, Sentinel attaches it as a `suggestedPattern`.\n\n## Failure Patterns\n\nPatterns are the institutional knowledge of your error handling. Each pattern defines matching criteria and a resolution strategy:\n\n```yaml\nid: payment-null-ref-001\nname: \"Null reference in payment processing\"\npattern:\n symbols:\n component: \"#payment-processor\"\n errorType: [TypeError]\n errorContains: [\"Cannot read property\", \"null\"]\nresolution:\n description: \"Add null check before accessing refund object properties\"\n strategy: fix-code\n priority: high\n symbolsToModify: [\"#payment-processor\"]\n filesLikelyInvolved: [\"src/services/payment-processor.ts\"]\nconfidence:\n score: 85\n timesMatched: 12\n timesResolved: 10\n timesRecurred: 2\n```\n\nSix resolution strategies exist: `retry` (transient failure), `fallback` (use alternative path), `fix-data` (data issue), `fix-code` (bug), `ignore` (known harmless), and `escalate` (needs human decision). Pattern priority ranges from `low` through `medium` and `high` to `critical`.\n\nPatterns come from four sources: `manual` (team-created), `suggested` (Sentinel auto-generated from groups), `imported` (from another project), and `community` (shared patterns). Paradigm ships 26 seed patterns covering common failures like incomplete flows, gate bypasses, state race conditions, and unhandled signals.\n\n## The Triage Workflow\n\nSentinel follows a defined lifecycle for incidents:\n\n1. **Record** — `paradigm_sentinel_record` creates the incident with error details, symbolic context, and optional flow position. The incident starts as `open`.\n\n2. **Triage** — `paradigm_sentinel_triage` lists incidents filtered by status, symbol, environment, or error text. The matcher automatically suggests patterns that fit each incident.\n\n3. 
**Investigate** — `paradigm_sentinel_show` with `includeTimeline: true` shows the full flow timeline — every gate passed, signal emitted, and state change leading up to the failure. With `includeSimilar: true`, it surfaces related incidents that may share a root cause.\n\n4. **Resolve** — `paradigm_sentinel_resolve` closes the incident with a resolution: which pattern applied (if any), the fix commit hash, PR URL, and notes. Resolved incidents feed back into pattern confidence scores.\n\n5. **Pattern** — `paradigm_sentinel_add_pattern` creates new patterns from resolved incidents. When you fix a novel failure, capture the fix as a pattern so the next occurrence resolves faster.\n\nThe sequence is: **record → triage → show → resolve → add pattern**. This cycle builds institutional knowledge with every incident.\n\n## Stats and Health Metrics\n\n`paradigm_sentinel_stats` provides operational intelligence for a given time period: total incidents, open vs resolved counts, incidents by environment and day, pattern effectiveness (which patterns resolve most incidents vs which recur), symbol hotspots (components with the highest incident rates), and resolution metrics (average time to resolve, pattern vs manual resolution rates).\n\nThe `symbolHealth` view shows per-symbol incident history — use it to identify which components need hardening or refactoring.\n\n## Logger Transports\n\nSentinel integrates with the Paradigm logger through a transport layer. The `LogTransport` interface defines a simple contract: a transport receives structured log entries and delivers them somewhere — a file, a remote API, a database, or Sentinel's ingestion endpoint.\n\n```typescript\ninterface LogTransport {\n name: string;\n send(entry: LogEntry): void | Promise<void>;\n}\n```\n\nThe logger supports multiple transports simultaneously via `addTransport(transport)` and `removeTransport(name)`. By default, logs go to the console. 
Adding a `SentinelTransport` sends them to Sentinel's server as well, without changing any of your existing logging calls.\n\n## The SentinelTransport Bridge\n\nConnecting the Paradigm logger to Sentinel is a one-liner:\n\n```typescript\nimport { enableSentinel } from '@a-company/sentinel';\n\nenableSentinel({ endpoint: 'http://localhost:3001' });\n```\n\nThis call creates a `SentinelTransport` instance and registers it with the logger via `addTransport`. From that point forward, every `log.component(...)`, `log.gate(...)`, and `log.signal(...)` call is forwarded to Sentinel as a structured log entry. Error-level logs are automatically promoted to incident candidates.\n\nThe beauty of this design is zero code changes to your application. Your existing logger calls remain unchanged — the transport layer silently bridges them to Sentinel's observability pipeline.\n\n## Metrics API\n\nSentinel's server exposes a metrics API for recording and querying application metrics:\n\n**POST /api/metrics** — Record a metric data point. Supports three metric types:\n- `counter` — Monotonically increasing values (e.g., request count, error count)\n- `gauge` — Point-in-time values that can go up or down (e.g., active connections, queue depth)\n- `histogram` — Distribution of values over time (e.g., response latency, payload size)\n\n```json\n{\n \"name\": \"api.requests.total\",\n \"type\": \"counter\",\n \"value\": 1,\n \"labels\": { \"method\": \"POST\", \"route\": \"/api/payments\" },\n \"timestamp\": \"2026-02-21T14:30:00Z\"\n}\n```\n\n**GET /api/metrics** — Query metrics with optional filters by name, type, labels, and time range. Returns aggregated data suitable for dashboards and alerting.\n\n## Traces API\n\nSentinel supports distributed tracing through span trees:\n\n**POST /api/traces** — Record a trace span. Each span has a `traceId`, `spanId`, optional `parentSpanId`, `operationName`, `startTime`, `endTime`, and `tags`. 
Spans with the same `traceId` form a tree — the root span has no parent, and child spans reference their parent via `parentSpanId`.\n\n**GET /api/traces** — Query traces by operation name, service, time range, or minimum duration. Returns full span trees with timing breakdowns.\n\n## Service Registry\n\nSentinel maintains a live registry of services reporting data:\n\n**POST /api/services** — Register or update a service. Each service entry includes name, version, environment, health status, and last-seen timestamp.\n\n**GET /api/services** — List all registered services with their current health status and metadata. This provides a real-time view of what is running and where.",
|
|
92
93
|
"keyConcepts": [
|
|
93
94
|
"Symbolic incident records capture component, flow, gate, and signal context",
|
|
94
95
|
"Flow position tracking shows exactly where in a flow a failure occurred",
|
|
@@ -97,7 +98,12 @@
|
|
|
97
98
|
"Six resolution strategies: retry, fallback, fix-data, fix-code, ignore, escalate",
|
|
98
99
|
"Triage lifecycle: record → triage → show → resolve → add pattern",
|
|
99
100
|
"26 seed patterns ship with Paradigm covering common failure modes",
|
|
100
|
-
"Stats surface symbol hotspots, pattern effectiveness, and resolution rates"
|
|
101
|
+
"Stats surface symbol hotspots, pattern effectiveness, and resolution rates",
|
|
102
|
+
"LogTransport interface enables pluggable log delivery via addTransport/removeTransport",
|
|
103
|
+
"enableSentinel() one-liner bridges the Paradigm logger to Sentinel with zero code changes",
|
|
104
|
+
"Metrics API supports counter, gauge, and histogram metric types",
|
|
105
|
+
"Traces API records distributed span trees with parent-child relationships",
|
|
106
|
+
"Service registry provides live health status for all reporting services"
|
|
101
107
|
],
|
|
102
108
|
"quiz": [
|
|
103
109
|
{
|
|
@@ -164,22 +170,130 @@
|
|
|
164
170
|
},
|
|
165
171
|
"correct": "C",
|
|
166
172
|
"explanation": "The 0.6 similarity threshold means 60% of symbolic context must overlap. Sharing a component and error type provides some overlap, but different flows reduce it. Whether they cross 0.6 depends on other shared context — same gate, same environment, similar error message. Grouping is automatic but similarity-driven, not based on any single field."
|
|
173
|
+
},
|
|
174
|
+
{
|
|
175
|
+
"id": "q6",
|
|
176
|
+
"question": "How do you connect the Paradigm logger to Sentinel's observability pipeline?",
|
|
177
|
+
"choices": {
|
|
178
|
+
"A": "Replace all `log.component()` calls with `sentinel.log()` calls throughout your codebase",
|
|
179
|
+
"B": "Call `enableSentinel({ endpoint: '...' })` once — it registers a SentinelTransport via addTransport with zero changes to existing logging code",
|
|
180
|
+
"C": "Configure a `sentinel` key in `.paradigm/config.yaml` and restart the application",
|
|
181
|
+
"D": "Import SentinelTransport in every file that uses the logger",
|
|
182
|
+
"E": "Set the `SENTINEL_ENDPOINT` environment variable — the logger auto-detects it"
|
|
183
|
+
},
|
|
184
|
+
"correct": "B",
|
|
185
|
+
"explanation": "The SentinelTransport bridge is designed for zero-code-change adoption. Calling `enableSentinel()` once creates a SentinelTransport and registers it with the logger via `addTransport`. From that point, all existing `log.component()`, `log.gate()`, and `log.signal()` calls are automatically forwarded to Sentinel. No changes to individual logging calls are needed."
|
|
186
|
+
},
|
|
187
|
+
{
|
|
188
|
+
"id": "q7",
|
|
189
|
+
"question": "You want to track API response latency in Sentinel. Which metric type should you use?",
|
|
190
|
+
"choices": {
|
|
191
|
+
"A": "`counter` — increment it by the latency value on each request",
|
|
192
|
+
"B": "`gauge` — set it to the current response time",
|
|
193
|
+
"C": "`histogram` — record each response time to build a distribution over time",
|
|
194
|
+
"D": "`timer` — Sentinel has a dedicated timer metric type for latency",
|
|
195
|
+
"E": "`counter` with a `latency` label containing the value"
|
|
196
|
+
},
|
|
197
|
+
"correct": "C",
|
|
198
|
+
"explanation": "Histogram is the correct metric type for distributions like response latency. A histogram records individual values and lets you compute percentiles (p50, p95, p99), averages, and distributions over time. A counter only tracks cumulative totals, a gauge only captures point-in-time snapshots, and there is no dedicated timer type — histograms serve that purpose."
|
|
199
|
+
}
|
|
200
|
+
]
|
|
201
|
+
},
|
|
202
|
+
{
|
|
203
|
+
"id": "aspect-graph-internals",
|
|
204
|
+
"title": "Aspect Graph Internals",
|
|
205
|
+
"content": "## SQLite Schema\n\nThe aspect graph database at `.paradigm/aspect-graph.db` uses six core tables that model the full aspect ecosystem:\n\n**`aspects`** — The primary table storing aspect metadata. Columns include `id` (the aspect symbol, e.g., `token-expiry-24h`), `description`, `value`, `category` (rule/decision/constraint/configuration/invariant), `severity` (low/medium/high/critical), and `content_hash` (SHA-256 of the combined anchor code for drift detection). Each row represents one aspect from a `.purpose` file.\n\n**`anchors`** — Stores code anchor locations. Columns: `aspect_id` (foreign key to aspects), `file_path`, `start_line`, `end_line`, and `content_hash` (SHA-256 of the code at those lines). An aspect can have multiple anchors across different files.\n\n**`edges`** — The graph edges connecting aspects to other symbols. Columns: `source` (the aspect), `target` (any symbol), `relation` (enforced-by, depends-on, contradicts, supersedes, related-to), `weight` (numeric confidence, default 1.0 for explicit edges), and `origin` (explicit, inferred, or learned). This table is what makes the aspect system a graph rather than a flat list.\n\n**`lore_links`** — Connects aspects to lore entries. Columns: `aspect_id` and `lore_id`. These links are materialized from the `lore` field in aspect YAML definitions, and additional links are inferred when two aspects share lore references.\n\n**`search_weights`** — The learning system's memory. Columns: `query` (the search string), `aspect_id` (the result), and `weight` (accumulated confidence). This table powers Tier 1 of the three-tier search — when a query matches a stored mapping with sufficient weight, the result is returned immediately without FTS5 or fuzzy matching.\n\n**`heatmap`** — Tracks aspect access patterns. Columns: `aspect_id`, `access_type` (search, ripple, navigate, direct), `count`, and `last_accessed`. 
This data drives the `paradigm_aspect_heatmap` tool, revealing which aspects are most frequently referenced and how they are typically discovered.\n\n## Recursive Ripple\n\nThe aspect graph enables recursive ripple analysis — when you call `paradigm_ripple` on a symbol that has aspect edges, the ripple follows those edges to discover indirect impacts. The algorithm uses weighted breadth-first search (BFS) with three configurable parameters:\n\n- **maxDepth** — How many hops to traverse. Default is 5, maximum is 10. Each hop follows one edge in the graph. At depth 1, you see direct connections. At depth 5, you see connections five edges away.\n- **minWeight** — The minimum cumulative weight to continue traversal. Default is 0.1. As the BFS traverses edges, it multiplies the current weight by each edge's weight (multiplicative decay). When the cumulative weight drops below minWeight, that branch is pruned.\n- **Queue limit** — Maximum BFS queue size: 1000 nodes. This prevents runaway traversals in densely connected graphs. If the queue exceeds 1000 entries, the oldest entries are dropped.\n\nThe multiplicative decay is the key mechanism. An explicit edge with weight 1.0 passes full confidence to the next hop. An inferred edge with weight 0.5 halves the confidence. After two inferred edges, the weight is 0.25 — and after four, it drops to 0.0625, below the default minWeight threshold. This naturally limits traversal depth through low-confidence paths while allowing full traversal through high-confidence ones.\n\n## Heatmap Tracking\n\nEvery time an aspect is accessed through any MCP tool, the heatmap table records the access. 
Four access types are tracked:\n\n- **search** — The aspect was found via `paradigm_aspect_search`\n- **ripple** — The aspect was encountered during `paradigm_ripple` traversal\n- **navigate** — The aspect was discovered via `paradigm_navigate`\n- **direct** — The aspect was accessed by ID via `paradigm_aspect_get`\n\nThe heatmap serves two purposes. First, it powers the `paradigm_aspect_heatmap` tool, which ranks aspects by access frequency and reveals usage patterns. Second, it provides data for project health analysis — aspects that are never accessed may be stale or poorly named, while aspects accessed frequently across multiple types are clearly central to the project.\n\n## Materialization Pipeline\n\nThe aspect graph is rebuilt during `paradigm_reindex` through a five-step pipeline:\n\n1. **openAspectGraph** — Opens (or creates) the SQLite database at `.paradigm/aspect-graph.db`. If the database exists, all tables are cleared for a fresh rebuild. This ensures the graph always reflects the current state of YAML files.\n\n2. **materializeAspects** — Reads all `.purpose` files, extracts aspect definitions, and writes them to the `aspects`, `anchors`, and `edges` tables. For each anchor, the pipeline reads the actual source code at the specified line range and computes a SHA-256 content hash. Explicit edges from the YAML `edges` field are written with origin `explicit` and weight 1.0. Inferred edges from `applies-to` references are written with origin `inferred` and weight 0.5.\n\n3. **materializeLoreLinks** — Reads the `lore` field from each aspect and creates entries in the `lore_links` table connecting aspects to their referenced lore entries.\n\n4. **inferLoreEdges** — Scans the `lore_links` table for aspects that share lore references. When two aspects both reference the same lore entry, a learned edge is created between them with origin `learned` and a weight proportional to the number of shared references. 
This discovers implicit relationships that were not explicitly declared.\n\n5. **closeAspectGraph** — Commits all changes, runs ANALYZE for query optimization, and closes the database connection.\n\nBecause the entire graph is rebuilt from YAML on every reindex, there is no migration or versioning concern. If the schema changes in a future Paradigm version, the next reindex simply creates the new schema.\n\n## Category Inference\n\nWhen an aspect definition omits the `category` field, the materialization pipeline attempts to infer it from the description using keyword matching:\n\n- Descriptions containing \"must\", \"always\", \"never\", \"required\" suggest `rule`\n- Descriptions containing \"decided\", \"chosen\", \"selected\", \"opted\" suggest `decision`\n- Descriptions containing \"limit\", \"maximum\", \"minimum\", \"cannot exceed\" suggest `constraint`\n- Descriptions containing \"configured\", \"set to\", \"defaults to\", \"environment\" suggest `configuration`\n- Descriptions containing \"always true\", \"never negative\", \"invariant\", \"guarantee\" suggest `invariant`\n\nSimilarly, severity can be inferred from tags: aspects tagged `[critical]` or `[security]` default to `high` severity, aspects tagged `[compliance]` default to `critical`, and untagged aspects default to `medium`.\n\nInference is a fallback — explicit `category` and `severity` fields in YAML always take precedence.\n\n## Weight Decay in Search Learning\n\nThe search learning system uses a reinforcement model. When `paradigm_aspect_confirm` is called with a query and aspect ID:\n\n1. The selected result's weight for that query gets **+1.0** added to its current weight in the `search_weights` table. If no entry exists, one is created with weight 1.0.\n2. All other aspects that were previously returned for the same query get their weights multiplied by **0.95** (a 5% decay). 
This applies only to aspects that have existing `search_weights` entries for this query — it does not penalize aspects that were never returned for this query.\n\nThis mechanism is self-correcting. If result A is consistently confirmed for a query, its weight grows (1.0, 2.0, 3.0, ...) while alternatives decay (1.0, 0.95, 0.9025, ...). After enough confirmations, the learned mapping becomes dominant and Tier 1 returns it instantly. But if the user later confirms a different result B for the same query, B starts climbing while A begins decaying — the system adapts to changing preferences without requiring manual intervention.",
|
|
206
|
+
"keyConcepts": [
|
|
207
|
+
"Six SQLite tables: aspects, anchors, edges, lore_links, search_weights, heatmap",
|
|
208
|
+
"Recursive ripple uses weighted BFS with multiplicative decay across edges",
|
|
209
|
+
"Default maxDepth is 5 (max 10), default minWeight is 0.1, queue limit is 1000",
|
|
210
|
+
"Four heatmap access types: search, ripple, navigate, direct",
|
|
211
|
+
"Five-step materialization pipeline: open, materialize aspects, materialize lore links, infer lore edges, close",
|
|
212
|
+
"Category inference uses description keywords when category is not explicitly set",
|
|
213
|
+
"Search weight decay: +1.0 to confirmed result, *0.95 to all others for the same query",
|
|
214
|
+
"Inferred edges from applies-to have weight 0.5 and origin 'inferred'"
|
|
215
|
+
],
|
|
216
|
+
"quiz": [
|
|
217
|
+
{
|
|
218
|
+
"id": "q1",
|
|
219
|
+
"question": "What is the default maxDepth for recursive ripple in the aspect graph?",
|
|
220
|
+
"choices": {
|
|
221
|
+
"A": "3 — to keep traversals fast and focused",
|
|
222
|
+
"B": "5 — balancing depth of discovery with performance",
|
|
223
|
+
"C": "10 — the maximum allowed value",
|
|
224
|
+
"D": "Unlimited — ripple traverses until minWeight is reached",
|
|
225
|
+
"E": "1 — only direct connections are followed by default"
|
|
226
|
+
},
|
|
227
|
+
"correct": "B",
|
|
228
|
+
"explanation": "The default maxDepth for recursive ripple is 5, with a maximum configurable value of 10. This default balances discovery depth with performance — at 5 hops, you see a meaningful neighborhood without traversing the entire graph. The minWeight threshold (default 0.1) provides additional pruning by cutting off low-confidence paths before they reach maxDepth."
|
|
229
|
+
},
|
|
230
|
+
{
|
|
231
|
+
"id": "q2",
|
|
232
|
+
"question": "What happens to search weights when a result is confirmed via paradigm_aspect_confirm?",
|
|
233
|
+
"choices": {
|
|
234
|
+
"A": "The confirmed result gets +0.5 weight and all others are deleted",
|
|
235
|
+
"B": "The confirmed result gets +1.0 weight and all other results for the same query decay by *0.95",
|
|
236
|
+
"C": "All results for the query get +1.0 weight to reinforce the entire set",
|
|
237
|
+
"D": "The confirmed result is permanently pinned and decay is disabled",
|
|
238
|
+
"E": "The confirmed result replaces all other entries for that query"
|
|
239
|
+
},
|
|
240
|
+
"correct": "B",
|
|
241
|
+
"explanation": "The search learning system adds +1.0 to the confirmed result's weight for that query and multiplies all other existing results for the same query by 0.95 (a 5% decay). This self-correcting mechanism lets the best result rise to the top over time while alternatives gradually fade. The decay only applies to aspects that have existing search_weights entries for the query — it does not penalize unrelated aspects."
|
|
242
|
+
},
|
|
243
|
+
{
|
|
244
|
+
"id": "q3",
|
|
245
|
+
"question": "Which SQLite table stores aspect access frequency for the heatmap tool?",
|
|
246
|
+
"choices": {
|
|
247
|
+
"A": "aspects — in an access_count column",
|
|
248
|
+
"B": "edges — access frequency is tracked per edge",
|
|
249
|
+
"C": "search_weights — all access types feed into search weights",
|
|
250
|
+
"D": "heatmap — with columns for aspect_id, access_type, count, and last_accessed",
|
|
251
|
+
"E": "anchors — access is tracked per anchor location"
|
|
252
|
+
},
|
|
253
|
+
"correct": "D",
|
|
254
|
+
"explanation": "The `heatmap` table stores aspect access frequency with columns for `aspect_id`, `access_type` (search, ripple, navigate, direct), `count`, and `last_accessed`. This dedicated table allows the `paradigm_aspect_heatmap` tool to rank aspects by usage frequency and break down how each aspect is typically discovered — whether through search, ripple analysis, navigation, or direct access."
|
|
255
|
+
},
|
|
256
|
+
{
|
|
257
|
+
"id": "q4",
|
|
258
|
+
"question": "What is the queue limit for recursive ripple BFS traversal?",
|
|
259
|
+
"choices": {
|
|
260
|
+
"A": "100 nodes — to keep memory usage minimal",
|
|
261
|
+
"B": "500 nodes — a balance between coverage and performance",
|
|
262
|
+
"C": "1000 nodes — preventing runaway traversals in dense graphs",
|
|
263
|
+
"D": "Unlimited — the queue grows until maxDepth is reached",
|
|
264
|
+
"E": "10000 nodes — large enough for enterprise-scale graphs"
|
|
265
|
+
},
|
|
266
|
+
"correct": "C",
|
|
267
|
+
"explanation": "The BFS queue limit is 1000 nodes. This prevents runaway traversals in densely connected aspect graphs where the number of reachable nodes could grow exponentially with depth. When the queue exceeds 1000 entries, the oldest entries are dropped, ensuring the algorithm completes in bounded time and memory regardless of graph density."
|
|
268
|
+
},
|
|
269
|
+
{
|
|
270
|
+
"id": "q5",
|
|
271
|
+
"question": "How are aspect edges inferred from existing data during materialization?",
|
|
272
|
+
"choices": {
|
|
273
|
+
"A": "By analyzing import statements in source code files",
|
|
274
|
+
"B": "From applies-to references with weight 0.5 and origin 'inferred', and from shared lore references with origin 'learned'",
|
|
275
|
+
"C": "By running static analysis on anchor code blocks",
|
|
276
|
+
"D": "From git commit history showing which aspects changed together",
|
|
277
|
+
"E": "Only explicit edges are created — no inference occurs"
|
|
278
|
+
},
|
|
279
|
+
"correct": "B",
|
|
280
|
+
"explanation": "The materialization pipeline creates inferred edges in two ways. First, `materializeAspects` generates edges from `applies-to` references with weight 0.5 and origin 'inferred' — when an aspect applies to a component, a relationship edge is created. Second, `inferLoreEdges` scans for aspects sharing lore references and creates edges with origin 'learned' and weight proportional to the overlap. These supplement explicit YAML edges to build a richer graph."
|
|
167
281
|
}
|
|
168
282
|
]
|
|
169
283
|
},
|
|
170
284
|
{
|
|
171
285
|
"id": "habits-practice",
|
|
172
286
|
"title": "Habits & Practice",
|
|
173
|
-
"content": "## Instinct vs Habit\n\nWhen you first learn to drive, you consciously think about every action — check mirrors, signal, check blind spot, change lanes. After thousands of miles, these become habits: automatic behaviors you execute without conscious effort. The Habits system brings this concept to AI-assisted development.\n\nWithout habits, an agent must be told every time: \"check ripple before modifying,\" \"validate flows after changing gates,\" \"record lore for significant sessions.\" With habits, these checks become automatic behavioral triggers — the system evaluates them at defined points and reports compliance. Over time, agents internalize the patterns, and the habit checks become confirmation rather than correction.\n\n## Habit Definitions\n\nEach habit is a structured rule with six fields:\n\n```yaml\nid: ripple-before-modify\nname: Check Ripple Before Modifying\ndescription: Always call paradigm_ripple before modifying any symbol\ncategory: discovery\ntrigger: preflight\nseverity: advisory\ncheck:\n type: tool-called\n params:\n tools: [paradigm_ripple]\nenabled: true\n```\n\n**Categories** classify what kind of discipline the habit enforces. There are six:\n- `discovery` — Exploring before acting (ripple, navigate, search)\n- `verification` — Validating after implementing (postflight, reindex)\n- `testing` — Ensuring test coverage for new code\n- `documentation` — Keeping .purpose files and lore entries current\n- `collaboration` — Checking team wisdom and expert knowledge\n- `security` — Validating gates and portal.yaml compliance\n\n**Triggers** define when the habit is evaluated. 
There are four:\n- `preflight` — Before starting implementation\n- `postflight` — After completing implementation\n- `on-commit` — Before committing changes\n- `on-stop` — Before the session ends (stop hook)\n\n**Severity** determines what happens when a habit is violated:\n- `advisory` — Log a note, don't block anything\n- `warn` — Show a warning to the agent/user\n- `block` — Prevent session completion until resolved (enforced by stop hook)\n\n## Check Types\n\nHabits verify compliance through
|
|
287
|
+
"content": "## Instinct vs Habit\n\nWhen you first learn to drive, you consciously think about every action — check mirrors, signal, check blind spot, change lanes. After thousands of miles, these become habits: automatic behaviors you execute without conscious effort. The Habits system brings this concept to AI-assisted development.\n\nWithout habits, an agent must be told every time: \"check ripple before modifying,\" \"validate flows after changing gates,\" \"record lore for significant sessions.\" With habits, these checks become automatic behavioral triggers — the system evaluates them at defined points and reports compliance. Over time, agents internalize the patterns, and the habit checks become confirmation rather than correction.\n\n## Habit Definitions\n\nEach habit is a structured rule with six fields:\n\n```yaml\nid: ripple-before-modify\nname: Check Ripple Before Modifying\ndescription: Always call paradigm_ripple before modifying any symbol\ncategory: discovery\ntrigger: preflight\nseverity: advisory\ncheck:\n type: tool-called\n params:\n tools: [paradigm_ripple]\nenabled: true\n```\n\n**Categories** classify what kind of discipline the habit enforces. There are six:\n- `discovery` — Exploring before acting (ripple, navigate, search)\n- `verification` — Validating after implementing (postflight, reindex)\n- `testing` — Ensuring test coverage for new code\n- `documentation` — Keeping .purpose files and lore entries current\n- `collaboration` — Checking team wisdom and expert knowledge\n- `security` — Validating gates and portal.yaml compliance\n\n**Triggers** define when the habit is evaluated. 
There are four:\n- `preflight` — Before starting implementation\n- `postflight` — After completing implementation\n- `on-commit` — Before committing changes\n- `on-stop` — Before the session ends (stop hook)\n\n**Severity** determines what happens when a habit is violated:\n- `advisory` — Log a note, don't block anything\n- `warn` — Show a warning to the agent/user\n- `block` — Prevent session completion until resolved (enforced by stop hook)\n\n## Check Types\n\nHabits verify compliance through twelve check types:\n\n| Check Type | What It Verifies |\n|---|---|\n| `tool-called` | Specified MCP tools were invoked during the session |\n| `file-exists` | Files matching glob patterns exist (e.g., test files) |\n| `file-modified` | Files matching patterns were modified during session |\n| `lore-recorded` | A lore entry was created (for 3+ file sessions) |\n| `symbols-registered` | New code is registered in .purpose files |\n| `gates-declared` | Routes have corresponding gates in portal.yaml |\n| `tests-exist` | Test files exist for modified components |\n| `git-clean` | Git working tree is clean — all changes committed |\n| `commit-message-format` | Commit messages match regex patterns (default: conventional commit prefix + Symbols: trailer) |\n| `flow-coverage` | Changes spanning 3+ components have a documented $flow |\n| `context-checked` | Session context/recovery tools (paradigm_context_check, paradigm_session_recover) were called |\n| `aspect-anchored` | Touched aspects (~) have valid code anchors verified via paradigm_aspect_check |\n\n## The 14 Seed Habits\n\nParadigm ships with 14 built-in habits that establish baseline discipline:\n\n1. **explore-before-implement** (preflight/advisory/discovery) — Called paradigm_ripple, paradigm_navigate, paradigm_search, or paradigm_related before coding\n2. **ripple-before-modify** (preflight/advisory/discovery) — Called paradigm_ripple specifically before modifying symbols\n3. 
**check-fragility** (preflight/advisory/discovery) — Called paradigm_history_fragility before touching symbols\n4. **wisdom-before-implement** (preflight/advisory/collaboration) — Checked paradigm_wisdom_context or paradigm_wisdom_expert\n5. **verify-before-done** (on-stop/warn/verification) — Called paradigm_pm_postflight before finishing\n6. **postflight-compliance** (on-stop/advisory/verification) — Ran postflight and reindex\n7. **test-new-components** (postflight/advisory/testing) — Test files exist for new components\n8. **purpose-coverage** (postflight/warn/documentation) — .purpose files cover modified directories\n9. **record-lore-for-significant** (on-stop/warn/documentation) — Lore recorded for 3+ file sessions\n10. **gates-for-routes** (postflight/warn/security) — Routes have portal.yaml gate coverage\n11. **commit-message-symbols** (on-commit/advisory/documentation) — Commit messages follow type(#symbol): format with Symbols: trailer\n12. **flow-coverage-for-multi-component** (postflight/advisory/documentation) — Changes spanning 3+ components have a documented $flow\n13. **context-session-awareness** (preflight/advisory/discovery) — Session recovery or context check tools were called for continuity\n14. **aspect-anchors-valid** (postflight/advisory/verification) — Aspects touched during the session have valid code anchors\n\n## Habit Loading and Overrides\n\nHabits load from three sources, merged in order (later wins):\n\n1. **Seed habits** — The 14 built-in habits (always present)\n2. **Global habits** — `~/.paradigm/habits.yaml` (optional, applies to all projects)\n3. 
**Project habits** — `.paradigm/habits.yaml` (optional, project-specific)\n\nOverrides let you adjust severity or disable habits without redefining them:\n\n```yaml\n# .paradigm/habits.yaml\noverrides:\n ripple-before-modify:\n severity: block # Upgrade from advisory to blocking\n test-new-components:\n enabled: false # Disable for this project\ncustom:\n - id: check-migrations\n name: Verify DB Migrations\n category: verification\n trigger: on-commit\n severity: warn\n check:\n type: file-exists\n params:\n patterns: [\"migrations/*.sql\"]\n```\n\n## Practice Profiles\n\nEvery habit evaluation is recorded as a practice event with a result: `followed`, `skipped`, or `partial`. These events accumulate into practice profiles that show compliance rates over time.\n\n`paradigm_habits_status` returns a practice profile with: overall compliance rate, strongest and weakest categories, per-category breakdowns, trend analysis (improving/declining/stable), and incident correlations — habits whose skipped evaluations correlate with higher incident rates.\n\nThe incident correlation is powerful: if skipping `ripple-before-modify` correlates with a 3x higher incident rate for the modified symbols, that is concrete evidence for upgrading the habit's severity.\n\n## MCP Tools\n\n**`paradigm_habits_check`** — Evaluate habits for a trigger point. Pass the trigger (`preflight`, `postflight`, `on-stop`), optionally with `filesModified` and `symbolsTouched` for context. Returns evaluations with follow/skip/partial results and whether any blocking violations exist.\n\n**`paradigm_habits_status`** — Get the practice profile for an engineer over a time period (7d, 30d, 90d, or all). Shows compliance rates, category breakdowns, trends, and incident correlations.\n\n**`paradigm_practice_context`** — Before modifying symbols, get habit-aware warnings. 
Pass the symbols you are about to touch, and it returns relevant habits, recent compliance rates, and suggestions based on your weak areas.\n\n## CLI Commands\n\nThe CLI provides full habit management:\n\n- `paradigm habits list` — List all habits with trigger, severity, and enabled status\n- `paradigm habits add` — Add a custom habit with check type, patterns, and tools\n- `paradigm habits edit <id>` — Edit habit fields (for seed habits: severity and enabled only)\n- `paradigm habits remove <id>` — Remove a custom habit\n- `paradigm habits enable/disable <id>` — Toggle a habit on or off\n- `paradigm habits check --trigger <trigger>` — Evaluate compliance for a specific trigger\n- `paradigm habits status` — Practice profile with compliance rates and trends\n- `paradigm habits init` — Initialize a habits.yaml file for the project\n\n## Platform Targeting\n\nHabits support a `platforms` field to restrict evaluation to specific platforms. For example, a habit with `platforms: ['claude', 'cursor']` will only be evaluated when running in those environments. A habit with `platforms: ['cli']` will only fire during CLI-driven workflows. When `platforms` is omitted, the habit applies everywhere.",
|
|
174
288
|
"keyConcepts": [
|
|
175
289
|
"Six categories: discovery, verification, testing, documentation, collaboration, security",
|
|
176
290
|
"Four triggers: preflight, postflight, on-commit, on-stop",
|
|
177
291
|
"Three severity levels: advisory (note), warn (visible), block (prevents completion)",
|
|
178
|
-
"
|
|
292
|
+
"14 seed habits establish baseline discipline across all categories",
|
|
179
293
|
"Three-layer loading: seed → global (~/.paradigm/) → project (.paradigm/)",
|
|
180
294
|
"Practice profiles track compliance rates and trend direction",
|
|
181
295
|
"Incident correlations link skipped habits to higher incident rates",
|
|
182
|
-
"
|
|
296
|
+
"Twelve check types: tool-called, file-exists, file-modified, lore-recorded, symbols-registered, gates-declared, tests-exist, git-clean, commit-message-format, flow-coverage, context-checked, aspect-anchored"
|
|
183
297
|
],
|
|
184
298
|
"quiz": [
|
|
185
299
|
{
|
|
@@ -481,6 +595,90 @@
|
|
|
481
595
|
"explanation": "High habit compliance means the behavioral discipline is fine — agents are doing the right things. If incidents persist despite good practices, the issue is likely in the code or architecture, not the process. Sentinel's pattern analysis (`paradigm_sentinel_patterns`) can reveal if the same failure keeps recurring despite resolutions, and `paradigm_sentinel_stats` can show the symbol's incident rate and resolution effectiveness. The answer lives in the incident data, not the compliance data."
|
|
482
596
|
}
|
|
483
597
|
]
|
|
598
|
+
},
|
|
599
|
+
{
|
|
600
|
+
"id": "aspect-graph-advanced",
|
|
601
|
+
"title": "The Aspect Graph at Scale",
|
|
602
|
+
"content": "## Beyond the Basics\n\nPARA 201 introduced the Aspect Graph's internals — the SQLite schema, materialization pipeline, and recursive ripple. This lesson takes you deeper: building custom detectors, advanced graph queries, drift detection in CI/CD, search learning optimization, and governing aspects at enterprise scale.\n\n## Building Custom Aspect Detection Patterns\n\nParadigm ships with 8 built-in detectors that `paradigm_aspect_suggest_scan` uses to find undocumented aspects in source code:\n\n1. **Magic numbers** — Numeric literals that aren't 0 or 1 (e.g., `timeout: 30000`, `maxRetries: 3`)\n2. **Hardcoded strings** — String literals used in conditionals or assignments that smell like configuration (e.g., `'production'`, `'us-east-1'`)\n3. **Rate limits** — Patterns like `rateLimit(100)`, `throttle(1000)`, or variable names containing `limit`, `throttle`, `quota`\n4. **Time values** — Durations, timeouts, TTLs, and expiry values (e.g., `86400`, `24 * 60 * 60`)\n5. **Environment checks** — `process.env`, `std::env`, `os.environ` patterns that branch on environment variables\n6. **Feature flags** — Conditional logic gated on feature names (e.g., `isEnabled('new-checkout')`, `featureFlags.get()`)\n7. **Regex patterns** — Regular expressions used for validation (e.g., email patterns, URL matchers)\n8. 
**Assertion guards** — Invariant checks using `assert`, `invariant()`, `expect()` that enforce guarantees\n\nTo extend the detection system, you define custom detectors in `.paradigm/aspect-detectors.yaml`:\n\n```yaml\ndetectors:\n - id: compliance-annotation\n name: Compliance Annotations\n description: Detects SOC2/GDPR compliance annotations in code\n patterns:\n - regex: \"@(SOC2|GDPR|PCI|HIPAA)\"\n languages: [typescript, javascript, java]\n - regex: \"#\\[compliance\\(\"\n languages: [rust]\n suggestedCategory: rule\n suggestedSeverity: critical\n suggestedTags: [compliance, security]\n\n - id: retry-policy\n name: Retry Policies\n description: Detects retry/backoff configurations\n patterns:\n - regex: \"(retryPolicy|backoff|maxAttempts|retryCount)\"\n languages: [typescript, javascript, python]\n suggestedCategory: configuration\n suggestedSeverity: medium\n```\n\nCustom detectors are loaded alongside the built-in 8 during `paradigm_aspect_suggest_scan`. They follow the same interface: match source code patterns, suggest a category and severity, and let the user decide whether to formalize the finding as a `~aspect`.\n\n## Graph Querying Strategies\n\nThe aspect graph supports three primary querying patterns, each suited to different use cases:\n\n### BFS Traversal (Neighborhood Analysis)\n\n`paradigm_aspect_graph` uses breadth-first search to explore the neighborhood of a symbol. The `hops` parameter controls how far to traverse:\n\n- **1 hop** — Direct connections only. Use this when you need to know what a single aspect directly relates to. Fast, focused, minimal noise.\n- **2 hops** — Friends-of-friends. Reveals indirect relationships: \"this aspect relates to that aspect, which relates to that component.\" The sweet spot for most queries.\n- **3+ hops** — Extended neighborhood. Useful for understanding how distant parts of the codebase connect through aspects. 
Gets noisy in dense graphs.\n\nThe multiplicative weight decay means that each hop reduces confidence. An explicit edge (weight 1.0) followed by an inferred edge (weight 0.5) produces a path weight of 0.5. Two inferred edges produce 0.25. The `minWeight` threshold (default 0.1) prunes low-confidence paths automatically.\n\n### Heatmap-Driven Exploration\n\n`paradigm_aspect_heatmap` ranks aspects by access frequency. This is not about what aspects ARE important — it is about what aspects are USED most. The distinction matters:\n\n- An aspect accessed 50 times via search but never via ripple might have a discoverability problem — people search for it because it is hard to find through the graph.\n- An aspect accessed primarily via ripple has good graph connectivity — it naturally surfaces during impact analysis.\n- An aspect with zero access across all types may be stale, poorly named, or irrelevant.\n\nHeatmap data is the starting point for governance reviews. Aspects that nobody accesses should be evaluated for removal or renaming.\n\n### Edge-Filtered Queries\n\nWhen calling `paradigm_aspect_graph`, you can filter by edge relation to narrow results:\n\n- `enforced-by` — Find all aspects that enforce a given component. Useful when changing a component to know what rules apply.\n- `depends-on` — Find dependency chains. If `~token-expiry-24h` depends-on `~jwt-signing-rs256`, changing JWT signing affects token expiry.\n- `contradicts` — Find conflicting aspects. Two aspects that contradict each other signal an architectural tension that needs resolution.\n- `supersedes` — Find deprecated-but-still-referenced aspects. The superseding aspect should be the authoritative one.\n- `related-to` — The weakest relation. Useful for discovery but not for impact analysis.\n\n## Drift Detection in CI/CD\n\nAspect drift occurs when the code at an anchor location changes without updating the aspect definition. 
The `paradigm_aspect_drift` tool detects this using SHA-256 content hashes.\n\nDuring materialization, the pipeline computes a SHA-256 hash of the code at each anchor's line range and stores it in the `anchors.content_hash` column. When `paradigm_aspect_drift` runs later, it re-reads the code at those line ranges, computes a new hash, and compares. A mismatch means the code changed — the anchor is drifted.\n\nFor CI/CD integration, add drift detection as a pipeline step:\n\n```yaml\n# .github/workflows/paradigm.yml\nsteps:\n - name: Check aspect drift\n run: |\n paradigm scan --quiet\n paradigm doctor --strict --json | jq '.aspects.drifted'\n if [ $(paradigm doctor --json | jq '.aspects.drifted | length') -gt 0 ]; then\n echo \"::error::Aspect anchors have drifted\"\n exit 1\n fi\n```\n\nThe `--strict` flag treats drifted anchors as errors rather than warnings. In a mature project, you want drift detection to block merges — it ensures that aspect documentation stays synchronized with code changes.\n\nDrift detection is also available per-aspect via the MCP tool:\n\n```\nparadigm_aspect_drift({ aspectId: 'token-expiry-24h' })\n```\n\nThis returns: the aspect ID, each anchor with its stored hash vs current hash, whether each anchor has drifted, and the specific lines that changed. Use this during code review to verify that refactors updated their aspect anchors.\n\n## Search Learning Loop Optimization\n\nThe three-tier search system improves over time through the confirm-and-decay mechanism. Here is how to optimize it:\n\n### Tier Priority\n\n1. **Tier 1: Learned mappings** — Query-to-aspect weights in the `search_weights` table. If a query matches a stored mapping with weight >= 1.0, the result is returned immediately. This is instant because it is a simple key-value lookup.\n2. **Tier 2: FTS5 full-text search** — SQLite's FTS5 engine searches aspect descriptions, values, and categories. Returns results ranked by BM25 relevance. Accurate but slower than Tier 1.\n3. 
**Tier 3: Fuzzy matching** — Levenshtein distance-based matching with a configurable threshold. Catches typos and partial matches. Slowest but most forgiving.\n\n### Warming the Learning System\n\nA new project's search starts cold — no learned mappings exist. Every search falls through to Tier 2 or 3. To warm the system:\n\n1. Run common queries for your project's domain (e.g., search for 'expiry', 'rate limit', 'auth')\n2. Confirm the best result with `paradigm_aspect_confirm` for each query\n3. After 3-5 confirmations per query, the learned weight exceeds the Tier 1 threshold\n\nThe decay mechanism (confirmed +1.0, others *0.95) means that a single confirmation is enough to create a Tier 1 entry. But multiple confirmations build a stronger mapping that resists displacement.\n\n### Diagnosing Search Issues\n\nWhen search returns unexpected results:\n\n- Check `search_weights` table entries for the query — are stale mappings dominating?\n- Verify aspect descriptions contain the keywords you are searching for (FTS5 searches descriptions)\n- Check for typos in the query that might prevent Tier 2 matches but trigger Tier 3 fuzzy results\n- Use `paradigm_aspect_heatmap` to see if the expected aspect is ever accessed — a zero-access aspect might have a discovery problem\n\n## Aspect Governance at Scale\n\nWhen a project exceeds 100 aspects, governance becomes critical. Without it, aspects accumulate as stale documentation, anchor drift goes undetected, and the graph becomes noisy rather than useful.\n\n### The Governance Review Cycle\n\nRun quarterly aspect reviews using this process:\n\n1. **Heatmap analysis** — `paradigm_aspect_heatmap({ limit: 0 })` returns ALL aspects ranked by access. The bottom 20% are candidates for removal or consolidation.\n2. **Drift audit** — `paradigm doctor --strict` catches all drifted anchors. Drifted aspects either need anchor updates or should be marked stale.\n3. **Category distribution** — Check that aspect categories are balanced. 
A project with 80 rules and 2 decisions might be over-documenting constraints while missing strategic choices.\n4. **Edge health** — Check for orphaned aspects (no edges to any other symbol). An aspect with zero edges is either standalone (legitimate but rare) or poorly connected.\n5. **Search weight review** — Check the `search_weights` table for queries with multiple high-weight mappings, which indicate ambiguous terminology.\n\n### Naming Conventions at Scale\n\nWith 100+ aspects, naming collisions and ambiguity become real problems. Establish conventions:\n\n- **Category prefix** — Prefix aspects with their category: `~rule-no-console-log`, `~decision-use-redis`, `~constraint-max-upload-10mb`\n- **Domain grouping** — Group related aspects by domain: `~auth-token-expiry`, `~auth-session-timeout`, `~auth-refresh-rotation`\n- **Version suffix** — When aspects evolve: `~rate-limit-v2` supersedes `~rate-limit-v1` with an explicit `supersedes` edge\n\n### Delegation and Ownership\n\nFor large teams, assign aspect ownership:\n\n```yaml\n~payment-idempotency:\n description: Payment operations must be idempotent\n owner: payments-team\n reviewers: [platform-team, security-team]\n```\n\nThe `owner` field indicates who maintains the aspect, and `reviewers` lists teams that should be consulted when the aspect changes. This is purely metadata — Paradigm does not enforce it — but it guides humans and AI agents when modifications are needed.",
|
|
603
|
+
"keyConcepts": [
|
|
604
|
+
"8 built-in detectors: magic numbers, hardcoded strings, rate limits, time values, env checks, feature flags, regex patterns, assertion guards",
|
|
605
|
+
"Custom detectors defined in .paradigm/aspect-detectors.yaml extend the suggest-scan system",
|
|
606
|
+
"BFS traversal with multiplicative weight decay prunes low-confidence paths automatically",
|
|
607
|
+
"Heatmap-driven exploration reveals usage patterns vs importance assumptions",
|
|
608
|
+
"Five edge relations for filtered queries: enforced-by, depends-on, contradicts, supersedes, related-to",
|
|
609
|
+
"Drift detection uses SHA-256 content hashes comparing stored vs current code at anchor line ranges",
|
|
610
|
+
"CI/CD integration via paradigm doctor --strict --json blocks merges on drifted anchors",
|
|
611
|
+
"Three-tier search: learned mappings (instant) -> FTS5 (accurate) -> fuzzy (forgiving)",
|
|
612
|
+
"Warm the learning system with 3-5 confirmations per common query",
|
|
613
|
+
"Governance review cycle: heatmap analysis, drift audit, category distribution, edge health, search weight review"
|
|
614
|
+
],
|
|
615
|
+
"quiz": [
|
|
616
|
+
{
|
|
617
|
+
"id": "q1",
|
|
618
|
+
"question": "How many built-in detectors does paradigm_aspect_suggest_scan use, and which of these is NOT one of them?",
|
|
619
|
+
"choices": {
|
|
620
|
+
"A": "8 built-in detectors; 'database schema' is not one of them",
|
|
621
|
+
"B": "6 built-in detectors; 'magic numbers' is not one of them",
|
|
622
|
+
"C": "8 built-in detectors; 'rate limits' IS one of them (trick question)",
|
|
623
|
+
"D": "10 built-in detectors; 'feature flags' is not one of them",
|
|
624
|
+
"E": "5 built-in detectors; 'environment checks' is not one of them"
|
|
625
|
+
},
|
|
626
|
+
"correct": "A",
|
|
627
|
+
"explanation": "paradigm_aspect_suggest_scan uses 8 built-in detectors: magic numbers, hardcoded strings, rate limits, time values, environment checks, feature flags, regex patterns, and assertion guards. 'Database schema' is not among them. Custom detectors can be added via .paradigm/aspect-detectors.yaml to extend the detection system."
|
|
628
|
+
},
|
|
629
|
+
{
|
|
630
|
+
"id": "q2",
|
|
631
|
+
"question": "You want to find all rules that enforce constraints on #payment-service through the aspect graph. Which query approach is most effective?",
|
|
632
|
+
"choices": {
|
|
633
|
+
"A": "paradigm_aspect_search({ query: 'payment rules' }) to find them by text",
|
|
634
|
+
"B": "paradigm_aspect_graph({ symbol: '#payment-service', hops: 1 }) filtered by 'enforced-by' edge relation",
|
|
635
|
+
"C": "paradigm_aspect_heatmap({ limit: 100 }) and manually scan for payment-related aspects",
|
|
636
|
+
"D": "paradigm_aspect_drift({ aspectId: '#payment-service' }) to find stale rules",
|
|
637
|
+
"E": "paradigm_ripple({ symbol: '#payment-service' }) without any graph filtering"
|
|
638
|
+
},
|
|
639
|
+
"correct": "B",
|
|
640
|
+
"explanation": "An edge-filtered graph query at 1 hop with the 'enforced-by' relation is the most direct approach. It returns exactly the aspects that enforce rules on the target component. Search (A) finds by text, not by graph relationship. Heatmap (C) ranks by usage, not by target. Drift (D) checks anchor freshness, not relationships."
|
|
641
|
+
},
|
|
642
|
+
{
|
|
643
|
+
"id": "q3",
|
|
644
|
+
"question": "Your CI pipeline should fail when aspect anchors have drifted. Which command configuration achieves this?",
|
|
645
|
+
"choices": {
|
|
646
|
+
"A": "paradigm doctor with no flags — drift is always a blocking error",
|
|
647
|
+
"B": "paradigm doctor --strict — treats drifted anchors as errors that cause a non-zero exit code",
|
|
648
|
+
"C": "paradigm scan --fix — automatically fixes drifted anchors",
|
|
649
|
+
"D": "paradigm_aspect_drift with no arguments — checks all aspects and exits non-zero on drift",
|
|
650
|
+
"E": "paradigm lint --strict — lint checks include drift detection"
|
|
651
|
+
},
|
|
652
|
+
"correct": "B",
|
|
653
|
+
"explanation": "paradigm doctor --strict treats warnings (including drifted anchors) as errors, producing a non-zero exit code that fails the CI step. Without --strict, drifted anchors are warnings that do not block. paradigm scan rebuilds the index but does not check drift. paradigm lint checks .purpose file structure, not anchor content hashes."
|
|
654
|
+
},
|
|
655
|
+
{
|
|
656
|
+
"id": "q4",
|
|
657
|
+
"question": "A new project's aspect search always falls to Tier 3 (fuzzy matching). How do you warm the learning system so common queries use Tier 1?",
|
|
658
|
+
"choices": {
|
|
659
|
+
"A": "Manually edit the search_weights SQLite table to insert mappings",
|
|
660
|
+
"B": "Run paradigm_reindex with a --warm-search flag",
|
|
661
|
+
"C": "Run common queries with paradigm_aspect_search, then confirm the best results with paradigm_aspect_confirm for each query",
|
|
662
|
+
"D": "Wait for 100+ searches to accumulate — Tier 1 learns automatically without confirmation",
|
|
663
|
+
"E": "Set limits.searchLearningRate to a higher value in config.yaml"
|
|
664
|
+
},
|
|
665
|
+
"correct": "C",
|
|
666
|
+
"explanation": "The learning system requires explicit confirmation via paradigm_aspect_confirm. When you search for a term and confirm the best result, the confirmed aspect gets +1.0 weight for that query. After 3-5 confirmations, the weight exceeds the Tier 1 threshold and future queries return instantly. There is no automatic learning without confirmation — the system relies on user feedback to improve."
|
|
667
|
+
},
|
|
668
|
+
{
|
|
669
|
+
"id": "q5",
|
|
670
|
+
"question": "During a quarterly governance review, the heatmap shows that 30 aspects out of 120 have zero access across all types (search, ripple, navigate, direct). What does this indicate and what should you do?",
|
|
671
|
+
"choices": {
|
|
672
|
+
"A": "These aspects are well-documented and need no changes — zero access means no issues",
|
|
673
|
+
"B": "Delete all 30 immediately — unused aspects are always stale",
|
|
674
|
+
"C": "These aspects may be stale, poorly named, or irrelevant — evaluate each for removal, renaming, or consolidation as part of the governance review",
|
|
675
|
+
"D": "Increase their severity to 'critical' to force agents to access them",
|
|
676
|
+
"E": "Move them to a separate 'archive' section in the .purpose files"
|
|
677
|
+
},
|
|
678
|
+
"correct": "C",
|
|
679
|
+
"explanation": "Zero-access aspects are candidates for review, not automatic deletion. Some may be legitimate but poorly named (rename to improve discoverability). Some may be truly stale with drifted anchors (remove or update). Some may have been superseded by newer aspects (consolidate with supersedes edges). The governance review evaluates each case individually."
|
|
680
|
+
}
|
|
681
|
+
]
|
|
484
682
|
}
|
|
485
683
|
]
|
|
486
684
|
}
|