@code-insights/cli 3.6.0 → 4.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (213) hide show
  1. package/CHANGELOG.md +53 -0
  2. package/README.md +40 -4
  3. package/dashboard-dist/assets/index-D1JDyyu5.js +660 -0
  4. package/dashboard-dist/assets/index-kwbCW1n2.css +1 -0
  5. package/dashboard-dist/index.html +2 -2
  6. package/dist/commands/reflect.d.ts +3 -0
  7. package/dist/commands/reflect.d.ts.map +1 -0
  8. package/dist/commands/reflect.js +457 -0
  9. package/dist/commands/reflect.js.map +1 -0
  10. package/dist/commands/reset.d.ts.map +1 -1
  11. package/dist/commands/reset.js +3 -1
  12. package/dist/commands/reset.js.map +1 -1
  13. package/dist/commands/stats/actions/patterns.d.ts +3 -0
  14. package/dist/commands/stats/actions/patterns.d.ts.map +1 -0
  15. package/dist/commands/stats/actions/patterns.js +140 -0
  16. package/dist/commands/stats/actions/patterns.js.map +1 -0
  17. package/dist/commands/stats/data/aggregation-helpers.d.ts +23 -0
  18. package/dist/commands/stats/data/aggregation-helpers.d.ts.map +1 -0
  19. package/dist/commands/stats/data/aggregation-helpers.js +128 -0
  20. package/dist/commands/stats/data/aggregation-helpers.js.map +1 -0
  21. package/dist/commands/stats/data/aggregation.d.ts +3 -35
  22. package/dist/commands/stats/data/aggregation.d.ts.map +1 -1
  23. package/dist/commands/stats/data/aggregation.js +8 -290
  24. package/dist/commands/stats/data/aggregation.js.map +1 -1
  25. package/dist/commands/stats/data/time-series.d.ts +24 -0
  26. package/dist/commands/stats/data/time-series.d.ts.map +1 -0
  27. package/dist/commands/stats/data/time-series.js +162 -0
  28. package/dist/commands/stats/data/time-series.js.map +1 -0
  29. package/dist/commands/stats/index.d.ts.map +1 -1
  30. package/dist/commands/stats/index.js +7 -1
  31. package/dist/commands/stats/index.js.map +1 -1
  32. package/dist/commands/sync.d.ts +19 -0
  33. package/dist/commands/sync.d.ts.map +1 -1
  34. package/dist/commands/sync.js +67 -1
  35. package/dist/commands/sync.js.map +1 -1
  36. package/dist/constants/llm-providers.js +1 -1
  37. package/dist/constants/llm-providers.js.map +1 -1
  38. package/dist/db/client.d.ts +7 -0
  39. package/dist/db/client.d.ts.map +1 -1
  40. package/dist/db/client.js +11 -1
  41. package/dist/db/client.js.map +1 -1
  42. package/dist/db/migrate.d.ts +10 -1
  43. package/dist/db/migrate.d.ts.map +1 -1
  44. package/dist/db/migrate.js +96 -0
  45. package/dist/db/migrate.js.map +1 -1
  46. package/dist/db/read.d.ts +5 -0
  47. package/dist/db/read.d.ts.map +1 -1
  48. package/dist/db/read.js +20 -3
  49. package/dist/db/read.js.map +1 -1
  50. package/dist/db/schema.d.ts +1 -1
  51. package/dist/db/schema.js +2 -2
  52. package/dist/db/schema.js.map +1 -1
  53. package/dist/db/write.d.ts.map +1 -1
  54. package/dist/db/write.js +8 -2
  55. package/dist/db/write.js.map +1 -1
  56. package/dist/index.js +39 -3
  57. package/dist/index.js.map +1 -1
  58. package/dist/parser/jsonl.d.ts +19 -1
  59. package/dist/parser/jsonl.d.ts.map +1 -1
  60. package/dist/parser/jsonl.js +109 -3
  61. package/dist/parser/jsonl.js.map +1 -1
  62. package/dist/providers/codex.js +4 -1
  63. package/dist/providers/codex.js.map +1 -1
  64. package/dist/providers/copilot-cli.js +3 -0
  65. package/dist/providers/copilot-cli.js.map +1 -1
  66. package/dist/providers/copilot.js +3 -0
  67. package/dist/providers/copilot.js.map +1 -1
  68. package/dist/providers/cursor.js +3 -0
  69. package/dist/providers/cursor.js.map +1 -1
  70. package/dist/types.d.ts +109 -0
  71. package/dist/types.d.ts.map +1 -1
  72. package/dist/utils/date-utils.d.ts +6 -0
  73. package/dist/utils/date-utils.d.ts.map +1 -0
  74. package/dist/utils/date-utils.js +26 -0
  75. package/dist/utils/date-utils.js.map +1 -0
  76. package/dist/utils/telemetry.d.ts +1 -1
  77. package/dist/utils/telemetry.d.ts.map +1 -1
  78. package/dist/utils/telemetry.js.map +1 -1
  79. package/package.json +2 -1
  80. package/server-dist/export/agent-rules.d.ts.map +1 -1
  81. package/server-dist/export/agent-rules.js +15 -4
  82. package/server-dist/export/agent-rules.js.map +1 -1
  83. package/server-dist/export/knowledge-base.d.ts.map +1 -1
  84. package/server-dist/export/knowledge-base.js +30 -4
  85. package/server-dist/export/knowledge-base.js.map +1 -1
  86. package/server-dist/index.d.ts.map +1 -1
  87. package/server-dist/index.js +4 -0
  88. package/server-dist/index.js.map +1 -1
  89. package/server-dist/llm/analysis-db.d.ts +51 -0
  90. package/server-dist/llm/analysis-db.d.ts.map +1 -0
  91. package/server-dist/llm/analysis-db.js +208 -0
  92. package/server-dist/llm/analysis-db.js.map +1 -0
  93. package/server-dist/llm/analysis-internal.d.ts +36 -0
  94. package/server-dist/llm/analysis-internal.d.ts.map +1 -0
  95. package/server-dist/llm/analysis-internal.js +23 -0
  96. package/server-dist/llm/analysis-internal.js.map +1 -0
  97. package/server-dist/llm/analysis-pricing.d.ts +25 -0
  98. package/server-dist/llm/analysis-pricing.d.ts.map +1 -0
  99. package/server-dist/llm/analysis-pricing.js +74 -0
  100. package/server-dist/llm/analysis-pricing.js.map +1 -0
  101. package/server-dist/llm/analysis-usage-db.d.ts +45 -0
  102. package/server-dist/llm/analysis-usage-db.d.ts.map +1 -0
  103. package/server-dist/llm/analysis-usage-db.js +35 -0
  104. package/server-dist/llm/analysis-usage-db.js.map +1 -0
  105. package/server-dist/llm/analysis.d.ts +9 -79
  106. package/server-dist/llm/analysis.d.ts.map +1 -1
  107. package/server-dist/llm/analysis.js +119 -375
  108. package/server-dist/llm/analysis.js.map +1 -1
  109. package/server-dist/llm/facet-extraction.d.ts +14 -0
  110. package/server-dist/llm/facet-extraction.d.ts.map +1 -0
  111. package/server-dist/llm/facet-extraction.js +91 -0
  112. package/server-dist/llm/facet-extraction.js.map +1 -0
  113. package/server-dist/llm/friction-normalize.d.ts +16 -0
  114. package/server-dist/llm/friction-normalize.d.ts.map +1 -0
  115. package/server-dist/llm/friction-normalize.js +54 -0
  116. package/server-dist/llm/friction-normalize.js.map +1 -0
  117. package/server-dist/llm/index.d.ts +3 -2
  118. package/server-dist/llm/index.d.ts.map +1 -1
  119. package/server-dist/llm/index.js +1 -1
  120. package/server-dist/llm/index.js.map +1 -1
  121. package/server-dist/llm/message-format.d.ts +32 -0
  122. package/server-dist/llm/message-format.d.ts.map +1 -0
  123. package/server-dist/llm/message-format.js +129 -0
  124. package/server-dist/llm/message-format.js.map +1 -0
  125. package/server-dist/llm/normalize-utils.d.ts +22 -0
  126. package/server-dist/llm/normalize-utils.d.ts.map +1 -0
  127. package/server-dist/llm/normalize-utils.js +71 -0
  128. package/server-dist/llm/normalize-utils.js.map +1 -0
  129. package/server-dist/llm/pattern-normalize.d.ts +19 -0
  130. package/server-dist/llm/pattern-normalize.d.ts.map +1 -0
  131. package/server-dist/llm/pattern-normalize.js +90 -0
  132. package/server-dist/llm/pattern-normalize.js.map +1 -0
  133. package/server-dist/llm/prompt-constants.d.ts +9 -0
  134. package/server-dist/llm/prompt-constants.d.ts.map +1 -0
  135. package/server-dist/llm/prompt-constants.js +169 -0
  136. package/server-dist/llm/prompt-constants.js.map +1 -0
  137. package/server-dist/llm/prompt-quality-analysis.d.ts +8 -0
  138. package/server-dist/llm/prompt-quality-analysis.d.ts.map +1 -0
  139. package/server-dist/llm/prompt-quality-analysis.js +133 -0
  140. package/server-dist/llm/prompt-quality-analysis.js.map +1 -0
  141. package/server-dist/llm/prompt-quality-normalize.d.ts +26 -0
  142. package/server-dist/llm/prompt-quality-normalize.d.ts.map +1 -0
  143. package/server-dist/llm/prompt-quality-normalize.js +116 -0
  144. package/server-dist/llm/prompt-quality-normalize.js.map +1 -0
  145. package/server-dist/llm/prompt-types.d.ts +124 -0
  146. package/server-dist/llm/prompt-types.d.ts.map +1 -0
  147. package/server-dist/llm/prompt-types.js +4 -0
  148. package/server-dist/llm/prompt-types.js.map +1 -0
  149. package/server-dist/llm/prompts.d.ts +57 -100
  150. package/server-dist/llm/prompts.d.ts.map +1 -1
  151. package/server-dist/llm/prompts.js +606 -232
  152. package/server-dist/llm/prompts.js.map +1 -1
  153. package/server-dist/llm/providers/anthropic.d.ts.map +1 -1
  154. package/server-dist/llm/providers/anthropic.js +12 -0
  155. package/server-dist/llm/providers/anthropic.js.map +1 -1
  156. package/server-dist/llm/providers/gemini.d.ts.map +1 -1
  157. package/server-dist/llm/providers/gemini.js +10 -2
  158. package/server-dist/llm/providers/gemini.js.map +1 -1
  159. package/server-dist/llm/providers/ollama.d.ts.map +1 -1
  160. package/server-dist/llm/providers/ollama.js +3 -1
  161. package/server-dist/llm/providers/ollama.js.map +1 -1
  162. package/server-dist/llm/providers/openai.d.ts.map +1 -1
  163. package/server-dist/llm/providers/openai.js +4 -1
  164. package/server-dist/llm/providers/openai.js.map +1 -1
  165. package/server-dist/llm/recurring-insights.d.ts +26 -0
  166. package/server-dist/llm/recurring-insights.d.ts.map +1 -0
  167. package/server-dist/llm/recurring-insights.js +119 -0
  168. package/server-dist/llm/recurring-insights.js.map +1 -0
  169. package/server-dist/llm/reflect-prompts.d.ts +55 -0
  170. package/server-dist/llm/reflect-prompts.d.ts.map +1 -0
  171. package/server-dist/llm/reflect-prompts.js +151 -0
  172. package/server-dist/llm/reflect-prompts.js.map +1 -0
  173. package/server-dist/llm/response-parsers.d.ts +8 -0
  174. package/server-dist/llm/response-parsers.d.ts.map +1 -0
  175. package/server-dist/llm/response-parsers.js +151 -0
  176. package/server-dist/llm/response-parsers.js.map +1 -0
  177. package/server-dist/llm/types.d.ts +23 -1
  178. package/server-dist/llm/types.d.ts.map +1 -1
  179. package/server-dist/llm/types.js +10 -1
  180. package/server-dist/llm/types.js.map +1 -1
  181. package/server-dist/routes/analysis.d.ts.map +1 -1
  182. package/server-dist/routes/analysis.js +107 -282
  183. package/server-dist/routes/analysis.js.map +1 -1
  184. package/server-dist/routes/analytics.d.ts.map +1 -1
  185. package/server-dist/routes/analytics.js +3 -1
  186. package/server-dist/routes/analytics.js.map +1 -1
  187. package/server-dist/routes/export.d.ts.map +1 -1
  188. package/server-dist/routes/export.js +19 -27
  189. package/server-dist/routes/export.js.map +1 -1
  190. package/server-dist/routes/facets.d.ts +4 -0
  191. package/server-dist/routes/facets.d.ts.map +1 -0
  192. package/server-dist/routes/facets.js +208 -0
  193. package/server-dist/routes/facets.js.map +1 -0
  194. package/server-dist/routes/insights.d.ts.map +1 -1
  195. package/server-dist/routes/insights.js +12 -11
  196. package/server-dist/routes/insights.js.map +1 -1
  197. package/server-dist/routes/reflect.d.ts +4 -0
  198. package/server-dist/routes/reflect.d.ts.map +1 -0
  199. package/server-dist/routes/reflect.js +332 -0
  200. package/server-dist/routes/reflect.js.map +1 -0
  201. package/server-dist/routes/route-helpers.d.ts +124 -0
  202. package/server-dist/routes/route-helpers.d.ts.map +1 -0
  203. package/server-dist/routes/route-helpers.js +242 -0
  204. package/server-dist/routes/route-helpers.js.map +1 -0
  205. package/server-dist/routes/sessions.d.ts.map +1 -1
  206. package/server-dist/routes/sessions.js +29 -5
  207. package/server-dist/routes/sessions.js.map +1 -1
  208. package/server-dist/routes/shared-aggregation.d.ts +82 -0
  209. package/server-dist/routes/shared-aggregation.d.ts.map +1 -0
  210. package/server-dist/routes/shared-aggregation.js +384 -0
  211. package/server-dist/routes/shared-aggregation.js.map +1 -0
  212. package/dashboard-dist/assets/index-C1mDRV7y.js +0 -607
  213. package/dashboard-dist/assets/index-_SWpRg6C.css +0 -1
@@ -0,0 +1,9 @@
1
+ export declare const FRICTION_CLASSIFICATION_GUIDANCE = "\nFRICTION CLASSIFICATION GUIDANCE:\n\nEach friction point captures WHAT went wrong (category + description), WHO contributed (attribution), and WHY you classified it that way (_reasoning).\n\nCATEGORIES \u2014 classify the TYPE of gap or obstacle:\n- \"wrong-approach\": A strategy was pursued that didn't fit the task \u2014 wrong architecture, wrong tool, wrong pattern\n- \"knowledge-gap\": Incorrect knowledge was applied about a library, API, framework, or language feature\n- \"stale-assumptions\": Work proceeded from assumptions about current state that were incorrect (stale files, changed config, different environment)\n- \"incomplete-requirements\": Instructions were missing critical context, constraints, or acceptance criteria\n- \"context-loss\": Prior decisions or constraints established earlier in the session were lost or forgotten\n- \"scope-creep\": Work expanded beyond the boundaries of the stated task\n- \"repeated-mistakes\": The same or similar error occurred multiple times despite earlier correction\n- \"documentation-gap\": Relevant docs existed but were inaccessible or unfindable during the session\n- \"tooling-limitation\": The tool genuinely lacked a needed capability\n\nWhen no category fits, create a specific kebab-case category. A precise novel category is better than a vague canonical one.\n\nATTRIBUTION \u2014 3-step decision tree (follow IN ORDER):\nStep 1: Is the cause external to the user-AI interaction? (missing docs, broken tooling, infra outage) \u2192 \"environmental\"\nStep 2: Could the USER have prevented this with better input? 
Evidence: vague prompt, missing context, no constraints, late requirements, ambiguous correction \u2192 \"user-actionable\"\nStep 3: User input was clear and the AI still failed \u2192 \"ai-capability\"\nWhen genuinely mixed between user-actionable and ai-capability, lean \"user-actionable\" \u2014 this tool helps users improve.\n\nDESCRIPTION RULES:\n- One neutral sentence describing the GAP, not the actor\n- Include specific details (file names, APIs, error messages)\n- Frame as \"Missing X caused Y\" NOT \"The AI failed to X\" or \"The user forgot to X\"\n- Let the attribution field carry the who";
2
+ export declare const CANONICAL_FRICTION_CATEGORIES: readonly ["wrong-approach", "knowledge-gap", "stale-assumptions", "incomplete-requirements", "context-loss", "scope-creep", "repeated-mistakes", "documentation-gap", "tooling-limitation"];
3
+ export declare const CANONICAL_PATTERN_CATEGORIES: readonly ["structured-planning", "incremental-implementation", "verification-workflow", "systematic-debugging", "self-correction", "context-gathering", "domain-expertise", "effective-tooling"];
4
+ export declare const CANONICAL_PQ_DEFICIT_CATEGORIES: readonly ["vague-request", "missing-context", "late-constraint", "unclear-correction", "scope-drift", "missing-acceptance-criteria", "assumption-not-surfaced"];
5
+ export declare const CANONICAL_PQ_STRENGTH_CATEGORIES: readonly ["precise-request", "effective-context", "productive-correction"];
6
+ export declare const CANONICAL_PQ_CATEGORIES: readonly ["vague-request", "missing-context", "late-constraint", "unclear-correction", "scope-drift", "missing-acceptance-criteria", "assumption-not-surfaced", "precise-request", "effective-context", "productive-correction"];
7
+ export declare const PROMPT_QUALITY_CLASSIFICATION_GUIDANCE = "\nPROMPT QUALITY CLASSIFICATION GUIDANCE:\n\nEach finding captures a specific moment where the user's prompting either caused friction (deficit) or enabled productivity (strength).\n\nDEFICIT CATEGORIES \u2014 classify prompting problems:\n- \"vague-request\": Request lacked specificity needed for the AI to act without guessing. Missing file paths, function names, expected behavior, or concrete details.\n NOT this category if the AI had enough context to succeed but failed anyway \u2014 that is an AI capability issue, not a prompting issue.\n\n- \"missing-context\": Critical background knowledge about architecture, conventions, dependencies, or current state was not provided.\n NOT this category if the information was available in the codebase and the AI could have found it by reading files \u2014 that is an AI context-gathering failure.\n\n- \"late-constraint\": A requirement or constraint was provided AFTER the AI had already started implementing a different approach, causing rework.\n NOT this category if the constraint was genuinely discovered during implementation (requirements changed). Only classify if the user KNEW the constraint before the session started.\n\n- \"unclear-correction\": The user told the AI its output was wrong without explaining what was wrong or why. 
\"That's not right\", \"try again\", \"no\" without context.\n NOT this category if the user gave a brief but sufficient correction (\"use map instead of forEach\" is clear enough).\n\n- \"scope-drift\": The session objective shifted mid-conversation, or multiple unrelated objectives were addressed in one session.\n NOT this category if the user is working through logically connected subtasks of one objective.\n\n- \"missing-acceptance-criteria\": The user did not define what successful completion looks like, leading to back-and-forth about whether the output meets expectations.\n NOT this category for exploratory sessions where the user is discovering what they want.\n\n- \"assumption-not-surfaced\": The user held an unstated assumption that the AI could not reasonably infer from code or conversation.\n NOT this category if the assumption was reasonable for the AI to make (e.g., standard coding conventions).\n\nSTRENGTH CATEGORIES \u2014 classify prompting successes (only when notably above average):\n- \"precise-request\": Request included enough specificity (file paths, function names, expected behavior, error messages) that the AI could act correctly on the first attempt.\n\n- \"effective-context\": User proactively shared architecture, conventions, prior decisions, or current state that the AI demonstrably used to make better decisions.\n\n- \"productive-correction\": When the AI went off track, the user provided a correction that included WHAT was wrong, WHY, and enough context for the AI to redirect effectively on the next response.\n\nCONTRASTIVE PAIRS:\n- vague-request vs missing-context: Was the problem in HOW THE TASK WAS DESCRIBED (vague-request) or WHAT BACKGROUND KNOWLEDGE WAS ABSENT (missing-context)?\n- late-constraint vs missing-context: Did the user EVENTUALLY provide it in the same session? Yes \u2192 late-constraint. 
Never \u2192 missing-context.\n- missing-context vs assumption-not-surfaced: Is this a FACT the user could have copy-pasted (missing-context), or a BELIEF/PREFERENCE they held (assumption-not-surfaced)?\n- scope-drift vs missing-acceptance-criteria: Did the user try to do TOO MANY THINGS (scope-drift) or ONE THING WITHOUT DEFINING SUCCESS (missing-acceptance-criteria)?\n- unclear-correction vs vague-request: Was this the user's FIRST MESSAGE about this task (vague-request) or a RESPONSE TO AI OUTPUT (unclear-correction)?\n\nDIMENSION SCORING (0-100):\n- context_provision: How well did the user provide relevant background upfront?\n 90+: Proactively shared architecture, constraints, conventions. 50-69: Notable gaps causing detours. <30: No context, AI working blind.\n- request_specificity: How precise were task requests?\n 90+: File paths, expected behavior, scope boundaries. 50-69: Mix of specific and vague. <30: Nearly all requests lacked detail.\n- scope_management: How focused was the session?\n 90+: Single clear objective, logical progression. 50-69: Some drift but primary goal met. <30: Unfocused, no clear objective.\n- information_timing: Were requirements provided when needed?\n 90+: All constraints front-loaded before implementation. 50-69: Some important requirements late. <30: Requirements drip-fed, constant corrections.\n- correction_quality: How well did the user redirect the AI?\n 90+: Corrections included what, why, and context. 50-69: Mix of clear and unclear. <30: Corrections gave almost no signal.\n Score 75 if no corrections were needed (absence of corrections in a successful session = good prompting).\n\nEDGE CASES:\n- Short sessions (<5 user messages): Score conservatively. Do not penalize for missing elements unnecessary in quick tasks.\n- Exploration sessions: Do not penalize for missing acceptance criteria or scope drift.\n- Sessions where AI performed well despite vague prompts: Still classify deficits. 
Impact should be \"low\" since no visible cost.\n- Agentic/delegation sessions: If the user gave a clear high-level directive and the AI autonomously planned and executed successfully, do not penalize for low message count or lack of micro-level specificity. Effective delegation IS good prompting. Focus on the quality of the initial delegation prompt.";
8
+ export declare const EFFECTIVE_PATTERN_CLASSIFICATION_GUIDANCE = "\nEFFECTIVE PATTERN CLASSIFICATION GUIDANCE:\n\nEach effective pattern captures a technique or approach that contributed to a productive session outcome.\n\nBASELINE EXCLUSION \u2014 do NOT classify these as patterns:\n- Routine file reads at session start (Read/Glob/Grep on <5 files before editing)\n- Following explicit user instructions (user said \"run tests\" \u2192 running tests is not a pattern)\n- Basic tool usage (single file edits, standard CLI commands)\n- Trivial self-corrections (typo fixes, minor syntax errors caught immediately)\nOnly classify behavior that is NOTABLY thorough, strategic, or beyond baseline expectations.\n\nCATEGORIES \u2014 classify the TYPE of effective pattern:\n- \"structured-planning\": Decomposed the task into explicit steps, defined scope boundaries, or established a plan BEFORE writing code. Signal: plan/task-list/scope-definition appears before implementation.\n- \"incremental-implementation\": Work progressed in small, verifiable steps with validation between them. Signal: multiple small edits with checks between, not one large batch.\n- \"verification-workflow\": Proactive correctness checks (builds, tests, linters, types) BEFORE considering work complete. Signal: test/build/lint commands when nothing was known broken.\n- \"systematic-debugging\": Methodical investigation using structured techniques (binary search, log insertion, reproduction isolation). Signal: multiple targeted diagnostic steps, not random guessing.\n- \"self-correction\": Recognized a wrong path and pivoted WITHOUT user correction. Signal: explicit acknowledgment of mistake + approach change. NOT this if the user pointed out the error.\n- \"context-gathering\": NOTABLY thorough investigation before changes \u2014 reading 5+ files, cross-module exploration, schema/type/config review. 
Signal: substantial Read/Grep/Glob usage spanning multiple directories before any Edit/Write.\n- \"domain-expertise\": Applied specific framework/API/language knowledge correctly on first attempt without searching. Signal: correct non-obvious API usage with no preceding search and no subsequent error. NOT this if files were read first \u2014 that is context-gathering.\n- \"effective-tooling\": Leveraged advanced tool capabilities that multiplied productivity \u2014 agent delegation, parallel work, multi-file coordination, strategic mode selection. Signal: use of tool features beyond basic read/write/edit.\n\nCONTRASTIVE PAIRS:\n- structured-planning vs incremental-implementation: Planning = DECIDING what to do (before). Incremental = HOW you execute (during). Can have one without the other.\n- context-gathering vs domain-expertise: Gathering = ACTIVE INVESTIGATION (reading files). Expertise = APPLYING EXISTING KNOWLEDGE without investigation. If files were read first \u2192 context-gathering.\n- verification-workflow vs systematic-debugging: Verification = PROACTIVE (checking working code). Debugging = REACTIVE (investigating a failure).\n- self-correction vs user-directed: Self-correction = AI caught own mistake unprompted. User said \"that's wrong\" \u2192 NOT self-correction.\n\nDRIVER \u2014 4-step decision tree (follow IN ORDER):\nStep 1: Did user infrastructure enable this? (CLAUDE.md rules, agent configs, hookify hooks, custom commands, system prompts) \u2192 \"user-driven\"\nStep 2: Did the user explicitly request this behavior? (asked for plan, requested tests, directed investigation) \u2192 \"user-driven\"\nStep 3: Did the AI exhibit this without any user prompting or infrastructure? \u2192 \"ai-driven\"\nStep 4: Both made distinct, identifiable contributions \u2192 \"collaborative\"\nUse \"collaborative\" ONLY when you can name what EACH party contributed. 
If uncertain, prefer the more specific label.\n\nWhen no canonical category fits, create a specific kebab-case category (a precise novel category is better than forcing a poor fit).";
9
+ //# sourceMappingURL=prompt-constants.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"prompt-constants.d.ts","sourceRoot":"","sources":["../../src/llm/prompt-constants.ts"],"names":[],"mappings":"AAMA,eAAO,MAAM,gCAAgC,wpEA4BH,CAAC;AAE3C,eAAO,MAAM,6BAA6B,6LAUhC,CAAC;AAEX,eAAO,MAAM,4BAA4B,kMAS/B,CAAC;AAEX,eAAO,MAAM,+BAA+B,iKAQlC,CAAC;AAEX,eAAO,MAAM,gCAAgC,4EAInC,CAAC;AAEX,eAAO,MAAM,uBAAuB,kOAG1B,CAAC;AAEX,eAAO,MAAM,sCAAsC,03KA0D8P,CAAC;AAElT,eAAO,MAAM,yCAAyC,u0HAmC+E,CAAC"}
@@ -0,0 +1,169 @@
1
+ // Canonical category arrays and classification guidance strings for LLM analysis.
2
+ // Extracted from prompts.ts — imported by normalizers and prompt generators.
3
+ // Shared guidance for friction category and attribution classification.
4
+ // Actor-neutral category definitions describe the gap, not the actor.
5
+ // Attribution field captures who contributed to the friction for actionability.
6
+ export const FRICTION_CLASSIFICATION_GUIDANCE = `
7
+ FRICTION CLASSIFICATION GUIDANCE:
8
+
9
+ Each friction point captures WHAT went wrong (category + description), WHO contributed (attribution), and WHY you classified it that way (_reasoning).
10
+
11
+ CATEGORIES — classify the TYPE of gap or obstacle:
12
+ - "wrong-approach": A strategy was pursued that didn't fit the task — wrong architecture, wrong tool, wrong pattern
13
+ - "knowledge-gap": Incorrect knowledge was applied about a library, API, framework, or language feature
14
+ - "stale-assumptions": Work proceeded from assumptions about current state that were incorrect (stale files, changed config, different environment)
15
+ - "incomplete-requirements": Instructions were missing critical context, constraints, or acceptance criteria
16
+ - "context-loss": Prior decisions or constraints established earlier in the session were lost or forgotten
17
+ - "scope-creep": Work expanded beyond the boundaries of the stated task
18
+ - "repeated-mistakes": The same or similar error occurred multiple times despite earlier correction
19
+ - "documentation-gap": Relevant docs existed but were inaccessible or unfindable during the session
20
+ - "tooling-limitation": The tool genuinely lacked a needed capability
21
+
22
+ When no category fits, create a specific kebab-case category. A precise novel category is better than a vague canonical one.
23
+
24
+ ATTRIBUTION — 3-step decision tree (follow IN ORDER):
25
+ Step 1: Is the cause external to the user-AI interaction? (missing docs, broken tooling, infra outage) → "environmental"
26
+ Step 2: Could the USER have prevented this with better input? Evidence: vague prompt, missing context, no constraints, late requirements, ambiguous correction → "user-actionable"
27
+ Step 3: User input was clear and the AI still failed → "ai-capability"
28
+ When genuinely mixed between user-actionable and ai-capability, lean "user-actionable" — this tool helps users improve.
29
+
30
+ DESCRIPTION RULES:
31
+ - One neutral sentence describing the GAP, not the actor
32
+ - Include specific details (file names, APIs, error messages)
33
+ - Frame as "Missing X caused Y" NOT "The AI failed to X" or "The user forgot to X"
34
+ - Let the attribution field carry the who`;
35
+ export const CANONICAL_FRICTION_CATEGORIES = [
36
+ 'wrong-approach',
37
+ 'knowledge-gap',
38
+ 'stale-assumptions',
39
+ 'incomplete-requirements',
40
+ 'context-loss',
41
+ 'scope-creep',
42
+ 'repeated-mistakes',
43
+ 'documentation-gap',
44
+ 'tooling-limitation',
45
+ ];
46
+ export const CANONICAL_PATTERN_CATEGORIES = [
47
+ 'structured-planning',
48
+ 'incremental-implementation',
49
+ 'verification-workflow',
50
+ 'systematic-debugging',
51
+ 'self-correction',
52
+ 'context-gathering',
53
+ 'domain-expertise',
54
+ 'effective-tooling',
55
+ ];
56
+ export const CANONICAL_PQ_DEFICIT_CATEGORIES = [
57
+ 'vague-request',
58
+ 'missing-context',
59
+ 'late-constraint',
60
+ 'unclear-correction',
61
+ 'scope-drift',
62
+ 'missing-acceptance-criteria',
63
+ 'assumption-not-surfaced',
64
+ ];
65
+ export const CANONICAL_PQ_STRENGTH_CATEGORIES = [
66
+ 'precise-request',
67
+ 'effective-context',
68
+ 'productive-correction',
69
+ ];
70
+ export const CANONICAL_PQ_CATEGORIES = [
71
+ ...CANONICAL_PQ_DEFICIT_CATEGORIES,
72
+ ...CANONICAL_PQ_STRENGTH_CATEGORIES,
73
+ ];
74
+ export const PROMPT_QUALITY_CLASSIFICATION_GUIDANCE = `
75
+ PROMPT QUALITY CLASSIFICATION GUIDANCE:
76
+
77
+ Each finding captures a specific moment where the user's prompting either caused friction (deficit) or enabled productivity (strength).
78
+
79
+ DEFICIT CATEGORIES — classify prompting problems:
80
+ - "vague-request": Request lacked specificity needed for the AI to act without guessing. Missing file paths, function names, expected behavior, or concrete details.
81
+ NOT this category if the AI had enough context to succeed but failed anyway — that is an AI capability issue, not a prompting issue.
82
+
83
+ - "missing-context": Critical background knowledge about architecture, conventions, dependencies, or current state was not provided.
84
+ NOT this category if the information was available in the codebase and the AI could have found it by reading files — that is an AI context-gathering failure.
85
+
86
+ - "late-constraint": A requirement or constraint was provided AFTER the AI had already started implementing a different approach, causing rework.
87
+ NOT this category if the constraint was genuinely discovered during implementation (requirements changed). Only classify if the user KNEW the constraint before the session started.
88
+
89
+ - "unclear-correction": The user told the AI its output was wrong without explaining what was wrong or why. "That's not right", "try again", "no" without context.
90
+ NOT this category if the user gave a brief but sufficient correction ("use map instead of forEach" is clear enough).
91
+
92
+ - "scope-drift": The session objective shifted mid-conversation, or multiple unrelated objectives were addressed in one session.
93
+ NOT this category if the user is working through logically connected subtasks of one objective.
94
+
95
+ - "missing-acceptance-criteria": The user did not define what successful completion looks like, leading to back-and-forth about whether the output meets expectations.
96
+ NOT this category for exploratory sessions where the user is discovering what they want.
97
+
98
+ - "assumption-not-surfaced": The user held an unstated assumption that the AI could not reasonably infer from code or conversation.
99
+ NOT this category if the assumption was reasonable for the AI to make (e.g., standard coding conventions).
100
+
101
+ STRENGTH CATEGORIES — classify prompting successes (only when notably above average):
102
+ - "precise-request": Request included enough specificity (file paths, function names, expected behavior, error messages) that the AI could act correctly on the first attempt.
103
+
104
+ - "effective-context": User proactively shared architecture, conventions, prior decisions, or current state that the AI demonstrably used to make better decisions.
105
+
106
+ - "productive-correction": When the AI went off track, the user provided a correction that included WHAT was wrong, WHY, and enough context for the AI to redirect effectively on the next response.
107
+
108
+ CONTRASTIVE PAIRS:
109
+ - vague-request vs missing-context: Was the problem in HOW THE TASK WAS DESCRIBED (vague-request) or WHAT BACKGROUND KNOWLEDGE WAS ABSENT (missing-context)?
110
+ - late-constraint vs missing-context: Did the user EVENTUALLY provide it in the same session? Yes → late-constraint. Never → missing-context.
111
+ - missing-context vs assumption-not-surfaced: Is this a FACT the user could have copy-pasted (missing-context), or a BELIEF/PREFERENCE they held (assumption-not-surfaced)?
112
+ - scope-drift vs missing-acceptance-criteria: Did the user try to do TOO MANY THINGS (scope-drift) or ONE THING WITHOUT DEFINING SUCCESS (missing-acceptance-criteria)?
113
+ - unclear-correction vs vague-request: Was this the user's FIRST MESSAGE about this task (vague-request) or a RESPONSE TO AI OUTPUT (unclear-correction)?
114
+
115
+ DIMENSION SCORING (0-100):
116
+ - context_provision: How well did the user provide relevant background upfront?
117
+ 90+: Proactively shared architecture, constraints, conventions. 50-69: Notable gaps causing detours. <30: No context, AI working blind.
118
+ - request_specificity: How precise were task requests?
119
+ 90+: File paths, expected behavior, scope boundaries. 50-69: Mix of specific and vague. <30: Nearly all requests lacked detail.
120
+ - scope_management: How focused was the session?
121
+ 90+: Single clear objective, logical progression. 50-69: Some drift but primary goal met. <30: Unfocused, no clear objective.
122
+ - information_timing: Were requirements provided when needed?
123
+ 90+: All constraints front-loaded before implementation. 50-69: Some important requirements late. <30: Requirements drip-fed, constant corrections.
124
+ - correction_quality: How well did the user redirect the AI?
125
+ 90+: Corrections included what, why, and context. 50-69: Mix of clear and unclear. <30: Corrections gave almost no signal.
126
+ Score 75 if no corrections were needed (absence of corrections in a successful session = good prompting).
127
+
128
+ EDGE CASES:
129
+ - Short sessions (<5 user messages): Score conservatively. Do not penalize for missing elements unnecessary in quick tasks.
130
+ - Exploration sessions: Do not penalize for missing acceptance criteria or scope drift.
131
+ - Sessions where AI performed well despite vague prompts: Still classify deficits. Impact should be "low" since no visible cost.
132
+ - Agentic/delegation sessions: If the user gave a clear high-level directive and the AI autonomously planned and executed successfully, do not penalize for low message count or lack of micro-level specificity. Effective delegation IS good prompting. Focus on the quality of the initial delegation prompt.`;
133
// Guidance block injected into the effective-pattern analysis prompt.
// NOTE: this is a runtime string sent to the LLM — every character is load-bearing.
// It defines: a baseline-exclusion filter, 8 canonical pattern categories,
// contrastive pairs for disambiguation, and a 4-step "driver" decision tree.
export const EFFECTIVE_PATTERN_CLASSIFICATION_GUIDANCE = `
EFFECTIVE PATTERN CLASSIFICATION GUIDANCE:

Each effective pattern captures a technique or approach that contributed to a productive session outcome.

BASELINE EXCLUSION — do NOT classify these as patterns:
- Routine file reads at session start (Read/Glob/Grep on <5 files before editing)
- Following explicit user instructions (user said "run tests" → running tests is not a pattern)
- Basic tool usage (single file edits, standard CLI commands)
- Trivial self-corrections (typo fixes, minor syntax errors caught immediately)
Only classify behavior that is NOTABLY thorough, strategic, or beyond baseline expectations.

CATEGORIES — classify the TYPE of effective pattern:
- "structured-planning": Decomposed the task into explicit steps, defined scope boundaries, or established a plan BEFORE writing code. Signal: plan/task-list/scope-definition appears before implementation.
- "incremental-implementation": Work progressed in small, verifiable steps with validation between them. Signal: multiple small edits with checks between, not one large batch.
- "verification-workflow": Proactive correctness checks (builds, tests, linters, types) BEFORE considering work complete. Signal: test/build/lint commands when nothing was known broken.
- "systematic-debugging": Methodical investigation using structured techniques (binary search, log insertion, reproduction isolation). Signal: multiple targeted diagnostic steps, not random guessing.
- "self-correction": Recognized a wrong path and pivoted WITHOUT user correction. Signal: explicit acknowledgment of mistake + approach change. NOT this if the user pointed out the error.
- "context-gathering": NOTABLY thorough investigation before changes — reading 5+ files, cross-module exploration, schema/type/config review. Signal: substantial Read/Grep/Glob usage spanning multiple directories before any Edit/Write.
- "domain-expertise": Applied specific framework/API/language knowledge correctly on first attempt without searching. Signal: correct non-obvious API usage with no preceding search and no subsequent error. NOT this if files were read first — that is context-gathering.
- "effective-tooling": Leveraged advanced tool capabilities that multiplied productivity — agent delegation, parallel work, multi-file coordination, strategic mode selection. Signal: use of tool features beyond basic read/write/edit.

CONTRASTIVE PAIRS:
- structured-planning vs incremental-implementation: Planning = DECIDING what to do (before). Incremental = HOW you execute (during). Can have one without the other.
- context-gathering vs domain-expertise: Gathering = ACTIVE INVESTIGATION (reading files). Expertise = APPLYING EXISTING KNOWLEDGE without investigation. If files were read first → context-gathering.
- verification-workflow vs systematic-debugging: Verification = PROACTIVE (checking working code). Debugging = REACTIVE (investigating a failure).
- self-correction vs user-directed: Self-correction = AI caught own mistake unprompted. User said "that's wrong" → NOT self-correction.

DRIVER — 4-step decision tree (follow IN ORDER):
Step 1: Did user infrastructure enable this? (CLAUDE.md rules, agent configs, hookify hooks, custom commands, system prompts) → "user-driven"
Step 2: Did the user explicitly request this behavior? (asked for plan, requested tests, directed investigation) → "user-driven"
Step 3: Did the AI exhibit this without any user prompting or infrastructure? → "ai-driven"
Step 4: Both made distinct, identifiable contributions → "collaborative"
Use "collaborative" ONLY when you can name what EACH party contributed. If uncertain, prefer the more specific label.

When no canonical category fits, create a specific kebab-case category (a precise novel category is better than forcing a poor fit).`;
169
+ //# sourceMappingURL=prompt-constants.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"prompt-constants.js","sourceRoot":"","sources":["../../src/llm/prompt-constants.ts"],"names":[],"mappings":"AAAA,kFAAkF;AAClF,6EAA6E;AAE7E,wEAAwE;AACxE,sEAAsE;AACtE,gFAAgF;AAChF,MAAM,CAAC,MAAM,gCAAgC,GAAG;;;;;;;;;;;;;;;;;;;;;;;;;;;;0CA4BN,CAAC;AAE3C,MAAM,CAAC,MAAM,6BAA6B,GAAG;IAC3C,gBAAgB;IAChB,eAAe;IACf,mBAAmB;IACnB,yBAAyB;IACzB,cAAc;IACd,aAAa;IACb,mBAAmB;IACnB,mBAAmB;IACnB,oBAAoB;CACZ,CAAC;AAEX,MAAM,CAAC,MAAM,4BAA4B,GAAG;IAC1C,qBAAqB;IACrB,4BAA4B;IAC5B,uBAAuB;IACvB,sBAAsB;IACtB,iBAAiB;IACjB,mBAAmB;IACnB,kBAAkB;IAClB,mBAAmB;CACX,CAAC;AAEX,MAAM,CAAC,MAAM,+BAA+B,GAAG;IAC7C,eAAe;IACf,iBAAiB;IACjB,iBAAiB;IACjB,oBAAoB;IACpB,aAAa;IACb,6BAA6B;IAC7B,yBAAyB;CACjB,CAAC;AAEX,MAAM,CAAC,MAAM,gCAAgC,GAAG;IAC9C,iBAAiB;IACjB,mBAAmB;IACnB,uBAAuB;CACf,CAAC;AAEX,MAAM,CAAC,MAAM,uBAAuB,GAAG;IACrC,GAAG,+BAA+B;IAClC,GAAG,gCAAgC;CAC3B,CAAC;AAEX,MAAM,CAAC,MAAM,sCAAsC,GAAG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iTA0D2P,CAAC;AAElT,MAAM,CAAC,MAAM,yCAAyC,GAAG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;qIAmC4E,CAAC"}
@@ -0,0 +1,8 @@
1
+ import type { SQLiteMessageRow } from './prompt-types.js';
2
+ import { type SessionData } from './analysis-db.js';
3
+ import { type AnalysisOptions, type AnalysisResult } from './analysis-internal.js';
4
/**
 * Analyze prompt quality for a session.
 *
 * @param session - The session row whose prompts are being evaluated.
 * @param messages - All stored message rows for the session (user, assistant,
 *   and tool-result rows); the implementation filters to genuine human messages.
 * @param options - Optional progress callback and abort signal.
 * @returns A result object with `success`, the saved `insights`, and (on
 *   failure) an `error` description — it does not reject on analysis errors.
 */
export declare function analyzePromptQuality(session: SessionData, messages: SQLiteMessageRow[], options?: AnalysisOptions): Promise<AnalysisResult>;
8
+ //# sourceMappingURL=prompt-quality-analysis.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"prompt-quality-analysis.d.ts","sourceRoot":"","sources":["../../src/llm/prompt-quality-analysis.ts"],"names":[],"mappings":"AAMA,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,mBAAmB,CAAC;AAI1D,OAAO,EAIL,KAAK,WAAW,EACjB,MAAM,kBAAkB,CAAC;AAC1B,OAAO,EAAsC,KAAK,eAAe,EAAE,KAAK,cAAc,EAAE,MAAM,wBAAwB,CAAC;AAEvH;;GAEG;AACH,wBAAsB,oBAAoB,CACxC,OAAO,EAAE,WAAW,EACpB,QAAQ,EAAE,gBAAgB,EAAE,EAC5B,OAAO,CAAC,EAAE,eAAe,GACxB,OAAO,CAAC,cAAc,CAAC,CAmIzB"}
@@ -0,0 +1,133 @@
1
+ // Prompt quality analysis — isolated from the main session analysis pipeline.
2
+ // Extracted from analysis.ts to keep each analysis type in its own focused module.
3
+ import { createLLMClient, isLLMConfigured, loadLLMConfig } from './client.js';
4
+ import { calculateAnalysisCost } from './analysis-pricing.js';
5
+ import { saveAnalysisUsage } from './analysis-usage-db.js';
6
+ import { formatMessagesForAnalysis, classifyStoredUserMessage } from './message-format.js';
7
+ import { parsePromptQualityResponse } from './response-parsers.js';
8
+ import { SHARED_ANALYST_SYSTEM_PROMPT, buildCacheableConversationBlock, buildPromptQualityInstructions } from './prompts.js';
9
+ import { convertPromptQualityToInsightRow, saveInsightsToDb, deleteSessionInsights, } from './analysis-db.js';
10
+ import { MAX_INPUT_TOKENS, buildSessionMeta } from './analysis-internal.js';
11
/**
 * Analyze prompt quality for a session.
 *
 * Pipeline: gate checks (LLM configured, non-empty session, >= 2 genuine human
 * messages) → format + optionally truncate the conversation → one LLM chat call
 * → parse → persist the insight → record token usage/cost. All failure modes
 * return a `{ success: false, insights: [], error }` object; this function
 * only throws if something escapes the internal try/catch (it shouldn't).
 *
 * @param session  Session row; `id` and `project_name` are read here.
 * @param messages Stored message rows for the session, including tool-result rows.
 * @param options  Optional `{ onProgress, signal }` — both are optional and
 *                 accessed with `?.` throughout.
 */
export async function analyzePromptQuality(session, messages, options) {
    // Gate 1: no provider configured — bail before doing any work.
    if (!isLLMConfigured()) {
        return {
            success: false,
            insights: [],
            error: 'LLM not configured. Run `code-insights config llm` to configure a provider.',
        };
    }
    // Gate 2: nothing to analyze at all.
    if (messages.length === 0) {
        return {
            success: false,
            insights: [],
            error: 'No messages found for this session.',
        };
    }
    // Change 2: Filter to genuine human messages only (not tool-results or system artifacts).
    // Pre-change: a session with 1 human + 50 tool-result rows passed the gate incorrectly.
    // This prevented wasted LLM calls on sessions where there is nothing to evaluate.
    const humanMessages = messages.filter(m => m.type === 'user' && classifyStoredUserMessage(m.content) === 'human');
    if (humanMessages.length < 2) {
        return {
            success: false,
            insights: [],
            error: 'Not enough user messages to analyze prompt quality (need at least 2).',
        };
    }
    try {
        const startTime = Date.now();
        const client = createLLMClient();
        const formattedMessages = formatMessagesForAnalysis(messages);
        // If the formatted transcript exceeds the token budget, truncate by
        // character count proportional to the estimate, with a 0.8 safety margin
        // (token estimates are approximate), and append an explicit marker so the
        // model knows the tail is missing.
        let analysisInput = formattedMessages;
        const estimatedTokens = client.estimateTokens(formattedMessages);
        if (estimatedTokens > MAX_INPUT_TOKENS) {
            const targetLength = Math.floor((MAX_INPUT_TOKENS / estimatedTokens) * formattedMessages.length * 0.8);
            analysisInput = formattedMessages.slice(0, targetLength) + '\n\n[... conversation truncated for analysis ...]';
        }
        // Change 3: Pass structured session shape instead of raw message count.
        // "Total messages: 51" misled the LLM when 43 of those were tool-result rows.
        // toolExchangeCount is derived as a remainder: everything that is neither a
        // classified-human user message nor an assistant message.
        const assistantMessages = messages.filter(m => m.type === 'assistant');
        const toolExchangeCount = messages.length - humanMessages.length - assistantMessages.length;
        const sessionMeta = buildSessionMeta(session);
        const sessionShape = {
            humanMessageCount: humanMessages.length,
            assistantMessageCount: assistantMessages.length,
            toolExchangeCount,
        };
        options?.onProgress?.({ phase: 'analyzing' });
        // Single chat call: the conversation goes in a cacheable block so repeated
        // analyses of the same session can hit the provider's prompt cache; the
        // per-analysis instructions follow as a plain text part.
        const response = await client.chat([
            { role: 'system', content: SHARED_ANALYST_SYSTEM_PROMPT },
            { role: 'user', content: [
                    buildCacheableConversationBlock(analysisInput),
                    { type: 'text', text: buildPromptQualityInstructions(session.project_name, sessionShape, sessionMeta) },
                ] },
        ], { signal: options?.signal });
        const parsed = parsePromptQualityResponse(response.content);
        if (!parsed.success) {
            // Surface parser diagnostics (error type, response size/preview) so the
            // caller can log or display why the LLM output was unusable.
            return {
                success: false,
                insights: [],
                error: 'Failed to parse prompt quality analysis. Please try again.',
                error_type: parsed.error.error_type,
                response_length: parsed.error.response_length,
                response_preview: parsed.error.response_preview,
            };
        }
        options?.onProgress?.({ phase: 'saving' });
        const insight = convertPromptQualityToInsightRow(parsed.data, session);
        // Save new insight, then delete old prompt_quality insights
        // (insert-before-delete ordering, with the fresh id excluded from the
        // delete, so a prompt_quality insight exists at every point in time).
        saveInsightsToDb([insight]);
        deleteSessionInsights(session.id, {
            includeOnlyTypes: ['prompt_quality'],
            excludeIds: [insight.id],
        });
        // Record analysis cost to analysis_usage table (V7).
        // Skipped silently when the provider reported no usage data.
        const llmConfig = loadLLMConfig();
        if (llmConfig && response.usage) {
            const costUsd = calculateAnalysisCost(llmConfig.provider, llmConfig.model, {
                inputTokens: response.usage.inputTokens,
                outputTokens: response.usage.outputTokens,
                cacheCreationTokens: response.usage.cacheCreationTokens,
                cacheReadTokens: response.usage.cacheReadTokens,
            });
            saveAnalysisUsage({
                session_id: session.id,
                analysis_type: 'prompt_quality',
                provider: llmConfig.provider,
                model: llmConfig.model,
                input_tokens: response.usage.inputTokens,
                output_tokens: response.usage.outputTokens,
                cache_creation_tokens: response.usage.cacheCreationTokens,
                cache_read_tokens: response.usage.cacheReadTokens,
                estimated_cost_usd: costUsd,
                duration_ms: Date.now() - startTime,
                chunk_count: 1,
            });
        }
        return {
            success: true,
            insights: [insight],
            // Only include cache token fields that the provider actually reported,
            // rather than emitting explicit `undefined` properties.
            usage: response.usage ? {
                inputTokens: response.usage.inputTokens,
                outputTokens: response.usage.outputTokens,
                ...(response.usage.cacheCreationTokens !== undefined && { cacheCreationTokens: response.usage.cacheCreationTokens }),
                ...(response.usage.cacheReadTokens !== undefined && { cacheReadTokens: response.usage.cacheReadTokens }),
            } : undefined,
        };
    }
    catch (error) {
        // User-initiated cancellation (AbortSignal) is reported distinctly from
        // genuine API failures.
        if (error instanceof Error && error.name === 'AbortError') {
            return { success: false, insights: [], error: 'Analysis cancelled', error_type: 'abort' };
        }
        return {
            success: false,
            insights: [],
            error: error instanceof Error ? error.message : 'Prompt quality analysis failed',
            error_type: 'api_error',
        };
    }
}
133
+ //# sourceMappingURL=prompt-quality-analysis.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"prompt-quality-analysis.js","sourceRoot":"","sources":["../../src/llm/prompt-quality-analysis.ts"],"names":[],"mappings":"AAAA,8EAA8E;AAC9E,mFAAmF;AAEnF,OAAO,EAAE,eAAe,EAAE,eAAe,EAAE,aAAa,EAAE,MAAM,aAAa,CAAC;AAC9E,OAAO,EAAE,qBAAqB,EAAE,MAAM,uBAAuB,CAAC;AAC9D,OAAO,EAAE,iBAAiB,EAAE,MAAM,wBAAwB,CAAC;AAE3D,OAAO,EAAE,yBAAyB,EAAE,yBAAyB,EAAE,MAAM,qBAAqB,CAAC;AAC3F,OAAO,EAAE,0BAA0B,EAAE,MAAM,uBAAuB,CAAC;AACnE,OAAO,EAAE,4BAA4B,EAAE,+BAA+B,EAAE,8BAA8B,EAAE,MAAM,cAAc,CAAC;AAC7H,OAAO,EACL,gCAAgC,EAChC,gBAAgB,EAChB,qBAAqB,GAEtB,MAAM,kBAAkB,CAAC;AAC1B,OAAO,EAAE,gBAAgB,EAAE,gBAAgB,EAA6C,MAAM,wBAAwB,CAAC;AAEvH;;GAEG;AACH,MAAM,CAAC,KAAK,UAAU,oBAAoB,CACxC,OAAoB,EACpB,QAA4B,EAC5B,OAAyB;IAEzB,IAAI,CAAC,eAAe,EAAE,EAAE,CAAC;QACvB,OAAO;YACL,OAAO,EAAE,KAAK;YACd,QAAQ,EAAE,EAAE;YACZ,KAAK,EAAE,6EAA6E;SACrF,CAAC;IACJ,CAAC;IAED,IAAI,QAAQ,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;QAC1B,OAAO;YACL,OAAO,EAAE,KAAK;YACd,QAAQ,EAAE,EAAE;YACZ,KAAK,EAAE,qCAAqC;SAC7C,CAAC;IACJ,CAAC;IAED,0FAA0F;IAC1F,wFAAwF;IACxF,kFAAkF;IAClF,MAAM,aAAa,GAAG,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CACxC,CAAC,CAAC,IAAI,KAAK,MAAM,IAAI,yBAAyB,CAAC,CAAC,CAAC,OAAO,CAAC,KAAK,OAAO,CACtE,CAAC;IACF,IAAI,aAAa,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;QAC7B,OAAO;YACL,OAAO,EAAE,KAAK;YACd,QAAQ,EAAE,EAAE;YACZ,KAAK,EAAE,uEAAuE;SAC/E,CAAC;IACJ,CAAC;IAED,IAAI,CAAC;QACH,MAAM,SAAS,GAAG,IAAI,CAAC,GAAG,EAAE,CAAC;QAC7B,MAAM,MAAM,GAAG,eAAe,EAAE,CAAC;QACjC,MAAM,iBAAiB,GAAG,yBAAyB,CAAC,QAAQ,CAAC,CAAC;QAE9D,IAAI,aAAa,GAAG,iBAAiB,CAAC;QACtC,MAAM,eAAe,GAAG,MAAM,CAAC,cAAc,CAAC,iBAAiB,CAAC,CAAC;QACjE,IAAI,eAAe,GAAG,gBAAgB,EAAE,CAAC;YACvC,MAAM,YAAY,GAAG,IAAI,CAAC,KAAK,CAAC,CAAC,gBAAgB,GAAG,eAAe,CAAC,GAAG,iBAAiB,CAAC,MAAM,GAAG,GAAG,CAAC,CAAC;YACvG,aAAa,GAAG,iBAAiB,CAAC,KAAK,CAAC,CAAC,EAAE,YAAY,CAAC,GAAG,mDAAmD,CAAC;QACjH,CAAC;QAED,wEAAwE;QACxE,8EAA8E;QAC9E,MAAM,iBAAiB,GAAG,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,WAAW,CAAC,CAAC;QACvE,MAAM,iBAAiB,GAAG,QAAQ,CAAC,MAAM,GAAG,aAAa,CAAC,MAAM,GAAG,iBAAiB,CAAC,MAAM,CAAC;QAE5F,MAAM,WAAW,GAAG,gBAAgB,CA
AC,OAAO,CAAC,CAAC;QAC9C,MAAM,YAAY,GAAG;YACnB,iBAAiB,EAAE,aAAa,CAAC,MAAM;YACvC,qBAAqB,EAAE,iBAAiB,CAAC,MAAM;YAC/C,iBAAiB;SAClB,CAAC;QAEF,OAAO,EAAE,UAAU,EAAE,CAAC,EAAE,KAAK,EAAE,WAAW,EAAE,CAAC,CAAC;QAC9C,MAAM,QAAQ,GAAG,MAAM,MAAM,CAAC,IAAI,CAAC;YACjC,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,4BAA4B,EAAE;YACzD,EAAE,IAAI,EAAE,MAAM,EAAE,OAAO,EAAE;oBACvB,+BAA+B,CAAC,aAAa,CAAC;oBAC9C,EAAE,IAAI,EAAE,MAAe,EAAE,IAAI,EAAE,8BAA8B,CAAC,OAAO,CAAC,YAAY,EAAE,YAAY,EAAE,WAAW,CAAC,EAAE;iBACjH,EAAE;SACJ,EAAE,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;QAEhC,MAAM,MAAM,GAAG,0BAA0B,CAAC,QAAQ,CAAC,OAAO,CAAC,CAAC;QAC5D,IAAI,CAAC,MAAM,CAAC,OAAO,EAAE,CAAC;YACpB,OAAO;gBACL,OAAO,EAAE,KAAK;gBACd,QAAQ,EAAE,EAAE;gBACZ,KAAK,EAAE,4DAA4D;gBACnE,UAAU,EAAE,MAAM,CAAC,KAAK,CAAC,UAAU;gBACnC,eAAe,EAAE,MAAM,CAAC,KAAK,CAAC,eAAe;gBAC7C,gBAAgB,EAAE,MAAM,CAAC,KAAK,CAAC,gBAAgB;aAChD,CAAC;QACJ,CAAC;QAED,OAAO,EAAE,UAAU,EAAE,CAAC,EAAE,KAAK,EAAE,QAAQ,EAAE,CAAC,CAAC;QAC3C,MAAM,OAAO,GAAG,gCAAgC,CAAC,MAAM,CAAC,IAAI,EAAE,OAAO,CAAC,CAAC;QAEvE,4DAA4D;QAC5D,gBAAgB,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC;QAC5B,qBAAqB,CAAC,OAAO,CAAC,EAAE,EAAE;YAChC,gBAAgB,EAAE,CAAC,gBAAgB,CAAC;YACpC,UAAU,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC;SACzB,CAAC,CAAC;QAEH,qDAAqD;QACrD,MAAM,SAAS,GAAG,aAAa,EAAE,CAAC;QAClC,IAAI,SAAS,IAAI,QAAQ,CAAC,KAAK,EAAE,CAAC;YAChC,MAAM,OAAO,GAAG,qBAAqB,CAAC,SAAS,CAAC,QAAQ,EAAE,SAAS,CAAC,KAAK,EAAE;gBACzE,WAAW,EAAE,QAAQ,CAAC,KAAK,CAAC,WAAW;gBACvC,YAAY,EAAE,QAAQ,CAAC,KAAK,CAAC,YAAY;gBACzC,mBAAmB,EAAE,QAAQ,CAAC,KAAK,CAAC,mBAAmB;gBACvD,eAAe,EAAE,QAAQ,CAAC,KAAK,CAAC,eAAe;aAChD,CAAC,CAAC;YACH,iBAAiB,CAAC;gBAChB,UAAU,EAAE,OAAO,CAAC,EAAE;gBACtB,aAAa,EAAE,gBAAgB;gBAC/B,QAAQ,EAAE,SAAS,CAAC,QAAQ;gBAC5B,KAAK,EAAE,SAAS,CAAC,KAAK;gBACtB,YAAY,EAAE,QAAQ,CAAC,KAAK,CAAC,WAAW;gBACxC,aAAa,EAAE,QAAQ,CAAC,KAAK,CAAC,YAAY;gBAC1C,qBAAqB,EAAE,QAAQ,CAAC,KAAK,CAAC,mBAAmB;gBACzD,iBAAiB,EAAE,QAAQ,CAAC,KAAK,CAAC,eAAe;gBACjD,kBAAkB,EAAE,OAAO;gBAC3B,WAAW,EAAE,IAAI,CAAC,GAAG,EAAE,GAAG,SAAS;gBACnC,WAAW,EAAE,CAAC;aACf,CAAC,CAAC;QACL,CAAC;QAED,OAAO;YACL,OAAO,EAAE,IAAI;YACb,QAAQ,
EAAE,CAAC,OAAO,CAAC;YACnB,KAAK,EAAE,QAAQ,CAAC,KAAK,CAAC,CAAC,CAAC;gBACtB,WAAW,EAAE,QAAQ,CAAC,KAAK,CAAC,WAAW;gBACvC,YAAY,EAAE,QAAQ,CAAC,KAAK,CAAC,YAAY;gBACzC,GAAG,CAAC,QAAQ,CAAC,KAAK,CAAC,mBAAmB,KAAK,SAAS,IAAI,EAAE,mBAAmB,EAAE,QAAQ,CAAC,KAAK,CAAC,mBAAmB,EAAE,CAAC;gBACpH,GAAG,CAAC,QAAQ,CAAC,KAAK,CAAC,eAAe,KAAK,SAAS,IAAI,EAAE,eAAe,EAAE,QAAQ,CAAC,KAAK,CAAC,eAAe,EAAE,CAAC;aACzG,CAAC,CAAC,CAAC,SAAS;SACd,CAAC;IACJ,CAAC;IAAC,OAAO,KAAK,EAAE,CAAC;QACf,IAAI,KAAK,YAAY,KAAK,IAAI,KAAK,CAAC,IAAI,KAAK,YAAY,EAAE,CAAC;YAC1D,OAAO,EAAE,OAAO,EAAE,KAAK,EAAE,QAAQ,EAAE,EAAE,EAAE,KAAK,EAAE,oBAAoB,EAAE,UAAU,EAAE,OAAO,EAAE,CAAC;QAC5F,CAAC;QACD,OAAO;YACL,OAAO,EAAE,KAAK;YACd,QAAQ,EAAE,EAAE;YACZ,KAAK,EAAE,KAAK,YAAY,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,gCAAgC;YAChF,UAAU,EAAE,WAAW;SACxB,CAAC;IACJ,CAAC;AACH,CAAC"}
@@ -0,0 +1,26 @@
1
// Human-readable display labels keyed by canonical prompt-quality category.
export declare const PQ_CATEGORY_LABELS: Record<string, string>;
/**
 * Normalize a prompt quality category to the closest canonical category.
 * Returns the original category if no close match is found.
 *
 * Matching rules (in order):
 * 1. Exact match against canonical list → return as-is
 * 1.5. Explicit alias match → return alias target (may be non-canonical)
 * 2. Levenshtein distance <= 2 → return canonical match
 * 3. Substring match (category contains canonical or vice versa) → return canonical
 * 4. No match → return original (novel category)
 *
 * Note: alias targets in PQ_ALIASES bypass the canonical check intentionally.
 */
export declare function normalizePromptQualityCategory(category: string): string;
/**
 * Get a human-readable label for a prompt quality category.
 * Falls back to Title Case conversion for novel categories.
 */
export declare function getPQCategoryLabel(category: string): string;
/**
 * Get the type (deficit or strength) for a prompt quality category.
 * Novel categories default to deficit.
 */
export declare function getPQCategoryType(category: string): 'deficit' | 'strength';
26
+ //# sourceMappingURL=prompt-quality-normalize.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"prompt-quality-normalize.d.ts","sourceRoot":"","sources":["../../src/llm/prompt-quality-normalize.ts"],"names":[],"mappings":"AAQA,eAAO,MAAM,kBAAkB,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAWrD,CAAC;AA6EF;;;;;;;;;;;;GAYG;AACH,wBAAgB,8BAA8B,CAAC,QAAQ,EAAE,MAAM,GAAG,MAAM,CAKvE;AAED;;;GAGG;AACH,wBAAgB,kBAAkB,CAAC,QAAQ,EAAE,MAAM,GAAG,MAAM,CAE3D;AAED;;;GAGG;AACH,wBAAgB,iBAAiB,CAAC,QAAQ,EAAE,MAAM,GAAG,SAAS,GAAG,UAAU,CAE1E"}
@@ -0,0 +1,116 @@
1
+ // Prompt quality category normalization.
2
+ // Clusters similar free-form categories to canonical ones during aggregation.
3
+ // Delegates to normalize-utils.ts for the shared levenshtein/normalizeCategory algorithm.
4
+ import { CANONICAL_PQ_CATEGORIES, CANONICAL_PQ_STRENGTH_CATEGORIES } from './prompt-constants.js';
5
+ import { normalizeCategory, kebabToTitleCase } from './normalize-utils.js';
6
// Human-readable labels for each canonical category.
// Keys must stay in sync with CANONICAL_PQ_CATEGORIES in prompt-constants.js;
// categories without an entry here fall back to kebabToTitleCase at lookup time.
export const PQ_CATEGORY_LABELS = {
    // Deficit categories.
    'vague-request': 'Vague Request',
    'missing-context': 'Missing Context',
    'late-constraint': 'Late Constraint',
    'unclear-correction': 'Unclear Correction',
    'scope-drift': 'Scope Drift',
    'missing-acceptance-criteria': 'Missing Acceptance Criteria',
    'assumption-not-surfaced': 'Assumption Not Surfaced',
    // Strength categories.
    'precise-request': 'Precise Request',
    'effective-context': 'Effective Context',
    'productive-correction': 'Productive Correction',
};
// Set of canonical strength category names; anything not in this set is
// treated as a deficit (see getPQCategoryType below).
const STRENGTH_SET = new Set(CANONICAL_PQ_STRENGTH_CATEGORIES);
20
// Explicit alias map for clustering emergent category variants.
// Targets don't need to be in CANONICAL_PQ_CATEGORIES —
// this clusters semantically-equivalent novel categories together.
// Alias lookup runs AFTER exact canonical match but BEFORE Levenshtein,
// so well-known emergent variants are clustered deterministically.
// Keys are free-form categories the LLM has been observed to emit.
const PQ_ALIASES = {
    // vague-request variants
    'vague-instructions': 'vague-request',
    'unclear-request': 'vague-request',
    'imprecise-prompting': 'vague-request',
    'ambiguous-request': 'vague-request',
    'incomplete-request': 'vague-request',
    'generic-request': 'vague-request',
    // missing-context variants
    'missing-information': 'missing-context',
    'insufficient-context': 'missing-context',
    'no-context': 'missing-context',
    'lack-of-context': 'missing-context',
    'missing-background': 'missing-context',
    // late-constraint variants
    'late-context': 'late-constraint',
    'late-requirements': 'late-constraint',
    'piecemeal-requirements': 'late-constraint',
    'drip-fed-requirements': 'late-constraint',
    'incremental-requirements': 'late-constraint',
    'late-specification': 'late-constraint',
    // unclear-correction variants
    'unclear-feedback': 'unclear-correction',
    'vague-correction': 'unclear-correction',
    'unhelpful-correction': 'unclear-correction',
    'vague-feedback': 'unclear-correction',
    // scope-drift variants
    'context-drift': 'scope-drift',
    'objective-bloat': 'scope-drift',
    'session-bloat': 'scope-drift',
    'topic-switching': 'scope-drift',
    'scope-creep': 'scope-drift',
    // missing-acceptance-criteria variants
    'no-acceptance-criteria': 'missing-acceptance-criteria',
    'undefined-done': 'missing-acceptance-criteria',
    'no-definition-of-done': 'missing-acceptance-criteria',
    'unclear-success-criteria': 'missing-acceptance-criteria',
    // assumption-not-surfaced variants
    'hidden-assumption': 'assumption-not-surfaced',
    'unstated-assumption': 'assumption-not-surfaced',
    'implicit-assumption': 'assumption-not-surfaced',
    'unspoken-expectation': 'assumption-not-surfaced',
    // precise-request variants (strengths)
    'clear-request': 'precise-request',
    'specific-request': 'precise-request',
    'well-specified-request': 'precise-request',
    'detailed-request': 'precise-request',
    // effective-context variants (strengths)
    'good-context': 'effective-context',
    'upfront-context': 'effective-context',
    'proactive-context': 'effective-context',
    'rich-context': 'effective-context',
    // productive-correction variants (strengths)
    'clear-correction': 'productive-correction',
    'effective-feedback': 'productive-correction',
    'helpful-correction': 'productive-correction',
    'constructive-feedback': 'productive-correction',
};
83
/**
 * Normalize a prompt quality category to the closest canonical category.
 * Returns the original category if no close match is found.
 *
 * Matching rules (in order):
 * 1. Exact match against canonical list → return as-is
 * 1.5. Explicit alias match → return alias target (may be non-canonical)
 * 2. Levenshtein distance <= 2 → return canonical match
 * 3. Substring match (category contains canonical or vice versa) → return canonical
 * 4. No match → return original (novel category)
 *
 * Note: alias targets in PQ_ALIASES bypass the canonical check intentionally.
 *
 * @param {string} category - Free-form kebab-case category from the LLM.
 * @returns {string} Canonical category, alias target, or the input unchanged.
 */
export function normalizePromptQualityCategory(category) {
    // All matching logic lives in the shared normalizeCategory helper; this
    // wrapper only binds the prompt-quality canonical list and alias table.
    const pqNormalizationOptions = {
        canonicalCategories: CANONICAL_PQ_CATEGORIES,
        aliases: PQ_ALIASES,
    };
    return normalizeCategory(category, pqNormalizationOptions);
}
102
/**
 * Get a human-readable label for a prompt quality category.
 * Falls back to Title Case conversion for novel categories.
 *
 * @param {string} category - Kebab-case category (canonical or novel).
 * @returns {string} Display label.
 */
export function getPQCategoryLabel(category) {
    // Use an own-property check instead of `PQ_CATEGORY_LABELS[category] ?? ...`:
    // categories are free-form LLM output, so a value like "constructor" or
    // "toString" would otherwise resolve through Object.prototype and return a
    // Function instead of a string label.
    if (Object.hasOwn(PQ_CATEGORY_LABELS, category)) {
        return PQ_CATEGORY_LABELS[category];
    }
    return kebabToTitleCase(category);
}
109
/**
 * Get the type (deficit or strength) for a prompt quality category.
 * Novel categories default to deficit.
 *
 * @param {string} category - Kebab-case category name.
 * @returns {'deficit' | 'strength'}
 */
export function getPQCategoryType(category) {
    // Membership in the canonical strength set is the only signal; everything
    // else — canonical deficits and novel categories alike — is a deficit.
    if (STRENGTH_SET.has(category)) {
        return 'strength';
    }
    return 'deficit';
}
116
+ //# sourceMappingURL=prompt-quality-normalize.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"prompt-quality-normalize.js","sourceRoot":"","sources":["../../src/llm/prompt-quality-normalize.ts"],"names":[],"mappings":"AAAA,yCAAyC;AACzC,8EAA8E;AAC9E,0FAA0F;AAE1F,OAAO,EAAE,uBAAuB,EAAE,gCAAgC,EAAE,MAAM,uBAAuB,CAAC;AAClG,OAAO,EAAE,iBAAiB,EAAE,gBAAgB,EAAE,MAAM,sBAAsB,CAAC;AAE3E,qDAAqD;AACrD,MAAM,CAAC,MAAM,kBAAkB,GAA2B;IACxD,eAAe,EAAE,eAAe;IAChC,iBAAiB,EAAE,iBAAiB;IACpC,iBAAiB,EAAE,iBAAiB;IACpC,oBAAoB,EAAE,oBAAoB;IAC1C,aAAa,EAAE,aAAa;IAC5B,6BAA6B,EAAE,6BAA6B;IAC5D,yBAAyB,EAAE,yBAAyB;IACpD,iBAAiB,EAAE,iBAAiB;IACpC,mBAAmB,EAAE,mBAAmB;IACxC,uBAAuB,EAAE,uBAAuB;CACjD,CAAC;AAEF,MAAM,YAAY,GAAG,IAAI,GAAG,CAAS,gCAAgC,CAAC,CAAC;AAEvE,gEAAgE;AAChE,wDAAwD;AACxD,mEAAmE;AACnE,wEAAwE;AACxE,mEAAmE;AACnE,MAAM,UAAU,GAA2B;IACzC,yBAAyB;IACzB,oBAAoB,EAAE,eAAe;IACrC,iBAAiB,EAAE,eAAe;IAClC,qBAAqB,EAAE,eAAe;IACtC,mBAAmB,EAAE,eAAe;IACpC,oBAAoB,EAAE,eAAe;IACrC,iBAAiB,EAAE,eAAe;IAElC,2BAA2B;IAC3B,qBAAqB,EAAE,iBAAiB;IACxC,sBAAsB,EAAE,iBAAiB;IACzC,YAAY,EAAE,iBAAiB;IAC/B,iBAAiB,EAAE,iBAAiB;IACpC,oBAAoB,EAAE,iBAAiB;IAEvC,2BAA2B;IAC3B,cAAc,EAAE,iBAAiB;IACjC,mBAAmB,EAAE,iBAAiB;IACtC,wBAAwB,EAAE,iBAAiB;IAC3C,uBAAuB,EAAE,iBAAiB;IAC1C,0BAA0B,EAAE,iBAAiB;IAC7C,oBAAoB,EAAE,iBAAiB;IAEvC,8BAA8B;IAC9B,kBAAkB,EAAE,oBAAoB;IACxC,kBAAkB,EAAE,oBAAoB;IACxC,sBAAsB,EAAE,oBAAoB;IAC5C,gBAAgB,EAAE,oBAAoB;IAEtC,uBAAuB;IACvB,eAAe,EAAE,aAAa;IAC9B,iBAAiB,EAAE,aAAa;IAChC,eAAe,EAAE,aAAa;IAC9B,iBAAiB,EAAE,aAAa;IAChC,aAAa,EAAE,aAAa;IAE5B,uCAAuC;IACvC,wBAAwB,EAAE,6BAA6B;IACvD,gBAAgB,EAAE,6BAA6B;IAC/C,uBAAuB,EAAE,6BAA6B;IACtD,0BAA0B,EAAE,6BAA6B;IAEzD,mCAAmC;IACnC,mBAAmB,EAAE,yBAAyB;IAC9C,qBAAqB,EAAE,yBAAyB;IAChD,qBAAqB,EAAE,yBAAyB;IAChD,sBAAsB,EAAE,yBAAyB;IAEjD,uCAAuC;IACvC,eAAe,EAAE,iBAAiB;IAClC,kBAAkB,EAAE,iBAAiB;IACrC,wBAAwB,EAAE,iBAAiB;IAC3C,kBAAkB,EAAE,iBAAiB;IAErC,yCAAyC;IACzC,cAAc,EAAE,mBAAmB;IACnC,iBAAiB,EAAE,mBAAmB;IACtC,mBAAmB,EAAE,mBAAmB;IACxC,cAAc,EAAE,mBAAmB;IAEnC,6CAA6C;IAC7C,kBAAkB,EAAE,uBAAuB;IAC3C,oBAAoB,EAAE,uBAAuB;IAC7C,oBAAoB,EAAE,uBAAuB;IAC7C,uBAAuB,EAAE,uBAAuB;CACjD,CAAC;AAEF;;;;;
;;;;;;;GAYG;AACH,MAAM,UAAU,8BAA8B,CAAC,QAAgB;IAC7D,OAAO,iBAAiB,CAAC,QAAQ,EAAE;QACjC,mBAAmB,EAAE,uBAAuB;QAC5C,OAAO,EAAE,UAAU;KACpB,CAAC,CAAC;AACL,CAAC;AAED;;;GAGG;AACH,MAAM,UAAU,kBAAkB,CAAC,QAAgB;IACjD,OAAO,kBAAkB,CAAC,QAAQ,CAAC,IAAI,gBAAgB,CAAC,QAAQ,CAAC,CAAC;AACpE,CAAC;AAED;;;GAGG;AACH,MAAM,UAAU,iBAAiB,CAAC,QAAgB;IAChD,OAAO,YAAY,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,SAAS,CAAC;AAC7D,CAAC"}