@adia-ai/a2ui-retrieval 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,135 @@
1
+ /**
2
+ * Pattern Promotion — Automatically identifies generations that should
3
+ * become reusable patterns based on quality signals and user feedback.
4
+ *
5
+ * Promotion criteria (all must be met):
6
+ * - Generation score >= 95
7
+ * - Feedback rating >= 4 (if feedback exists)
8
+ * - Intent alignment score >= 0.8 (if checked)
9
+ * - User didn't heavily edit the output
10
+ * - Explicit shouldBePattern flag (from feedback) OR auto-detected
11
+ *
12
+ * Auto-detection: a generation is auto-promoted if it scores high on all
13
+ * dimensions and uses a novel combination of components not already in
14
+ * the pattern library.
15
+ */
16
+
17
+ import { getAllPatterns } from './pattern-library.js';
18
+
19
/**
 * Decide whether a generation qualifies for promotion to a named pattern.
 *
 * @param {object} opts
 * @param {number} opts.score — Validation score (0-100)
 * @param {object} [opts.feedback] — User feedback { rating, userEdited, shouldBePattern }
 * @param {object} [opts.alignment] — Intent alignment { score }
 * @param {string[]} opts.componentTypes — Component types used in generation
 * @param {string} opts.intent — Original intent
 * @returns {{ shouldPromote: boolean, confidence: number, reason: string, suggestedName: string }}
 */
export function evaluatePromotion({ score, feedback, alignment, componentTypes, intent }) {
  const signals = [];
  let total = 0;

  // Hard gate: anything scoring below 95 is rejected outright.
  if (score < 95) {
    return { shouldPromote: false, confidence: 0, reason: `Score ${score} < 95`, suggestedName: '' };
  }

  // Validation score contributes up to 0.3.
  total += (score / 100) * 0.3;
  signals.push(`score: ${score}/100`);

  // Feedback signals: up to 0.2 from the rating plus 0.15 for an explicit
  // pattern flag, minus 0.15 if the user had to edit the output.
  if (feedback) {
    if (feedback.rating >= 4) {
      total += (feedback.rating / 5) * 0.2;
      signals.push(`rating: ${feedback.rating}/5`);
    } else if (feedback.rating > 0) {
      // Low rating is a hard reject. NOTE: this path reports the confidence
      // accumulated so far, unrounded (matches existing caller expectations).
      return { shouldPromote: false, confidence: total, reason: `Rating ${feedback.rating} < 4`, suggestedName: '' };
    }

    if (feedback.userEdited) {
      total -= 0.15;
      signals.push('user edited (penalty)');
    }

    if (feedback.shouldBePattern) {
      total += 0.15;
      signals.push('user flagged as pattern');
    }
  } else {
    // No feedback at all — grant partial confidence from the score alone.
    total += 0.1;
  }

  // Alignment signals: bonus for strong alignment, penalty otherwise.
  if (alignment) {
    const pct = Math.round(alignment.score * 100);
    if (alignment.score >= 0.8) {
      total += alignment.score * 0.2;
      signals.push(`alignment: ${pct}%`);
    } else {
      total -= 0.1;
      signals.push(`low alignment: ${pct}%`);
    }
  }

  // Novelty bonus: only combinations meaningfully different from the
  // existing pattern library (> 0.5 novelty) earn extra confidence.
  const freshness = assessNovelty(componentTypes);
  if (freshness > 0.5) {
    total += freshness * 0.2;
    signals.push(`novelty: ${Math.round(freshness * 100)}%`);
  }

  // Final decision at the 0.5 confidence threshold.
  return {
    shouldPromote: total >= 0.5,
    confidence: Math.round(total * 100) / 100,
    reason: signals.join(', '),
    suggestedName: generatePatternName(intent),
  };
}
95
+
96
/**
 * Assess how novel a component combination is compared to existing patterns.
 * Computes the best Jaccard similarity against every library pattern and
 * inverts it: 0 means an exact duplicate exists, 1 means completely novel
 * (or the library is empty).
 *
 * @param {string[]} componentTypes
 * @returns {number} — 0 (duplicate) to 1 (completely novel)
 */
function assessNovelty(componentTypes) {
  const patterns = getAllPatterns();
  if (patterns.length === 0) return 1;

  const candidate = new Set(componentTypes);
  let closestMatch = 0;

  for (const entry of patterns) {
    const known = new Set(entry.components || []);
    let shared = 0;
    for (const type of candidate) {
      if (known.has(type)) shared += 1;
    }
    const combined = new Set([...candidate, ...known]).size;
    const similarity = combined > 0 ? shared / combined : 0;
    if (similarity > closestMatch) closestMatch = similarity;
  }

  // High overlap with any existing pattern means low novelty.
  return 1 - closestMatch;
}
121
+
122
/**
 * Generate a suggested kebab-case pattern name from an intent string.
 * Lowercases, strips common filler verbs/articles, collapses runs of
 * non-alphanumerics to single hyphens, trims edge hyphens, and truncates
 * to 40 characters.
 *
 * Fix: edge hyphens are trimmed again AFTER the 40-char truncation, so a
 * cut that lands just past a hyphen can no longer produce a name with a
 * dangling trailing '-'.
 *
 * @param {string} intent
 * @returns {string} — non-empty slug; 'unnamed-pattern' if nothing survives
 */
function generatePatternName(intent) {
  const slug = (intent || 'unnamed')
    .toLowerCase()
    .replace(/\b(create|build|make|show|add|generate|a|an|the|with|and|for|of)\b/g, '')
    .replace(/[^a-z0-9]+/g, '-')
    .replace(/^-|-$/g, '')
    .slice(0, 40)
    // Re-trim: truncation may have exposed a trailing hyphen.
    .replace(/-+$/g, '');
  return slug || 'unnamed-pattern';
}
@@ -0,0 +1,211 @@
1
+ /**
2
+ * Prompt Analyzer — baseline ingestion stage for the gen-ui pipeline.
3
+ *
4
+ * Every user intent enters through here, regardless of engine (monolithic,
5
+ * zettel) or mode (instant, pro, thinking). The analyzer extracts structured
6
+ * signals from the raw prompt — what concepts the user is gesturing at, which
7
+ * AdiaUI components are implied, what implicit requirements a good designer
8
+ * would add — and produces a "steelmanned" enriched brief that downstream
9
+ * stages reason against.
10
+ *
11
+ * Output schema:
12
+ * {
13
+ * raw: string // user's original intent, verbatim
14
+ * concepts: string[] // 2-5 short concept tags ("commerce", "auth", "data-display")
15
+ * entities: string[] // explicit nouns from the prompt ("image", "title", "price")
16
+ * impliedComponents: string[] // 4-12 AdiaUI component names ("Card", "Image", "Button")
17
+ * styleHints: string[] // visual/style hints ("3D", "minimal", "dense"), [] if none
18
+ * steelman: string // 1-2 sentence enriched brief
19
+ * analyzed: boolean // true if LLM call succeeded; false if fell back to passthrough
20
+ * }
21
+ *
22
+ * Failure mode: if the LLM call throws or returns unparseable JSON, we
23
+ * gracefully degrade to a passthrough analysis (steelman = raw intent, no
24
+ * extracted signals). Generation continues; quality may suffer but nothing
25
+ * breaks. See diagnosis report 2026-04-19 for why silent failure is the right
26
+ * default here (vs. raising) — the analyzer is enrichment, not a gate.
27
+ *
28
+ * Latency budget: one Haiku call, typically 800-1500ms. This is the new floor
29
+ * for every generation regardless of mode.
30
+ */
31
+
32
// Component vocabulary derived from the catalog. Two artifacts:
// - displayList: human-readable list shown to the LLM, with aliases inlined
//   ("Swiper (also: Carousel, Slideshow)") so the LLM knows it can use any.
// - allowedSet: canonical AND alias names accepted in impliedComponents,
//   because the runtime registry already maps aliases to the right tag.
// Loaded once at module init from the catalog. Changes require server restart.
//
// Caches the in-flight PROMISE (not the resolved value) so that concurrent
// first calls share one catalog load instead of racing and each re-reading
// the file. Awaiting the cached promise from this async function yields the
// same { displayList, allowedSet } object to every caller.
let _vocab = null;

/**
 * Load (and memoize) the component vocabulary.
 * @returns {Promise<{displayList: string[], allowedSet: Set<string>}>}
 */
async function getVocab() {
  if (!_vocab) _vocab = loadVocab();
  return _vocab;
}

/**
 * Build the vocabulary from the catalog file. Node reads it from disk
 * relative to this module; browsers fetch it relative to the bundle URL.
 * Any failure yields an empty vocabulary — the analyzer still works, just
 * unconstrained.
 */
async function loadVocab() {
  let catalog = {};
  try {
    const IS_NODE = typeof process !== 'undefined' && process.versions?.node;
    if (IS_NODE) {
      const fs = await import(/* @vite-ignore */ 'node:fs/promises');
      const path = await import(/* @vite-ignore */ 'node:path');
      const url = await import(/* @vite-ignore */ 'node:url');
      const __dirname = path.dirname(url.fileURLToPath(import.meta.url));
      const raw = await fs.readFile(path.join(__dirname, '../corpus/patterns/_components.json'), 'utf8');
      catalog = JSON.parse(raw);
    } else {
      const resp = await fetch(new URL('../corpus/patterns/_components.json', import.meta.url));
      if (resp.ok) catalog = await resp.json();
    }
  } catch { /* empty vocab — analyzer will still work, just unconstrained */ }

  const displayList = [];
  const allowedSet = new Set();
  for (const [name, data] of Object.entries(catalog)) {
    allowedSet.add(name);
    const aliases = Array.isArray(data?.aliases) ? data.aliases : [];
    for (const a of aliases) allowedSet.add(a);
    displayList.push(aliases.length ? `${name} (also: ${aliases.join(', ')})` : name);
  }
  return { displayList, allowedSet };
}
68
+
69
/**
 * Analyze a user prompt — extract concepts, entities, implied components,
 * style hints, and produce a steelmanned brief.
 *
 * Degrades gracefully: every failure path (missing adapter, adapter throw,
 * empty or truncated response, unparseable JSON) returns the passthrough
 * analysis with `analyzed: false` instead of raising — see the module
 * docstring for why silent degradation is the intended behavior here.
 *
 * @param {object} opts
 * @param {string} opts.intent — Raw user intent
 * @param {object} [opts.llmAdapter] — LLM adapter (must implement .complete())
 * @param {object} [opts.domain] — Domain classification from domain-router (optional context)
 * @returns {Promise<object>} Analysis object (see module docstring for schema)
 */
export async function analyzePrompt({ intent, llmAdapter, domain }) {
  // Fallback result shared by every failure path: raw intent, no extracted
  // signals. `analyzed: false` tells downstream stages enrichment was skipped.
  const passthrough = {
    raw: intent,
    concepts: [],
    entities: [],
    impliedComponents: [],
    styleHints: [],
    steelman: intent,
    analyzed: false,
  };

  // Nothing to analyze, or no way to call the LLM.
  if (!intent || !llmAdapter) return passthrough;

  const { displayList, allowedSet } = await getVocab();
  // If the catalog vocabulary failed to load (empty displayList), fall back
  // to a hard-coded core component list so the prompt stays constrained.
  const componentList = displayList.length
    ? displayList.join(', ')
    : 'Card, Section, Header, Footer, Column, Row, Grid, Button, Input, Text, Image, Badge, Icon, Avatar, Alert, Modal, Tabs, Accordion';

  // Only forward the domain classification when it carries real signal
  // (a named domain with confidence > 0).
  const domainHint = domain?.domain && domain.confidence > 0
    ? `\nDomain classifier suggests: ${domain.domain} (confidence ${domain.confidence})`
    : '';

  const systemPrompt = `You are a UI brief enricher for the AdiaUI design system. Given a terse or under-specified user prompt, you extract structured signals and produce an enriched brief that a downstream UI generator can use to produce a polished, complete UI.

Your output is JSON with this exact shape — no markdown, no explanation:
{
"concepts": [string], // 2-5 short concept tags (e.g. "commerce", "authentication", "data-display", "navigation")
"entities": [string], // explicit nouns from the prompt (lowercase, singular)
"implied_components": [string],// 4-12 AdiaUI components a good designer would include
"style_hints": [string], // visual/style adjectives ("minimal", "dense", "3D", "playful"), [] if none stated
"steelman": string // 1-2 sentence enriched brief
}

AdiaUI components you may name in implied_components (use these names exactly): ${componentList}

Rules:
- Extract entities verbatim from the prompt — do not invent.
- implied_components should include both the obvious (Card, Button) AND the ones a thoughtful designer would add (Badge for sale tag on a product, Avatar for a user row, EmptyState for empty data).
- The steelman expands the prompt into what the user MEANT, not what they SAID. Add the implicit requirements: typical sub-elements, sensible defaults, a header, a footer, common variants. Do NOT add scope the user wouldn't want (e.g. don't turn "login form" into "login form with social auth" unless that's a near-universal expectation).
- Keep steelman under 250 characters.
- If the prompt is already well-specified (>15 words, multiple entities), the steelman is mostly a faithful restatement.

Examples:
Input: "product card"
Output: {"concepts":["commerce","product-display"],"entities":["product"],"implied_components":["Card","Image","Text","Badge","Button"],"style_hints":[],"steelman":"E-commerce product card with image, title, price, optional sale badge, and primary 'Add to cart' button. Card layout, single product."}

Input: "settings page with toggles for notifications, dark mode, and privacy"
Output: {"concepts":["settings","preferences"],"entities":["notifications","dark mode","privacy","toggles"],"implied_components":["Card","Header","Section","Toggle","Text","Divider"],"style_hints":[],"steelman":"Settings page with three toggle rows for notifications, dark mode, and privacy. Each row has a label, description, and toggle. Grouped under a header card."}

Input: "make it 3D"
Output: {"concepts":["style-modifier"],"entities":[],"implied_components":[],"style_hints":["3D","depth","shadow"],"steelman":"Modify the existing canvas to feel three-dimensional — add elevation, layered shadows, and depth-suggesting transforms. Apply to existing components without changing structure."}`;

  const userMsg = `Prompt: "${intent}"${domainHint}`;

  let response;
  try {
    response = await llmAdapter.complete({
      messages: [{ role: 'user', content: userMsg }],
      systemPrompt,
    });
  } catch {
    // Adapter errors are swallowed by design — the analyzer is enrichment,
    // not a gate (see module docstring).
    return passthrough;
  }

  const text = response?.content;
  if (!text) return passthrough;

  // Truncation: if the analyzer LLM hit max_tokens we'd rather use the raw
  // prompt than a half-parsed analysis that misleads downstream stages.
  // (The three accepted stop reasons cover the adapter variants seen so far;
  // NOTE(review): any new adapter stop-reason spelling must be added here.)
  if (response.stopReason && response.stopReason !== 'end' && response.stopReason !== 'end_turn' && response.stopReason !== 'stop') {
    return passthrough;
  }

  // Try direct parse, then code-fence extraction, then outermost JSON.
  let parsed = null;
  try { parsed = JSON.parse(text.trim()); } catch { /* fall through */ }
  if (!parsed) {
    // Model wrapped the JSON in a ```json fence despite instructions.
    const fence = text.match(/```(?:json)?\s*\n?([\s\S]*?)```/);
    if (fence) try { parsed = JSON.parse(fence[1].trim()); } catch { /* fall through */ }
  }
  if (!parsed) {
    // Last resort: slice the outermost {...} span and try that.
    const start = text.indexOf('{');
    const end = text.lastIndexOf('}');
    if (start >= 0 && end > start) {
      try { parsed = JSON.parse(text.slice(start, end + 1)); } catch { /* fall through */ }
    }
  }
  if (!parsed || typeof parsed !== 'object') return passthrough;

  // Constrain impliedComponents to the allowed vocab (canonical names AND
  // their aliases — runtime registry maps aliases to the right tag, and the
  // LLM was told it can use either, so both are accepted here).
  // An empty allowedSet (catalog failed to load) accepts everything.
  const impliedComponents = Array.isArray(parsed.implied_components)
    ? parsed.implied_components.filter(c => typeof c === 'string' && (allowedSet.size === 0 || allowedSet.has(c)))
    : [];

  // Map the LLM's snake_case keys to the public camelCase schema, clamping
  // list lengths defensively. A blank steelman falls back to the raw intent.
  return {
    raw: intent,
    concepts: asStringArray(parsed.concepts).slice(0, 8),
    entities: asStringArray(parsed.entities).slice(0, 16),
    impliedComponents: impliedComponents.slice(0, 16),
    styleHints: asStringArray(parsed.style_hints).slice(0, 8),
    steelman: (typeof parsed.steelman === 'string' && parsed.steelman.trim()) || intent,
    analyzed: true,
  };
}
185
+
186
/**
 * Coerce an unknown value into an array of non-empty trimmed strings.
 * Non-arrays yield []; non-string and whitespace-only entries are dropped.
 *
 * @param {*} v
 * @returns {string[]}
 */
function asStringArray(v) {
  if (!Array.isArray(v)) return [];
  const result = [];
  for (const item of v) {
    if (typeof item !== 'string') continue;
    const trimmed = item.trim();
    if (trimmed.length > 0) result.push(trimmed);
  }
  return result;
}
190
+
191
/**
 * Render an analysis object as a compact block of labeled lines for a
 * downstream system prompt. Returns '' for a missing analysis or one that
 * fell back to passthrough (analyzed: false). A steelman identical to the
 * raw intent is omitted — it adds no information.
 *
 * @param {object} [analysis]
 * @returns {string}
 */
export function formatAnalysisForPrompt(analysis) {
  if (!analysis?.analyzed) return '';
  const lines = [];
  if (analysis.steelman && analysis.steelman !== analysis.raw) {
    lines.push(`ENRICHED BRIEF: ${analysis.steelman}`);
  }
  if (analysis.impliedComponents?.length) {
    lines.push(`EXPECTED COMPONENTS: ${analysis.impliedComponents.join(', ')}`);
  }
  if (analysis.concepts?.length) {
    lines.push(`CONCEPTS: ${analysis.concepts.join(', ')}`);
  }
  if (analysis.styleHints?.length) {
    lines.push(`STYLE HINTS: ${analysis.styleHints.join(', ')}`);
  }
  return lines.join('\n');
}