@adia-ai/a2ui-compose 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,229 @@
1
+ /**
2
+ * @adia-ai/a2ui-compose — monolithic engine, instant mode.
3
+ *
4
+ * Extracted from engine/generator.js per spec §11 Phase 2. Shares state
5
+ * (ArtifactStore, PipelineEngine) via engine/state.js. Pure helpers live
6
+ * in engines/monolithic/_shared.js.
7
+ */
8
+
9
+ import { validateSchema } from '../../../validator/validator.js';
10
+ import { getContext, searchBlocks, lookupDomain } from '../../engine/reference.js';
11
+ import { assessClarity } from '../../../retrieval/clarity.js';
12
+ import { feedbackStore } from '../../../retrieval/feedback-store.js';
13
+ import { store, engine } from '../../engine/state.js';
14
+ import { isRecording } from '../../../retrieval/dialog-recorder.js';
15
+ import {
16
+ generateSuggestions,
17
+ } from './_shared.js';
18
+
19
/**
 * Instant-mode UI generation — deterministic pattern matching, no LLM call.
 *
 * Runs the fixed pipeline interpret → (clarity gate) → analyze → plan →
 * generate → validate → render. Generation uses the best pattern template
 * that survives a whole-word quality gate; failing that it falls back to the
 * top partial match, and finally to a generic Card scaffold built from the
 * intent text.
 *
 * @param {object} args
 * @param {string} args.intent        Raw user intent text.
 * @param {string} args.executionId   Pipeline execution id used for stage tracking.
 * @param {string} [args.storeId]     Prior execution id; when present, artifacts
 *                                    are recorded under it for multi-turn continuity.
 * @param {object} [args.analysis]    Prompt-analyzer output; its steelman (when set)
 *                                    becomes the pattern-search query.
 * @param {Array}  [args.priorComponentsFromPayload] Accepted for call-site
 *                                    uniformity; instant mode does not iterate on
 *                                    a prior canvas yet.
 * @returns {Promise<object>} Payload with executionId, messages, validation,
 *   suggestions, pipeline state, and optional clarify/_debug sections.
 */
export async function generateInstant({ intent, executionId, storeId, analysis, priorComponentsFromPayload }) {
  // Forward-compat: instant mode is pattern-match-only and doesn't iterate
  // on a prior canvas yet. Accept the param so callers can pass uniformly.
  void priorComponentsFromPayload;
  const execId = executionId;

  // Use the analyzer's steelman as the search query for denser keyword hits.
  // Pattern matching is deterministic, so the analysis lifts retrieval quality
  // without changing any other instant-mode behavior.
  const searchQuery = analysis?.steelman || intent;

  // ── Interpret stage ──
  const domain = lookupDomain(intent);
  engine.submitStage(execId, 'interpret', {
    domain,
    intent,
    confidence: domain.confidence,
  });

  // ── Clarity gate (defense-in-depth) ──
  // With zero domain signals and low clarity, return a clarification payload
  // instead of guessing at UI.
  if (domain.confidence === 0) {
    const clarity = assessClarity(intent, domain);
    if (!clarity.clear) {
      // NOTE(review): the pro-mode twin (generate-pro.js) states 'clarify' is
      // not a registered pipeline stage and skips submitStage to avoid
      // throwing — confirm this call does not throw here.
      engine.submitStage(execId, 'clarify', { clarity, reason: 'no-ui-signals' });
      return {
        // FIX: was bare `storeId`, which is undefined on first turns; fall
        // back to execId so the payload always carries a usable executionId,
        // consistent with `artifactId` below.
        executionId: storeId || execId,
        messages: [],
        validation: { score: 0, errors: [], warnings: ['Intent does not describe a UI component'] },
        suggestions: clarity.questions.map(q => q.text),
        clarify: {
          needed: true,
          questions: clarity.questions,
          score: clarity.score,
          dimensions: clarity.dimensions,
          summary: clarity.summary,
        },
        pipeline: engine.getState(execId),
      };
    }
  }

  // ── Analyze stage ──
  const context = await getContext(intent, 1);
  engine.submitStage(execId, 'analyze', {
    context,
    componentCount: context.components.length,
    patternCount: context.patterns.length,
    confidence: context.patterns.length > 0 ? 0.9 : 0.6,
  });

  // ── Plan stage ──
  const patterns = searchBlocks(searchQuery, { domain: domain.domain });
  // Quality gate: only use a pattern if an intent keyword matches a whole
  // word in the pattern name. Prevents substring matches like "button" in
  // "toolbar-buttons" from hijacking unrelated intents.
  const GATE_STOPS = new Set(['the','and','with','for','from','that','this','its','are','all','can','has','each','show','using','based','into','like','make','your','type','just','only','also','more','most','some','very','much','many','will','about','been','when','they','them','what','would','could','should','different','simple','basic','custom','display','controls','group','selection','content','state']);
  const intentWords = intent.toLowerCase().split(/\s+/).filter(w => w.length > 2 && !GATE_STOPS.has(w));
  let bestMatch = patterns[0] || null;
  if (bestMatch) {
    const nameWords = bestMatch.name.toLowerCase().split(/[-_\s]+/);
    const matchTags = (bestMatch.tags || []).map(t => t.toLowerCase());
    const matchDomain = (bestMatch.domain || '').toLowerCase();
    // Tiered matching: strong hit (exact word or 4+ char prefix), or weak hit
    // (3-char word, domain match, substring). Strong hit = high confidence;
    // weak hit = still usable; neither = discard the match.
    const hasStrongHit = intentWords.some(w => {
      if (w.length < 3) return false;
      // Exact word match in name or tags (any length >= 3)
      if (nameWords.includes(w) || matchTags.includes(w)) return true;
      // Prefix match for longer words (4+ chars)
      if (w.length >= 4) {
        return nameWords.some(n => n.length >= 3 && (w.startsWith(n) || n.startsWith(w))) ||
          matchTags.some(t => t.length >= 3 && (w.startsWith(t) || t.startsWith(w)));
      }
      return false;
    });
    const hasWeakHit = !hasStrongHit && intentWords.some(w => {
      // Substring match — intent word appears inside a name/tag word or vice versa
      return nameWords.some(n => n.length >= 3 && (n.includes(w) || w.includes(n))) ||
        matchTags.some(t => t.length >= 3 && (t.includes(w) || w.includes(t))) ||
        matchDomain.includes(w);
    });
    if (!hasStrongHit && !hasWeakHit) {
      bestMatch = null;
    }
  }
  engine.submitStage(execId, 'plan', {
    strategy: bestMatch ? 'pattern-match' : 'fallback',
    patternName: bestMatch?.name ?? null,
    confidence: bestMatch ? 0.95 : 0.5,
  });

  // ── Generate stage ──
  let components;
  const suggestions = [];

  if (bestMatch && bestMatch.template) {
    components = bestMatch.template;
  } else {
    // No exact match — try to compose from partial matches. Take the top 2
    // patterns that passed the domain filter (even if they failed the gate).
    const partials = patterns.slice(0, 2);
    const partialNames = partials.map(p => p.name);

    if (partials.length > 0 && partials[0].template) {
      // Use the best partial match as the base — it's imperfect but better than a stub
      components = partials[0].template;
      suggestions.push(`Adapted from "${partials[0].name}" pattern — may not be exact. Use "thinking" mode for better results.`);
    } else {
      // True fallback — build a card scaffolding from the intent
      const titleText = intent.length > 50 ? intent.slice(0, 47) + '...' : intent;
      components = [
        { id: 'root', component: 'Card', children: ['hdr', 'sec'] },
        { id: 'hdr', component: 'Header', children: ['title', 'desc'] },
        { id: 'title', component: 'Text', slot: 'heading', textContent: titleText },
        { id: 'desc', component: 'Text', slot: 'description', textContent: 'Generated from intent — use thinking mode for full fidelity.' },
        { id: 'sec', component: 'Section', children: ['col'] },
        { id: 'col', component: 'Column', children: ['body'], gap: '3' },
        { id: 'body', component: 'Text', variant: 'body', textContent: `Describe what you'd like to see here.` },
      ];
      suggestions.push('Use "thinking" mode for LLM-powered generation');
      if (partialNames.length > 0) {
        suggestions.push(`Closest patterns: ${partialNames.join(', ')}`);
      }
    }
  }

  const messages = [
    {
      type: 'updateComponents',
      surfaceId: 'default',
      components,
    },
  ];

  // Include wireComponents if the matched pattern has wiring data
  if (bestMatch?.wiring) {
    messages.push({
      type: 'wireComponents',
      surfaceId: 'default',
      ...(bestMatch.wiring.data_sources ? { data: { sources: bestMatch.wiring.data_sources } } : {}),
      ...(bestMatch.wiring.controllers ? { state: { controllers: bestMatch.wiring.controllers } } : {}),
      ...(bestMatch.wiring.actions ? { actions: bestMatch.wiring.actions } : {}),
    });
  }

  engine.submitStage(execId, 'generate', {
    messages,
    source: bestMatch ? 'pattern-library' : 'fallback',
    confidence: bestMatch ? 0.95 : 0.4,
  });

  // ── Validate stage ──
  const validation = validateSchema(messages, { intent });
  engine.submitStage(execId, 'validate', {
    ...validation,
    confidence: validation.score / 100,
  });

  // ── Render stage (record artifact) ──
  // Store under storeId (previous execution) for multi-turn history continuity.
  // First-turn analysis is recorded so iteration turns can recover concepts +
  // implied components for canvas-aware suggestions.
  const artifactId = storeId || execId;
  store.record(artifactId, {
    messages,
    summary: intent,
    intent,
    validation,
    analysis,
  });

  engine.submitStage(execId, 'render', {
    stored: true,
    confidence: 1,
  });

  // Canvas-aware suggestions — see generate-pro.js for rationale.
  const originalAnalysis = store.getOriginalAnalysis(artifactId);
  const originalIntent = store.getOriginalIntent(artifactId);
  const domainSuggestions = generateSuggestions({ intent, domain, messages, originalAnalysis, originalIntent });
  suggestions.push(...domainSuggestions);

  // Fire-and-forget feedback logging — failures are deliberately swallowed so
  // telemetry can never break generation.
  feedbackStore.logExecution({
    executionId: artifactId, intent, mode: 'instant',
    domain: domain.domain, patternMatch: bestMatch?.name,
    score: validation?.score, componentCount: components.length,
  }).catch(() => {});

  return {
    executionId: artifactId,
    messages,
    validation,
    suggestions,
    pipeline: engine.getState(execId),
    // Retrieval surface for eval harness / diff tooling
    patternName: bestMatch?.name ?? null,
    strategy: bestMatch ? 'pattern-match' : 'fallback',
    // Dialog-recorder hook. Instant mode has no LLM call, so systemPrompt /
    // rawLLMResponse are null — the recorded value is the matched pattern,
    // which is the only "reasoning" instant mode does.
    _debug: isRecording() ? {
      systemPrompt: null,
      rawLLMResponse: null,
      tokens: null,
      patterns: patterns.slice(0, 8).map(p => ({ name: p.name, score: p.score, keywords: p.keywords })),
      strategy: bestMatch ? 'pattern-match' : 'fallback',
      patternName: bestMatch?.name || null,
    } : undefined,
  };
}
@@ -0,0 +1,367 @@
1
+ /**
2
+ * @adia-ai/a2ui-compose — monolithic engine, pro mode.
3
+ *
4
+ * Extracted from engine/generator.js per spec §11 Phase 2. Shares state
5
+ * (ArtifactStore, PipelineEngine) via engine/state.js. Pure helpers live
6
+ * in engines/monolithic/_shared.js.
7
+ */
8
+
9
+ import { validateSchema } from '../../../validator/validator.js';
10
+ import { getContext, searchBlocks, lookupDomain } from '../../engine/reference.js';
11
+ import { decomposeIntent } from '../../../retrieval/decomposer.js';
12
+ import { assessClarity } from '../../../retrieval/clarity.js';
13
+ import { isConversational } from '../../../retrieval/intent-gate.js';
14
+ import { feedbackStore } from '../../../retrieval/feedback-store.js';
15
+ import { store, engine } from '../../engine/state.js';
16
+ import { isRecording } from '../../../retrieval/dialog-recorder.js';
17
+ import {
18
+ buildSystemPrompt,
19
+ buildChatMessages,
20
+ mergeCanvasDiff,
21
+ buildCanvasDiffPrompt,
22
+ parseA2UIResponse,
23
+ buildRepairPrompt,
24
+ generateSuggestions,
25
+ } from './_shared.js';
26
+
27
/**
 * Pro-mode UI generation — pattern-guided LLM adaptation/composition.
 *
 * Pipeline: interpret → (clarity gate, skipped on iteration turns) → analyze
 * → plan → generate (one of three LLM branches: adapt a matched pattern,
 * compose fragment patterns, or generate fresh) → validate + one repair pass
 * → store/render → suggestions + drift detection.
 *
 * @param {object} args
 * @param {string} args.intent        Raw user intent text.
 * @param {string} args.executionId   Pipeline execution id used for stage tracking.
 * @param {string} [args.storeId]     Prior execution id; artifacts are recorded
 *                                    under it for multi-turn continuity.
 * @param {object} args.llmAdapter    Adapter exposing `complete({ messages, systemPrompt })`.
 * @param {object} [args.analysis]    Prompt-analyzer output (steelman, implied
 *                                    components, concepts, style hints).
 * @param {Array}  [args.priorComponentsFromPayload] Client-owned canvas; wins over
 *                                    the server-side store when both are present.
 * @returns {Promise<object>} Payload with executionId, messages, validation,
 *   suggestions, drift metrics, pipeline state, and optional clarify/_debug.
 */
export async function generatePro({ intent, executionId, storeId, llmAdapter, analysis, priorComponentsFromPayload }) {
  const execId = executionId;

  // The steelman is the enriched brief produced by the prompt-analyzer in
  // the baseline ingestion stage. Use it for pattern search (denser query =
  // better hits) and inject the analysis signals into the system prompt so
  // the LLM sees the implied component set + concept tags. Falls back to raw
  // intent when analyzer didn't run (iteration turns) or fell back.
  const searchQuery = analysis?.steelman || intent;

  // Header prepended to every user message sent to the LLM. Surfaces the
  // analyzer's structured signals so the LLM treats steelman + components +
  // concepts as ground truth instead of having to re-derive them from a 3-word
  // prompt. Empty string when no analyzer signal — no behavior change.
  const analysisHint = (analysis?.analyzed && (
    analysis.steelman !== analysis.raw ||
    analysis.impliedComponents?.length ||
    analysis.concepts?.length
  ))
    ? [
      analysis.steelman && analysis.steelman !== analysis.raw ? `BRIEF: ${analysis.steelman}` : null,
      analysis.impliedComponents?.length ? `EXPECTED COMPONENTS: ${analysis.impliedComponents.join(', ')}` : null,
      analysis.concepts?.length ? `CONCEPTS: ${analysis.concepts.join(', ')}` : null,
      analysis.styleHints?.length ? `STYLE: ${analysis.styleHints.join(', ')}` : null,
    ].filter(Boolean).join('\n') + '\n\n'
    : '';

  // ── Stage 1: Interpret ──
  const domain = lookupDomain(intent);
  engine.submitStage(execId, 'interpret', { domain, intent, confidence: domain.confidence });

  // ── Clarity gate (defense-in-depth) ──
  // If intent has zero domain signals and clarity is below threshold,
  // return a clarification response instead of hallucinating UI.
  // This catches cases where the intent gate upstream didn't intercept
  // (e.g. direct API calls bypassing isConversational).
  // EXCEPTION: skip on iteration turns. Refinements like "make it 3D" or
  // "remove the avatar" have zero domain signals but are valid modifications
  // of the prior canvas — they should pass straight through to the LLM with
  // history context. Iteration is detected by the presence of prior turns in
  // the artifact store keyed by storeId. Same intent as F1 in generator.js.
  const priorTurnCount = store.getAll(storeId || execId)?.length || 0;
  const isIteration = priorTurnCount > 0;
  const clarity = assessClarity(intent, domain);
  if (!isIteration && !clarity.clear && domain.confidence === 0) {
    // Note: 'clarify' is not a registered pipeline stage, so we skip submitStage
    // here to avoid throwing — the clarification surfaces in the return payload.
    return {
      // FIX: was bare `storeId`, which is undefined on first turns; fall back
      // to execId so the payload always carries a usable executionId,
      // consistent with `artifactId` below.
      executionId: storeId || execId,
      messages: [],
      validation: { score: 0, errors: [], warnings: ['Intent does not describe a UI component'] },
      suggestions: clarity.questions.map(q => q.text),
      clarify: {
        needed: true,
        questions: clarity.questions,
        score: clarity.score,
        dimensions: clarity.dimensions,
        summary: clarity.summary,
      },
      pipeline: engine.getState(execId),
    };
  }

  // ── Stage 2: Analyze ──
  const context = await getContext(intent, 2);
  engine.submitStage(execId, 'analyze', {
    context, componentCount: context.components.length, patternCount: context.patterns.length,
    confidence: 0.85,
  });

  // ── Stage 3: Plan — search for best pattern ──
  const patterns = searchBlocks(searchQuery, { domain: domain.domain });
  const bestPattern = patterns[0];

  // ── Compositional reasoning: decompose multi-section intents ──
  // ALWAYS attempt decomposition for compound intents — even when a single
  // best pattern matches. This lets the LLM receive fragment patterns for
  // each sub-section (e.g. "invoice table", "payment form") alongside the
  // overall best pattern, producing richer multi-section output.
  // (Both bindings below were `let` but are never reassigned — now `const`.)
  const fragmentPatterns = [];
  const decomposition = decomposeIntent(intent);

  // Determine if intent is compound: decomposer says so, OR 3+ meaningful words
  const meaningfulWords = intent.replace(/\b(a|an|the|with|and|or|for|of|in|on|to|by)\b/gi, '').trim().split(/\s+/).filter(w => w.length > 2);
  const isCompound = (decomposition.shouldDecompose && decomposition.subtasks.length >= 2)
    || meaningfulWords.length >= 3;

  if (isCompound && decomposition.subtasks.length >= 2) {
    for (const subtask of decomposition.subtasks) {
      const subPatterns = searchBlocks(subtask.intent, { domain: domain.domain });
      const topMatch = subPatterns[0];
      if (topMatch?.template) {
        fragmentPatterns.push({
          label: subtask.label,
          subtaskIntent: subtask.intent,
          pattern: topMatch,
        });
      }
    }
  }

  const hasFragments = fragmentPatterns.length >= 2;
  engine.submitStage(execId, 'plan', {
    strategy: bestPattern?.template ? (hasFragments ? 'adapt-with-fragments' : 'adapt-pattern') : hasFragments ? 'compose-fragments' : 'generate-fresh',
    patternName: bestPattern?.name ?? null,
    fragments: hasFragments ? fragmentPatterns.map(f => f.pattern.name) : undefined,
    layout: hasFragments ? decomposition?.layout : undefined,
    confidence: bestPattern?.template ? 0.9 : hasFragments ? 0.85 : 0.7,
  });

  // ── Stage 4: Generate ──

  // Check if this is a multi-turn iteration with existing canvas state.
  // This runs BEFORE branching so ALL paths (adapt, compose, generate-fresh)
  // can inject the current canvas when iterating.
  // Priority order:
  //   1. priorComponentsFromPayload — client-owned canvas (stateless path)
  //   2. ArtifactStore lookup via storeId — server-stateful path (legacy)
  // The payload path wins when both present so the client is the source of
  // truth (it knows its current canvas; the store may be stale, e.g. after
  // a server restart or when iterating across tabs).
  let priorComponents = [];
  if (Array.isArray(priorComponentsFromPayload) && priorComponentsFromPayload.length > 0) {
    priorComponents = priorComponentsFromPayload;
  } else {
    const prevTurns = store.getAll(storeId || execId);
    if (prevTurns && prevTurns.length > 0) {
      const lastTurn = prevTurns[prevTurns.length - 1];
      priorComponents = lastTurn.messages?.flatMap(m => m.components || []) || [];
    }
  }
  const hasPriorCanvas = priorComponents.length > 0;

  // ── Drift guard: detect scope creep and inject guardrails into prompt ──
  const preDriftMetrics = hasPriorCanvas ? store.getDriftMetrics(storeId || execId) : null;
  const originalIntent = preDriftMetrics?.originalIntent || null;
  const driftGuard = preDriftMetrics?.driftScore >= 0.4
    ? `\n\nSCOPE GUARD: This canvas has grown significantly from the original brief ("${preDriftMetrics.originalIntent}"). Component count: ${priorComponents.length}. Only make changes the user EXPLICITLY requested. Do NOT add new sections, cards, or structural elements beyond what the user asked for in this turn.`
    : '';

  let messages;
  let systemPrompt;
  // Captured for the dialog recorder when ADIA_LOG_DIALOGS is on. Holds the
  // raw text from whichever LLM branch produced the final result. No-op when
  // recording is off — the assignments are cheap; the size cost only matters
  // when the recorder actually persists them to disk.
  let lastRawResponse = null;
  let lastTokens = null;
  if (bestPattern && bestPattern.template) {
    // PRO PATH: Adapt the matched pattern via LLM
    systemPrompt = await buildSystemPrompt(context, patterns, '', intent);

    const adaptPrompt = hasPriorCanvas
      ? `Adapt this UI for the intent: "${intent}"${driftGuard}

${buildCanvasDiffPrompt(intent, priorComponents, { originalIntent })}`
      : hasFragments
        ? `Adapt this UI for the intent: "${intent}"

BASE TEMPLATE (use for overall structure — modify, don't generate from scratch):
${JSON.stringify(bestPattern.template, null, 2)}

Pattern: "${bestPattern.name}" — ${bestPattern.description}

FRAGMENT PATTERNS — incorporate these for specific sections:
${fragmentPatterns.map(f => `## ${f.label} (from "${f.pattern.name}" pattern)\n${JSON.stringify(f.pattern.template, null, 1)}`).join('\n\n')}

Instructions:
- Use the base pattern for overall structure and layout
- Incorporate the fragment patterns above for their respective sections
- Change text content, labels, icons to match the new intent
- Add or remove components as needed for the intent
- Preserve slot attributes and Card anatomy rules
- Generate a COMPLETE, richly detailed UI with 20-40 components minimum
- Each section from the intent should have its own Card with Header, Section, and Footer
- Include realistic text content, labels, and placeholder data — not generic text
- Output ONLY the JSON array, no explanation`
        : `Adapt this UI for the intent: "${intent}"

BASE TEMPLATE (modify this, don't generate from scratch):
${JSON.stringify(bestPattern.template, null, 2)}

Pattern: "${bestPattern.name}" — ${bestPattern.description}

Instructions:
- Keep the structural skeleton (Card > Header + Section + Footer, nesting, layout)
- Change text content, labels, icons to match the new intent
- Add or remove components as needed for the intent
- Preserve slot attributes and Card anatomy rules
- Generate at least 15 components. Expand the template with additional fields, buttons, and content as appropriate for the intent.
- Output ONLY the JSON array, no explanation`;

    const response = await llmAdapter.complete({
      messages: [{ role: 'user', content: analysisHint + adaptPrompt }],
      systemPrompt,
    });
    messages = parseA2UIResponse(response.content, { executionId: execId, mode: 'pro', intent, stopReason: response.stopReason });
    if (isRecording()) { lastRawResponse = response.content; lastTokens = response.usage || null; }
  } else if (hasFragments) {
    // COMPOSE PATH: multiple fragment patterns found for subtasks — compose in a single LLM call
    systemPrompt = await buildSystemPrompt(context, patterns, '', intent, { fragmentPatterns, decomposition });

    const fragmentContext = fragmentPatterns.map(f =>
      `## ${f.label} (from "${f.pattern.name}" pattern)\n${JSON.stringify(f.pattern.template, null, 1)}`
    ).join('\n\n');

    const layoutHint = decomposition.layout
      ? `Use a ${decomposition.layout.component} as the root layout container.`
      : 'Use a Column as the root layout container.';

    const canvasContext = hasPriorCanvas
      ? `\n${buildCanvasDiffPrompt(intent, priorComponents, { originalIntent })}${driftGuard}\n`
      : '';

    const composePrompt = `Compose a UI for: "${intent}"
${canvasContext}
This intent has ${fragmentPatterns.length} sections. ${layoutHint}

FRAGMENT PATTERNS — use these as building blocks for each section:

${fragmentContext}

Instructions:
- ${hasPriorCanvas ? 'Return ONLY diff objects (add/modify/delete) — unchanged components are preserved automatically' : 'Combine ALL fragments into a single cohesive UI under one root'}
- Re-id all components to avoid collisions (e.g., prefix with section name)
- ${decomposition.layout?.component === 'Tabs' ? 'Use Tabs as root with Tab children for each section — content panels are SIBLINGS of Tabs, not children' : `Use ${decomposition.layout?.component || 'Column'} as root, with each section as a child`}
- Adapt text content, labels, and icons to match the overall intent
- Add a page-level header above the sections if appropriate
- Preserve slot attributes and Card anatomy rules from each fragment
- Generate a COMPLETE, richly detailed UI with 20-40 components minimum. Each section should have its own Card with Header, Section, and Footer.
- Include realistic text content, labels, placeholder data, and all fields/buttons a real production UI would have.
- Output ONLY the JSON array, no explanation`;

    const response = await llmAdapter.complete({
      messages: [{ role: 'user', content: analysisHint + composePrompt }],
      systemPrompt,
    });
    messages = parseA2UIResponse(response.content, { executionId: execId, mode: 'pro', intent, stopReason: response.stopReason });
    if (isRecording()) { lastRawResponse = response.content; lastTokens = response.usage || null; }
  } else {
    // No pattern — fall through to full thinking-style generation
    systemPrompt = await buildSystemPrompt(context, patterns, '', intent);

    if (hasPriorCanvas) {
      // Multi-turn: inject current canvas as explicit iteration context
      const iteratePrompt = `Modify this UI for the intent: "${intent}"${driftGuard}

${buildCanvasDiffPrompt(intent, priorComponents, { originalIntent })}`;

      const response = await llmAdapter.complete({
        messages: [{ role: 'user', content: analysisHint + iteratePrompt }],
        systemPrompt,
      });
      messages = parseA2UIResponse(response.content, { executionId: execId, intent, mode: 'pro', stopReason: response.stopReason });
      if (isRecording()) { lastRawResponse = response.content; lastTokens = response.usage || null; }
    } else {
      const chatMessages = buildChatMessages(intent, storeId || execId);
      const response = await llmAdapter.complete({ messages: chatMessages, systemPrompt });
      messages = parseA2UIResponse(response.content, { executionId: execId, intent, mode: 'pro', stopReason: response.stopReason });
      if (isRecording()) { lastRawResponse = response.content; lastTokens = response.usage || null; }
    }
  }

  // ── Canvas diff merge: apply LLM diff response onto prior canvas ──
  // Only merge when using diff format (large canvases ≥50 components).
  // For small canvases, the LLM was asked to return the COMPLETE modified array.
  if (hasPriorCanvas && priorComponents.length >= 50 && messages && messages.length > 0) {
    for (const msg of messages) {
      if (msg.type === 'updateComponents' && msg.components) {
        msg.components = mergeCanvasDiff(priorComponents, msg.components);
      }
    }
  }

  engine.submitStage(execId, 'generate', {
    messages, source: bestPattern ? (hasFragments ? 'adapt-with-fragments' : 'adapt') : hasFragments ? 'compose' : 'llm', confidence: 0.85,
    fragments: hasFragments ? fragmentPatterns.length : undefined,
  });

  // ── Stage 5: Validate + repair ──
  // One best-effort repair pass: feed the failed checks back to the LLM and
  // re-validate. Any error (including a missing `checks` array) keeps the
  // original messages/validation — repair must never make things worse.
  let validation = validateSchema(messages, { intent });
  if (!validation.valid) {
    try {
      const failedChecks = validation.checks.filter(c => !c.passed);
      const repairResponse = await llmAdapter.complete({
        messages: [{ role: 'user', content: buildRepairPrompt(failedChecks, messages) }],
        systemPrompt,
      });
      messages = parseA2UIResponse(repairResponse.content, { executionId: execId, intent, mode: 'pro', stopReason: repairResponse.stopReason });
      validation = validateSchema(messages, { intent });
    } catch { /* keep original */ }
  }
  engine.submitStage(execId, 'validate', { ...validation, confidence: validation.score / 100 });

  // ── Stage 6: Store ──
  // First-turn analysis is recorded so iteration turns can recover concepts +
  // implied components for canvas-aware suggestions. The store keeps only the
  // first turn's analysis value (later passes are nulls — by design).
  const artifactId = storeId || execId;
  store.record(artifactId, { messages, summary: intent, validation, intent, analysis });
  engine.submitStage(execId, 'render', { stored: true, success: validation.valid });

  // Pull the original analyzer output so suggestions stay anchored to the
  // initial canvas concept even on iteration turns. Without this, chip
  // clicks drift the suggestion pool away from the original intent.
  const originalAnalysis = store.getOriginalAnalysis(artifactId);
  const suggestions = generateSuggestions({ intent, domain, messages, originalAnalysis, originalIntent });

  // ── Drift detection: warn when canvas structure diverges significantly ──
  const driftMetrics = store.getDriftMetrics(artifactId);
  if (driftMetrics?.warning) {
    console.warn(`[generateUI:pro] ${driftMetrics.warning}`);
  }

  // Fire-and-forget feedback logging — failures are deliberately swallowed so
  // telemetry can never break generation.
  feedbackStore.logExecution({
    executionId: artifactId, intent, mode: 'pro',
    domain: domain.domain, patternMatch: bestPattern?.name,
    composedFrom: hasFragments ? fragmentPatterns.map(f => f.pattern.name) : undefined,
    score: validation?.score, componentCount: messages[0]?.components?.length || 0,
  }).catch(() => {});

  return {
    executionId: artifactId,
    messages,
    validation,
    suggestions,
    drift: driftMetrics,
    pipeline: engine.getState(execId),
    // Debug payload for the dialog recorder. Only populated when
    // ADIA_LOG_DIALOGS is on (the engine's capture sites guard with
    // isRecording()). Underscore-prefixed so consumers can ignore it,
    // and so it never accidentally appears in serialised API payloads
    // unless the proxy explicitly forwards it.
    _debug: isRecording() ? {
      systemPrompt,
      rawLLMResponse: lastRawResponse,
      tokens: lastTokens,
      patterns: patterns.slice(0, 8).map(p => ({ name: p.name, score: p.score, keywords: p.keywords })),
    } : undefined,
  };
}