@adia-ai/a2ui-compose 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +86 -0
- package/README.md +181 -0
- package/engine/artifacts.js +262 -0
- package/engine/constitution.md +78 -0
- package/engine/context-store.js +218 -0
- package/engine/generator.js +500 -0
- package/engine/pattern-export.js +149 -0
- package/engine/pipeline/engine.js +289 -0
- package/engine/pipeline/types.js +91 -0
- package/engine/reference.js +115 -0
- package/engine/state.js +15 -0
- package/engines/monolithic/_shared.js +1320 -0
- package/engines/monolithic/generate-instant.js +229 -0
- package/engines/monolithic/generate-pro.js +367 -0
- package/engines/monolithic/generate-thinking.js +211 -0
- package/engines/registry.js +195 -0
- package/engines/zettel/_smoke.js +37 -0
- package/engines/zettel/composer.js +146 -0
- package/engines/zettel/fragment-library.js +209 -0
- package/engines/zettel/generate.js +15 -0
- package/engines/zettel/generator-adapter.js +202 -0
- package/engines/zettel/session-store.js +121 -0
- package/engines/zettel/synthesizer.js +343 -0
- package/evals/harness.mjs +193 -0
- package/index.js +16 -0
- package/llm/adapters/anthropic.js +106 -0
- package/llm/adapters/gemini.js +99 -0
- package/llm/adapters/index.js +138 -0
- package/llm/adapters/openai.js +85 -0
- package/llm/adapters/sse.js +50 -0
- package/llm/llm-bridge.js +214 -0
- package/llm/llm-stub.js +69 -0
- package/package.json +41 -0
- package/transpiler/transpiler-maps.js +277 -0
- package/transpiler/transpiler.js +820 -0
|
@@ -0,0 +1,500 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* generate_ui — UI generation from natural language intent.
|
|
3
|
+
*
|
|
4
|
+
* Four modes:
|
|
5
|
+
* instant — pattern matching, no LLM (synchronous)
|
|
6
|
+
* pro — pattern search + LLM adaptation (async, blocking)
|
|
7
|
+
* thinking — A003 intelligence + LLM adapter (async, blocking)
|
|
8
|
+
* stream — same as thinking but yields progressive updates via async generator
|
|
9
|
+
*
|
|
10
|
+
* Multi-turn: pass executionId from a previous run to iterate on the same surface.
|
|
11
|
+
* The previous A2UI output is fed back as assistant context so the LLM can refine.
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
import { validateSchema } from '../../validator/validator.js';
|
|
15
|
+
import { getContext, searchBlocks, searchBlocksSemantic, lookupDomain, listPatterns } from './reference.js';
|
|
16
|
+
import { store, engine } from './state.js';
|
|
17
|
+
import { checkIntentAlignment } from '../../retrieval/intent-alignment.js';
|
|
18
|
+
import { decomposeIntent, composeSubtasks } from '../../retrieval/decomposer.js';
|
|
19
|
+
import { getWiringCatalog } from '../../retrieval/wiring-catalog.js';
|
|
20
|
+
import { getComponentData } from '../../retrieval/pattern-library.js';
|
|
21
|
+
|
|
22
|
+
import { StubLLMAdapter } from '../llm/llm-stub.js';
|
|
23
|
+
import { createAdapter } from '../llm/llm-bridge.js';
|
|
24
|
+
import { assessClarity } from '../../retrieval/clarity.js';
|
|
25
|
+
import { isConversational } from '../../retrieval/intent-gate.js';
|
|
26
|
+
import { classifyIntent } from '../../retrieval/domain-router.js';
|
|
27
|
+
import { researchIntent, detectReferences } from '../../retrieval/web-research.js';
|
|
28
|
+
import { feedbackStore } from '../../retrieval/feedback-store.js';
|
|
29
|
+
import { pick as pickEngine, registerMonolithicEngines } from '../engines/registry.js';
|
|
30
|
+
import { analyzePrompt } from '../../retrieval/prompt-analyzer.js';
|
|
31
|
+
import { recordTurn, isRecording } from '../../retrieval/dialog-recorder.js';
|
|
32
|
+
import {
|
|
33
|
+
buildSystemPrompt,
|
|
34
|
+
buildChatMessages,
|
|
35
|
+
tryParsePartial,
|
|
36
|
+
mergeCanvasDiff,
|
|
37
|
+
buildCanvasDiffPrompt,
|
|
38
|
+
parseA2UIResponse,
|
|
39
|
+
buildRepairPrompt,
|
|
40
|
+
generateSuggestions,
|
|
41
|
+
} from '../engines/monolithic/_shared.js';
|
|
42
|
+
import { generateInstant } from '../engines/monolithic/generate-instant.js';
|
|
43
|
+
import { generatePro } from '../engines/monolithic/generate-pro.js';
|
|
44
|
+
import { generateThinking } from '../engines/monolithic/generate-thinking.js';
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
/**
|
|
48
|
+
* Generate a UI surface from a natural language intent.
|
|
49
|
+
*
|
|
50
|
+
* @param {object} opts
|
|
51
|
+
* @param {string} opts.intent — Natural language intent
|
|
52
|
+
* @param {'instant'|'pro'|'thinking'|'stream'} [opts.mode='instant'] — Generation mode
|
|
53
|
+
* @param {string} [opts.executionId] — Existing execution ID (for multi-turn iteration)
|
|
54
|
+
* @param {object} [opts.currentCanvas] — Client-owned canvas state, enables
|
|
55
|
+
* stateless iteration. Shape is a discriminated union — today:
|
|
56
|
+
* { components: A2UIComponent[] } — flat component array
|
|
57
|
+
* { messages: A2UIMessage[] } — full message sequence
|
|
58
|
+
* Future:
|
|
59
|
+
* { html: string } — raw HTML (parsed server-side)
|
|
60
|
+
* { a2ui: A2UIMessage[], originalIntent?: string, meta?: {} }
|
|
61
|
+
* When provided, the pipeline uses this as the iteration baseline instead
|
|
62
|
+
* of looking up prior turns in the ArtifactStore. This makes generation
|
|
63
|
+
* stateless — the client can iterate across server restarts, multiple tabs,
|
|
64
|
+
* or from a saved-canvas JSON file. Backward compatible: if omitted, the
|
|
65
|
+
* store lookup via `executionId` still works.
|
|
66
|
+
* @param {object} [opts.llmAdapter] — LLM adapter (defaults to auto-detected)
|
|
67
|
+
* @returns {Promise<{ executionId: string, messages: object[], validation: object, suggestions: string[] }>}
|
|
68
|
+
*/
|
|
69
|
+
export async function generateUI({ intent, engine: engineName = 'monolithic', mode = 'instant', executionId, currentCanvas, llmAdapter, model, sessionId }) {
|
|
70
|
+
// Iteration signal: either an executionId (server-stateful path) OR a
|
|
71
|
+
// currentCanvas (client-stateless path). Both mean "refine existing UI".
|
|
72
|
+
const hasClientCanvas = !!(currentCanvas && (currentCanvas.components?.length || currentCanvas.messages?.length || currentCanvas.html));
|
|
73
|
+
const isIteration = !!executionId || hasClientCanvas;
|
|
74
|
+
|
|
75
|
+
// ── Intent gate: reject non-UI prompts before entering the pipeline ──
|
|
76
|
+
// This prevents the LLM from hallucinating UIs from vague/abstract text.
|
|
77
|
+
// EXCEPTION: skip the gate on iteration turns. Modifiers like "make it 3D",
|
|
78
|
+
// "remove the avatar", "try a rose theme" rarely contain UI nouns the gate
|
|
79
|
+
// recognizes — they're refinements of the prior canvas, not fresh intents.
|
|
80
|
+
if (!isIteration) {
|
|
81
|
+
const classification = classifyIntent(intent);
|
|
82
|
+
const intentCheck = isConversational(intent, classification);
|
|
83
|
+
if (intentCheck.conversational) {
|
|
84
|
+
const clarity = assessClarity(intent, classification);
|
|
85
|
+
return {
|
|
86
|
+
executionId: executionId || 'rejected',
|
|
87
|
+
messages: [],
|
|
88
|
+
validation: { score: 0, valid: false, errors: [`Intent rejected: ${intentCheck.reason}`] },
|
|
89
|
+
suggestions: clarity.questions.map(q => q.text),
|
|
90
|
+
rejected: true,
|
|
91
|
+
reason: intentCheck.reason,
|
|
92
|
+
};
|
|
93
|
+
}
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
// For multi-turn: keep the old executionId for artifact history,
|
|
97
|
+
// but always start a fresh pipeline execution for stage tracking.
|
|
98
|
+
const previousExecId = executionId;
|
|
99
|
+
executionId = engine.start({ intent, mode, previousExecId });
|
|
100
|
+
|
|
101
|
+
// ── Engine dispatch (Phase 5) ──
|
|
102
|
+
// Zettel engine: single mode (instant), fragment-graph composition.
|
|
103
|
+
// Monolithic engine: three modes (instant | pro | thinking | stream→thinking).
|
|
104
|
+
// Analyzer is BASELINE for every reasoning level — every mode gets an LLM
|
|
105
|
+
// adapter (instant included). The analyzer call adds ~1-2s to the floor but
|
|
106
|
+
// the structured signals it produces (steelman, concepts, impliedComponents)
|
|
107
|
+
// dramatically improve downstream pattern retrieval and LLM generation
|
|
108
|
+
// quality. Adapter is always created upfront so all modes can analyze.
|
|
109
|
+
const effectiveMode = mode === 'stream' ? 'thinking' : mode;
|
|
110
|
+
const adapter = llmAdapter || await createAdapter({ model });
|
|
111
|
+
|
|
112
|
+
// ── Stage 0: Prompt Analysis (baseline ingestion) ──
|
|
113
|
+
// Extract structured signals from the raw prompt so every downstream stage
|
|
114
|
+
// — pattern search, concept-mapping retrieval, system prompt construction
|
|
115
|
+
// — reasons against an enriched brief instead of the raw text.
|
|
116
|
+
// Skipped on iteration turns where the user's modifier ("make it 3D") is
|
|
117
|
+
// most useful as-is for the LLM iteration prompt.
|
|
118
|
+
let analysis = null;
|
|
119
|
+
if (!isIteration) {
|
|
120
|
+
analysis = await analyzePrompt({ intent, llmAdapter: adapter });
|
|
121
|
+
}
|
|
122
|
+
|
|
123
|
+
// ── Normalize currentCanvas into a uniform priorComponents array ──
|
|
124
|
+
// Engines consume a single array; the shape union is collapsed here so
|
|
125
|
+
// engine code doesn't branch on input form. HTML → components conversion
|
|
126
|
+
// is a future tier-2 feature (see docstring) — for now HTML input falls
|
|
127
|
+
// through unchanged and the engine will treat it as "no prior canvas".
|
|
128
|
+
let priorComponentsFromPayload = null;
|
|
129
|
+
if (hasClientCanvas) {
|
|
130
|
+
if (Array.isArray(currentCanvas.components)) {
|
|
131
|
+
priorComponentsFromPayload = currentCanvas.components;
|
|
132
|
+
} else if (Array.isArray(currentCanvas.messages)) {
|
|
133
|
+
priorComponentsFromPayload = currentCanvas.messages.flatMap(m => m?.components || []);
|
|
134
|
+
}
|
|
135
|
+
// TODO(tier-2): if (currentCanvas.html) priorComponentsFromPayload = htmlToComponents(currentCanvas.html);
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
const fn = pickEngine({ engine: engineName, mode: effectiveMode });
|
|
139
|
+
const t0 = Date.now();
|
|
140
|
+
const result = await fn({
|
|
141
|
+
intent,
|
|
142
|
+
executionId,
|
|
143
|
+
storeId: previousExecId || executionId,
|
|
144
|
+
llmAdapter: adapter,
|
|
145
|
+
sessionId,
|
|
146
|
+
analysis,
|
|
147
|
+
priorComponentsFromPayload,
|
|
148
|
+
});
|
|
149
|
+
const totalMs = Date.now() - t0;
|
|
150
|
+
|
|
151
|
+
// ── Dialog recording (fire-and-forget, gated by ADIA_LOG_DIALOGS) ──
|
|
152
|
+
// Captures the entire turn — intent, analysis, system prompt, raw LLM
|
|
153
|
+
// response, parsed messages, validation, drift, telemetry — to disk for
|
|
154
|
+
// debugging, regression analysis, and training-data bootstrapping. The
|
|
155
|
+
// recorder no-ops when the env var is unset; the engine's _debug payload
|
|
156
|
+
// is also only populated when recording is on (see generate-pro.js).
|
|
157
|
+
if (isRecording()) {
|
|
158
|
+
const debug = result._debug || {};
|
|
159
|
+
// Anything the engine put on _debug beyond the canonical fields lands
|
|
160
|
+
// in engineDebug — useful for engine-specific reasoning (zettel strategy
|
|
161
|
+
// / composition / fragmentsUsed; instant patternName / strategy).
|
|
162
|
+
const { systemPrompt, rawLLMResponse, tokens, patterns, ...engineDebug } = debug;
|
|
163
|
+
recordTurn({
|
|
164
|
+
sessionId: previousExecId || result.executionId || null,
|
|
165
|
+
intent,
|
|
166
|
+
mode: effectiveMode,
|
|
167
|
+
engine: engineName,
|
|
168
|
+
model: model || null,
|
|
169
|
+
analysis,
|
|
170
|
+
currentCanvas: hasClientCanvas ? currentCanvas : null,
|
|
171
|
+
patterns: patterns || [],
|
|
172
|
+
systemPrompt: systemPrompt || null,
|
|
173
|
+
rawLLMResponse: rawLLMResponse || null,
|
|
174
|
+
messages: result.messages || [],
|
|
175
|
+
validation: result.validation || null,
|
|
176
|
+
drift: result.drift || null,
|
|
177
|
+
suggestions: result.suggestions || [],
|
|
178
|
+
timing: { totalMs },
|
|
179
|
+
tokens: tokens || null,
|
|
180
|
+
engineDebug: Object.keys(engineDebug).length ? engineDebug : null,
|
|
181
|
+
isIteration,
|
|
182
|
+
}).catch(() => { /* logger swallows internally; this is just last-resort */ });
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
// Strip the _debug payload before returning — it's an internal collaboration
|
|
186
|
+
// channel between engines and the recorder, not part of the public API.
|
|
187
|
+
// Without this strip the proxy would echo a 12KB+ system prompt to clients.
|
|
188
|
+
if (result._debug) delete result._debug;
|
|
189
|
+
return result;
|
|
190
|
+
}
|
|
191
|
+
|
|
192
|
+
/**
|
|
193
|
+
* Streaming UI generation — yields progressive events as the LLM generates.
|
|
194
|
+
*
|
|
195
|
+
* Events:
|
|
196
|
+
* { type: 'status', stage, message } — pipeline stage transition
|
|
197
|
+
* { type: 'clarify', questions, score, dimensions } — intent needs clarification (consumer should pause)
|
|
198
|
+
* { type: 'text_delta', text, snapshot } — raw LLM text chunk
|
|
199
|
+
* { type: 'partial', messages } — parseable partial A2UI (render immediately)
|
|
200
|
+
* { type: 'complete', messages, validation, suggestions, executionId } — final result
|
|
201
|
+
* { type: 'error', error } — generation error
|
|
202
|
+
*
|
|
203
|
+
* @param {object} opts
|
|
204
|
+
* @param {string} opts.intent
|
|
205
|
+
* @param {string} [opts.executionId]
|
|
206
|
+
* @param {object} [opts.llmAdapter]
|
|
207
|
+
* @param {(query: string) => Promise<{ results: { title: string, snippet: string, url: string }[] }>} [opts.search] — Web search function
|
|
208
|
+
* @yields {object}
|
|
209
|
+
*/
|
|
210
|
+
export async function* generateUIStream({ intent, executionId, llmAdapter, model, search, currentCanvas }) {
|
|
211
|
+
// currentCanvas accepted for API parity with generateUI. The streaming
|
|
212
|
+
// path's downstream consumers don't yet read it, but parity prevents the
|
|
213
|
+
// client from having to branch its call shape per mode. Tier-2 will
|
|
214
|
+
// wire it through the same way generatePro does.
|
|
215
|
+
void currentCanvas;
|
|
216
|
+
// ── Intent gate: reject non-UI prompts before entering the pipeline ──
|
|
217
|
+
// Same iteration bypass as generateUI() — modifiers don't need to clear the
|
|
218
|
+
// conversational gate when there's a prior turn to refine.
|
|
219
|
+
const isIteration = !!executionId;
|
|
220
|
+
if (!isIteration) {
|
|
221
|
+
const classification = classifyIntent(intent);
|
|
222
|
+
const intentCheck = isConversational(intent, classification);
|
|
223
|
+
if (intentCheck.conversational) {
|
|
224
|
+
const clarity = assessClarity(intent, classification);
|
|
225
|
+
yield {
|
|
226
|
+
type: 'clarify',
|
|
227
|
+
questions: clarity.questions.length > 0
|
|
228
|
+
? clarity.questions
|
|
229
|
+
: [{ text: 'What would you like me to build?', dimension: 'domain', priority: 1 }],
|
|
230
|
+
score: clarity.score,
|
|
231
|
+
dimensions: clarity.dimensions,
|
|
232
|
+
summary: `Not a UI request (${intentCheck.reason}). ${clarity.summary}`,
|
|
233
|
+
rejected: true,
|
|
234
|
+
reason: intentCheck.reason,
|
|
235
|
+
};
|
|
236
|
+
return; // Do NOT continue to generation pipeline
|
|
237
|
+
}
|
|
238
|
+
}
|
|
239
|
+
|
|
240
|
+
const adapter = llmAdapter || await createAdapter({ model });
|
|
241
|
+
// For multi-turn: keep the old executionId for artifact history lookup,
|
|
242
|
+
// but always start a fresh pipeline execution for stage tracking.
|
|
243
|
+
const previousExecId = executionId;
|
|
244
|
+
executionId = engine.start({ intent, mode: 'stream', previousExecId });
|
|
245
|
+
|
|
246
|
+
// ── Stage 1: Interpret ──
|
|
247
|
+
const domain = lookupDomain(intent);
|
|
248
|
+
engine.submitStage(executionId, 'interpret', { domain, intent, confidence: domain.confidence });
|
|
249
|
+
yield { type: 'status', stage: 'interpret', message: `Domain: ${domain.domain}` };
|
|
250
|
+
|
|
251
|
+
// ── Clarity gate: yield clarify event if intent is vague ──
|
|
252
|
+
// Only for fresh intents (not multi-turn iterations which have prior context)
|
|
253
|
+
if (!previousExecId) {
|
|
254
|
+
const clarity = assessClarity(intent, domain);
|
|
255
|
+
if (!clarity.clear && clarity.questions.length > 0) {
|
|
256
|
+
yield {
|
|
257
|
+
type: 'clarify',
|
|
258
|
+
questions: clarity.questions,
|
|
259
|
+
score: clarity.score,
|
|
260
|
+
dimensions: clarity.dimensions,
|
|
261
|
+
summary: clarity.summary,
|
|
262
|
+
};
|
|
263
|
+
// The consumer decides whether to pause or continue.
|
|
264
|
+
// If they continue iterating, generation proceeds with what we have.
|
|
265
|
+
// If they stop iterating and re-submit with more detail, a new stream starts.
|
|
266
|
+
}
|
|
267
|
+
}
|
|
268
|
+
|
|
269
|
+
// ── Stage 2: Analyze ──
|
|
270
|
+
const context = await getContext(intent, 2);
|
|
271
|
+
engine.submitStage(executionId, 'analyze', {
|
|
272
|
+
context, componentCount: context.components.length, patternCount: context.patterns.length,
|
|
273
|
+
confidence: context.components.length > 0 ? 0.85 : 0.5,
|
|
274
|
+
});
|
|
275
|
+
yield { type: 'status', stage: 'analyze', message: `${context.components.length} components, ${context.patterns.length} patterns` };
|
|
276
|
+
|
|
277
|
+
// ── Research: web search enrichment (optional) ──
|
|
278
|
+
let researchContext = '';
|
|
279
|
+
const { references } = detectReferences(intent);
|
|
280
|
+
if (references.length > 0 || search) {
|
|
281
|
+
yield { type: 'status', stage: 'research', message: references.length > 0 ? `Researching: ${references.join(', ')}` : 'Searching for patterns...' };
|
|
282
|
+
try {
|
|
283
|
+
const research = await researchIntent(intent, { search, llmAdapter: adapter });
|
|
284
|
+
researchContext = research.context;
|
|
285
|
+
if (research.references.length > 0) {
|
|
286
|
+
yield { type: 'research', references: research.references, insights: research.insights };
|
|
287
|
+
}
|
|
288
|
+
} catch {
|
|
289
|
+
// Research failed — continue without it
|
|
290
|
+
}
|
|
291
|
+
}
|
|
292
|
+
|
|
293
|
+
// ── Stage 3: Plan ──
|
|
294
|
+
// Use semantic search in LLM modes — feeds pattern index to LLM for conceptual matching
|
|
295
|
+
const { matches: patterns, remix: remixSuggestion } = await searchBlocksSemantic(intent, { llmAdapter: adapter, remix: true });
|
|
296
|
+
engine.submitStage(executionId, 'plan', {
|
|
297
|
+
patterns: patterns.map(p => p.name),
|
|
298
|
+
strategy: patterns.length > 0 ? 'adapt-pattern' : 'generate-fresh',
|
|
299
|
+
confidence: patterns.length > 0 ? 0.9 : 0.7,
|
|
300
|
+
hasRemix: !!remixSuggestion,
|
|
301
|
+
});
|
|
302
|
+
yield { type: 'status', stage: 'plan', message: patterns.length > 0 ? `Adapting: ${patterns[0].name}${remixSuggestion ? ' (with remix)' : ''}` : 'Generating fresh' };
|
|
303
|
+
|
|
304
|
+
// ── Decomposition: split complex intents into subtasks ──
|
|
305
|
+
// Skip decomposition when a pattern match exists — the pattern already handles composition
|
|
306
|
+
const decomposition = patterns.length === 0 ? decomposeIntent(intent) : { shouldDecompose: false, subtasks: [], layout: null, original: intent };
|
|
307
|
+
if (decomposition.shouldDecompose && decomposition.subtasks.length >= 3) {
|
|
308
|
+
yield { type: 'status', stage: 'decompose', message: `Splitting into ${decomposition.subtasks.length} subtasks: ${decomposition.subtasks.map(s => s.label).join(', ')}` };
|
|
309
|
+
yield { type: 'decompose', subtasks: decomposition.subtasks, layout: decomposition.layout };
|
|
310
|
+
|
|
311
|
+
// Generate each subtask independently
|
|
312
|
+
const subtaskResults = [];
|
|
313
|
+
for (const subtask of decomposition.subtasks) {
|
|
314
|
+
yield { type: 'status', stage: 'generate', message: `Generating: ${subtask.label}...` };
|
|
315
|
+
try {
|
|
316
|
+
const subResult = await adapter.complete({
|
|
317
|
+
messages: [{ role: 'user', content: subtask.intent }],
|
|
318
|
+
systemPrompt: await buildSystemPrompt(context, patterns, researchContext, intent),
|
|
319
|
+
});
|
|
320
|
+
const subMessages = parseA2UIResponse(subResult.content, { executionId, mode: 'stream', intent: subtask.intent });
|
|
321
|
+
subtaskResults.push({ label: subtask.label, messages: subMessages });
|
|
322
|
+
} catch {
|
|
323
|
+
// Failed subtask — skip it
|
|
324
|
+
subtaskResults.push({ label: subtask.label, messages: [{ type: 'updateComponents', surfaceId: 'default', components: [{ id: 'root', component: 'Text', variant: 'body', textContent: `${subtask.label} (generation failed)` }] }] });
|
|
325
|
+
}
|
|
326
|
+
}
|
|
327
|
+
|
|
328
|
+
// Compose into layout
|
|
329
|
+
const composed = composeSubtasks(decomposition.layout, subtaskResults);
|
|
330
|
+
|
|
331
|
+
engine.submitStage(executionId, 'generate', {
|
|
332
|
+
messages: composed, source: 'decomposed', decomposed: true,
|
|
333
|
+
subtasks: decomposition.subtasks.length, confidence: 0.8,
|
|
334
|
+
});
|
|
335
|
+
|
|
336
|
+
const composedValidation = validateSchema(composed, { intent });
|
|
337
|
+
|
|
338
|
+
engine.submitStage(executionId, 'validate', {
|
|
339
|
+
...composedValidation, confidence: composedValidation.score / 100,
|
|
340
|
+
});
|
|
341
|
+
|
|
342
|
+
// Alignment check
|
|
343
|
+
const alignment = checkIntentAlignment(intent, composed[0]?.components || []);
|
|
344
|
+
|
|
345
|
+
const storeId = previousExecId || executionId;
|
|
346
|
+
store.record(storeId, { messages: composed, summary: intent, validation: composedValidation, intent });
|
|
347
|
+
engine.submitStage(executionId, 'render', { stored: true, decomposed: true, subtasks: decomposition.subtasks.length });
|
|
348
|
+
|
|
349
|
+
// Fire-and-forget feedback logging (decomposed stream)
|
|
350
|
+
feedbackStore.logExecution({
|
|
351
|
+
executionId: storeId, intent, mode: 'stream',
|
|
352
|
+
domain: domain.domain, patternMatch: null,
|
|
353
|
+
score: composedValidation?.score, componentCount: composed[0]?.components?.length || 0,
|
|
354
|
+
}).catch(() => {});
|
|
355
|
+
|
|
356
|
+
yield {
|
|
357
|
+
type: 'complete',
|
|
358
|
+
executionId: storeId,
|
|
359
|
+
messages: composed,
|
|
360
|
+
validation: composedValidation,
|
|
361
|
+
alignment,
|
|
362
|
+
suggestions: generateSuggestions(intent, domain, composed),
|
|
363
|
+
pipeline: engine.getState(executionId),
|
|
364
|
+
};
|
|
365
|
+
return;
|
|
366
|
+
}
|
|
367
|
+
|
|
368
|
+
// ── Stage 4: Generate via LLM stream ──
|
|
369
|
+
const systemPrompt = await buildSystemPrompt(context, patterns, researchContext, intent);
|
|
370
|
+
const chatMessages = buildChatMessages(intent, previousExecId || executionId);
|
|
371
|
+
|
|
372
|
+
yield { type: 'status', stage: 'generate', message: 'Streaming from LLM...' };
|
|
373
|
+
|
|
374
|
+
let fullText = '';
|
|
375
|
+
let lastPartialRender = '';
|
|
376
|
+
|
|
377
|
+
try {
|
|
378
|
+
if (typeof adapter.stream === 'function') {
|
|
379
|
+
for await (const chunk of adapter.stream({ messages: chatMessages, systemPrompt })) {
|
|
380
|
+
if (chunk.type === 'text') {
|
|
381
|
+
fullText += chunk.content;
|
|
382
|
+
yield { type: 'text_delta', text: chunk.content, snapshot: fullText };
|
|
383
|
+
|
|
384
|
+
// Try to parse partial JSON and render incrementally
|
|
385
|
+
const partial = tryParsePartial(fullText);
|
|
386
|
+
if (partial && JSON.stringify(partial) !== lastPartialRender) {
|
|
387
|
+
lastPartialRender = JSON.stringify(partial);
|
|
388
|
+
yield { type: 'partial', messages: partial };
|
|
389
|
+
}
|
|
390
|
+
}
|
|
391
|
+
}
|
|
392
|
+
} else {
|
|
393
|
+
// Fallback: non-streaming adapter
|
|
394
|
+
const response = await adapter.complete({ messages: chatMessages, systemPrompt });
|
|
395
|
+
fullText = response.content;
|
|
396
|
+
yield { type: 'text_delta', text: fullText, snapshot: fullText };
|
|
397
|
+
}
|
|
398
|
+
} catch (err) {
|
|
399
|
+
yield { type: 'error', error: err };
|
|
400
|
+
return;
|
|
401
|
+
}
|
|
402
|
+
|
|
403
|
+
// ── Stage 5: Parse final + validate ──
|
|
404
|
+
let messages = parseA2UIResponse(fullText, { executionId, mode: 'stream', intent });
|
|
405
|
+
engine.submitStage(executionId, 'generate', {
|
|
406
|
+
messages, source: 'llm-stream', confidence: 0.8,
|
|
407
|
+
});
|
|
408
|
+
|
|
409
|
+
let validation = validateSchema(messages, { intent });
|
|
410
|
+
|
|
411
|
+
// One repair attempt if invalid (non-streaming, quick fix)
|
|
412
|
+
if (!validation.valid) {
|
|
413
|
+
yield { type: 'status', stage: 'validate', message: `Score ${validation.score}/100 — repairing...` };
|
|
414
|
+
try {
|
|
415
|
+
const failedChecks = validation.checks.filter(c => !c.passed);
|
|
416
|
+
const repairResponse = await adapter.complete({
|
|
417
|
+
messages: [{ role: 'user', content: buildRepairPrompt(failedChecks, messages) }],
|
|
418
|
+
systemPrompt,
|
|
419
|
+
});
|
|
420
|
+
messages = parseA2UIResponse(repairResponse.content, { executionId, mode: 'stream', intent });
|
|
421
|
+
validation = validateSchema(messages, { intent });
|
|
422
|
+
} catch { /* keep original */ }
|
|
423
|
+
}
|
|
424
|
+
|
|
425
|
+
engine.submitStage(executionId, 'validate', {
|
|
426
|
+
...validation, confidence: validation.score / 100,
|
|
427
|
+
});
|
|
428
|
+
|
|
429
|
+
// ── Stage 6: Store artifact ──
|
|
430
|
+
// Store under the previous execution ID if iterating (keeps multi-turn history together)
|
|
431
|
+
const storeId = previousExecId || executionId;
|
|
432
|
+
store.record(storeId, { messages, summary: intent, validation, intent });
|
|
433
|
+
engine.submitStage(executionId, 'render', { stored: true, success: validation.valid, confidence: validation.valid ? 1 : 0.5 });
|
|
434
|
+
|
|
435
|
+
// ── Intent alignment check ──
|
|
436
|
+
const alignment = checkIntentAlignment(intent, messages[0]?.components || []);
|
|
437
|
+
|
|
438
|
+
const suggestions = generateSuggestions(intent, domain, messages);
|
|
439
|
+
|
|
440
|
+
// Fire-and-forget feedback logging (stream mode)
|
|
441
|
+
feedbackStore.logExecution({
|
|
442
|
+
executionId: storeId, intent, mode: 'stream',
|
|
443
|
+
domain: domain.domain, patternMatch: patterns[0]?.name,
|
|
444
|
+
score: validation?.score, componentCount: messages[0]?.components?.length || 0,
|
|
445
|
+
}).catch(() => {});
|
|
446
|
+
|
|
447
|
+
// ── Drift detection ──
|
|
448
|
+
const driftMetrics = store.getDriftMetrics(storeId);
|
|
449
|
+
|
|
450
|
+
yield {
|
|
451
|
+
type: 'complete',
|
|
452
|
+
executionId: storeId,
|
|
453
|
+
messages,
|
|
454
|
+
validation,
|
|
455
|
+
alignment,
|
|
456
|
+
drift: driftMetrics,
|
|
457
|
+
suggestions,
|
|
458
|
+
pipeline: engine.getState(executionId),
|
|
459
|
+
};
|
|
460
|
+
}
|
|
461
|
+
|
|
462
|
+
// ── Pro mode (pattern-aware LLM adaptation) ─────────────────────────
// NOTE: the pro-mode implementation and the system-prompt builder
// (buildSystemPrompt — component catalog, card-ui content model, layout
// rules, and example patterns; Feature 2: improved with card-ui rules)
// were moved to engines/monolithic/_shared.js and generate-pro.js.
// This module only imports them — see the import block at the top.
|
|
477
|
+
|
|
478
|
+
/**
|
|
479
|
+
* Get the shared artifact store (for external inspection).
|
|
480
|
+
* @returns {ArtifactStore}
|
|
481
|
+
*/
|
|
482
|
+
export function getArtifactStore() {
|
|
483
|
+
return store;
|
|
484
|
+
}
|
|
485
|
+
|
|
486
|
+
/**
|
|
487
|
+
* Get the shared pipeline engine (for external inspection).
|
|
488
|
+
* @returns {PipelineEngine}
|
|
489
|
+
*/
|
|
490
|
+
export function getPipelineEngine() {
|
|
491
|
+
return engine;
|
|
492
|
+
}
|
|
493
|
+
|
|
494
|
+
// ── Register the three monolithic engines with the dispatcher ──
|
|
495
|
+
// Done at module load so the registry is ready before the first generateUI() call.
|
|
496
|
+
registerMonolithicEngines({
|
|
497
|
+
instant: generateInstant,
|
|
498
|
+
pro: generatePro,
|
|
499
|
+
thinking: generateThinking,
|
|
500
|
+
});
|
|
@@ -0,0 +1,149 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Pattern Export — Save generated UI as reusable named patterns.
|
|
3
|
+
*
|
|
4
|
+
* Produces two downloadable files:
|
|
5
|
+
* {name}.json — A2UI flat adjacency pattern (importable into pattern-library)
|
|
6
|
+
* {name}.html — Rendered HTML snapshot from the canvas
|
|
7
|
+
*
|
|
8
|
+
* Also supports importing patterns back into the runtime library.
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import { registerPattern } from '../../retrieval/pattern-library.js';
|
|
12
|
+
|
|
13
|
+
/**
|
|
14
|
+
* Build a pattern-library-compatible JSON object from generation results.
|
|
15
|
+
*
|
|
16
|
+
* @param {string} name — Pattern name (kebab-case, e.g. "my-pricing-page")
|
|
17
|
+
* @param {object} opts
|
|
18
|
+
* @param {object[]} opts.components — Flat adjacency array from messages[0].components
|
|
19
|
+
* @param {string} opts.intent — Original user intent
|
|
20
|
+
* @param {string} [opts.domain] — Domain classification (forms, data, layout, agent, navigation)
|
|
21
|
+
* @param {string} [opts.description] — Human description (falls back to intent)
|
|
22
|
+
* @returns {object} — Pattern object matching pattern-library format
|
|
23
|
+
*/
|
|
24
|
+
export function buildPatternJSON(name, { components, intent, domain, description }) {
|
|
25
|
+
const slug = name.toLowerCase().replace(/[^a-z0-9]+/g, '-').replace(/^-|-$/g, '');
|
|
26
|
+
|
|
27
|
+
// Extract unique component type names
|
|
28
|
+
const componentTypes = [...new Set(
|
|
29
|
+
components.map(c => c.component).filter(Boolean)
|
|
30
|
+
)];
|
|
31
|
+
|
|
32
|
+
// Clean components: strip internal fields
|
|
33
|
+
const template = components.map(c => {
|
|
34
|
+
const { _surfaceId, ...rest } = c;
|
|
35
|
+
return rest;
|
|
36
|
+
});
|
|
37
|
+
|
|
38
|
+
return {
|
|
39
|
+
name: slug,
|
|
40
|
+
description: description || intent || slug,
|
|
41
|
+
domain: domain || 'layout',
|
|
42
|
+
components: componentTypes,
|
|
43
|
+
template,
|
|
44
|
+
};
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
/**
|
|
48
|
+
* Save a generation as a named pattern. Downloads .json + .html files.
|
|
49
|
+
*
|
|
50
|
+
* @param {string} name — Pattern name
|
|
51
|
+
* @param {object} opts
|
|
52
|
+
* @param {object[]} opts.messages — A2UI messages array
|
|
53
|
+
* @param {object} opts.validation — Validation result (must have score >= 95)
|
|
54
|
+
* @param {string} opts.intent — Original user intent
|
|
55
|
+
* @param {string} [opts.domain] — Domain classification
|
|
56
|
+
* @param {string} [opts.canvasHTML] — Rendered HTML from canvas
|
|
57
|
+
* @param {number} [opts.minScore=95] — Minimum score to allow save
|
|
58
|
+
* @returns {{ json: object, html: string }} — The saved payloads
|
|
59
|
+
*/
|
|
60
|
+
export function savePattern(name, { messages, validation, intent, domain, canvasHTML, minScore = 95 }) {
|
|
61
|
+
if (!validation || validation.score < minScore) {
|
|
62
|
+
throw new Error(`Score ${validation?.score ?? 0} is below minimum ${minScore} for pattern save`);
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
const components = messages?.[0]?.components;
|
|
66
|
+
if (!components || !components.length) {
|
|
67
|
+
throw new Error('No components to save');
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
const json = buildPatternJSON(name, { components, intent, domain });
|
|
71
|
+
|
|
72
|
+
// Download .json
|
|
73
|
+
downloadFile(`${json.name}.json`, JSON.stringify(json, null, 2), 'application/json');
|
|
74
|
+
|
|
75
|
+
// Download .html (if available)
|
|
76
|
+
const html = canvasHTML || '';
|
|
77
|
+
if (html) {
|
|
78
|
+
const htmlDoc = `<!DOCTYPE html>
|
|
79
|
+
<html lang="en">
|
|
80
|
+
<head>
|
|
81
|
+
<meta charset="UTF-8">
|
|
82
|
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
|
83
|
+
<title>${json.name} — A2UI Pattern</title>
|
|
84
|
+
<style>body { font-family: system-ui, sans-serif; padding: 2rem; }</style>
|
|
85
|
+
</head>
|
|
86
|
+
<body>
|
|
87
|
+
${html}
|
|
88
|
+
</body>
|
|
89
|
+
</html>`;
|
|
90
|
+
downloadFile(`${json.name}.html`, htmlDoc, 'text/html');
|
|
91
|
+
}
|
|
92
|
+
|
|
93
|
+
// Also register in the runtime library
|
|
94
|
+
registerPattern(json);
|
|
95
|
+
|
|
96
|
+
return { json, html };
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
/**
|
|
100
|
+
* Import a pattern JSON object into the runtime pattern library.
|
|
101
|
+
*
|
|
102
|
+
* @param {object|string} patternJSON — Pattern object or JSON string
|
|
103
|
+
* @returns {{ success: boolean, name?: string, error?: string }}
|
|
104
|
+
*/
|
|
105
|
+
export function importPattern(patternJSON) {
|
|
106
|
+
let pattern = patternJSON;
|
|
107
|
+
if (typeof pattern === 'string') {
|
|
108
|
+
try { pattern = JSON.parse(pattern); }
|
|
109
|
+
catch { return { success: false, error: 'Invalid JSON' }; }
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
if (!pattern?.name) return { success: false, error: 'Missing pattern name' };
|
|
113
|
+
if (!pattern?.template || !Array.isArray(pattern.template)) {
|
|
114
|
+
return { success: false, error: 'Missing or invalid template array' };
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
const registered = registerPattern(pattern);
|
|
118
|
+
if (!registered) {
|
|
119
|
+
return { success: false, error: `Pattern "${pattern.name}" already exists` };
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
return { success: true, name: pattern.name };
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
/**
|
|
126
|
+
* Trigger a file download in the browser.
|
|
127
|
+
*
|
|
128
|
+
* @param {string} filename
|
|
129
|
+
* @param {string} content
|
|
130
|
+
* @param {string} [mimeType='application/json']
|
|
131
|
+
*/
|
|
132
|
+
export function downloadFile(filename, content, mimeType = 'application/json') {
|
|
133
|
+
if (typeof document === 'undefined') {
|
|
134
|
+
throw new Error('downloadFile requires a browser environment');
|
|
135
|
+
}
|
|
136
|
+
|
|
137
|
+
const blob = new Blob([content], { type: mimeType });
|
|
138
|
+
const url = URL.createObjectURL(blob);
|
|
139
|
+
const a = document.createElement('a');
|
|
140
|
+
a.href = url;
|
|
141
|
+
a.download = filename;
|
|
142
|
+
a.style.display = 'none';
|
|
143
|
+
// Prevent SPA router from intercepting the blob URL click
|
|
144
|
+
a.addEventListener('click', (e) => e.stopPropagation());
|
|
145
|
+
document.body.appendChild(a);
|
|
146
|
+
a.click();
|
|
147
|
+
document.body.removeChild(a);
|
|
148
|
+
URL.revokeObjectURL(url);
|
|
149
|
+
}
|