@adia-ai/a2ui-compose 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +86 -0
- package/README.md +181 -0
- package/engine/artifacts.js +262 -0
- package/engine/constitution.md +78 -0
- package/engine/context-store.js +218 -0
- package/engine/generator.js +500 -0
- package/engine/pattern-export.js +149 -0
- package/engine/pipeline/engine.js +289 -0
- package/engine/pipeline/types.js +91 -0
- package/engine/reference.js +115 -0
- package/engine/state.js +15 -0
- package/engines/monolithic/_shared.js +1320 -0
- package/engines/monolithic/generate-instant.js +229 -0
- package/engines/monolithic/generate-pro.js +367 -0
- package/engines/monolithic/generate-thinking.js +211 -0
- package/engines/registry.js +195 -0
- package/engines/zettel/_smoke.js +37 -0
- package/engines/zettel/composer.js +146 -0
- package/engines/zettel/fragment-library.js +209 -0
- package/engines/zettel/generate.js +15 -0
- package/engines/zettel/generator-adapter.js +202 -0
- package/engines/zettel/session-store.js +121 -0
- package/engines/zettel/synthesizer.js +343 -0
- package/evals/harness.mjs +193 -0
- package/index.js +16 -0
- package/llm/adapters/anthropic.js +106 -0
- package/llm/adapters/gemini.js +99 -0
- package/llm/adapters/index.js +138 -0
- package/llm/adapters/openai.js +85 -0
- package/llm/adapters/sse.js +50 -0
- package/llm/llm-bridge.js +214 -0
- package/llm/llm-stub.js +69 -0
- package/package.json +41 -0
- package/transpiler/transpiler-maps.js +277 -0
- package/transpiler/transpiler.js +820 -0
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Google Gemini generateContent API adapter.
|
|
3
|
+
* Endpoint: https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent
|
|
4
|
+
* Streaming: .../{model}:streamGenerateContent?alt=sse
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
import { readSSE } from './sse.js';
|
|
8
|
+
|
|
9
|
+
const API_URL = 'https://generativelanguage.googleapis.com/v1beta/models';
const DEFAULT_MAX_TOKENS = 4096;

export const gemini = {
  name: 'gemini',

  /**
   * Build a fetch request for the Gemini generateContent API.
   *
   * @param {{model: string, messages: {role: string, content: string}[],
   *          system?: string, maxTokens?: number, temperature?: number,
   *          stream?: boolean, apiKey: string, proxyUrl?: string}} opts
   * @returns {{url: string, headers: object, body: object}}
   */
  buildRequest(opts) {
    const model = opts.model;
    // Gemini names the assistant role 'model'; every other role maps to 'user'.
    const contents = opts.messages.map((msg) => ({
      role: msg.role === 'assistant' ? 'model' : 'user',
      parts: [{ text: msg.content }],
    }));

    const body = { contents };

    if (opts.system) {
      body.systemInstruction = { parts: [{ text: opts.system }] };
    }

    const generationConfig = {
      maxOutputTokens: opts.maxTokens || DEFAULT_MAX_TOKENS,
    };
    // `!= null` so temperature 0 is still forwarded.
    if (opts.temperature != null) generationConfig.temperature = opts.temperature;
    body.generationConfig = generationConfig;

    // Streaming uses a different action plus ?alt=sse to get SSE framing.
    const action = opts.stream ? 'streamGenerateContent?alt=sse' : 'generateContent';

    return {
      url: opts.proxyUrl || `${API_URL}/${model}:${action}`,
      headers: {
        'content-type': 'application/json',
        'x-goog-api-key': opts.apiKey,
      },
      body,
    };
  },

  /**
   * Parse a non-streaming generateContent response.
   *
   * BUGFIX: the finishReason ternary previously returned 'end' in BOTH
   * branches, so truncation (finishReason 'MAX_TOKENS') was reported as a
   * clean stop. Non-STOP finish reasons are now passed through, matching the
   * behavior of parseStream below, so downstream consumers can detect
   * truncated output.
   *
   * @returns {{text: string, usage: {input: number, output: number}, stopReason: string}}
   */
  parseResponse(data) {
    const candidate = data.candidates?.[0];
    const parts = candidate?.content?.parts ?? [];
    const text = parts.map((p) => p.text ?? '').join('');
    const finishReason = candidate?.finishReason;
    return {
      text,
      usage: {
        input: data.usageMetadata?.promptTokenCount ?? 0,
        output: data.usageMetadata?.candidatesTokenCount ?? 0,
      },
      // 'STOP' (or absent) → normal end; anything else (e.g. 'MAX_TOKENS',
      // 'SAFETY') is surfaced verbatim.
      stopReason: finishReason && finishReason !== 'STOP' ? finishReason : 'end',
    };
  },

  /**
   * Consume an SSE streaming response, yielding text deltas and a final
   * 'done' chunk carrying the accumulated text, usage, and stop reason.
   *
   * @param {Response} response — fetch Response whose body is an SSE stream
   */
  async *parseStream(response) {
    let snapshot = '';
    let usage = { input: 0, output: 0 };
    let stopReason = 'end';

    for await (const event of readSSE(response.body)) {
      if (event.done) break;
      let data;
      try { data = JSON.parse(event.data); } catch { continue; }

      // usageMetadata arrives on stream chunks; keep the most recent values.
      if (data.usageMetadata) {
        usage.input = data.usageMetadata.promptTokenCount ?? 0;
        usage.output = data.usageMetadata.candidatesTokenCount ?? 0;
      }

      const candidate = data.candidates?.[0];
      if (!candidate) continue;

      // Only non-STOP reasons are interesting; STOP keeps the default 'end'.
      if (candidate.finishReason && candidate.finishReason !== 'STOP') {
        stopReason = candidate.finishReason;
      }

      const parts = candidate.content?.parts;
      if (!parts?.length) continue;

      for (const part of parts) {
        if (part.text != null) {
          snapshot += part.text;
          yield { type: 'text', text: part.text, snapshot };
        }
      }
    }

    yield { type: 'done', text: snapshot, usage, stopReason };
  },
};
|
|
@@ -0,0 +1,138 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LLM Client — Provider-agnostic chat interface.
|
|
3
|
+
*
|
|
4
|
+
* Usage:
|
|
5
|
+
* import { createClient, chat, streamChat } from './llm/index.js';
|
|
6
|
+
*
|
|
7
|
+
* // Quick use (provider auto-detected from model name)
|
|
8
|
+
* const reply = await chat({
|
|
9
|
+
* apiKey: 'sk-ant-...',
|
|
10
|
+
* model: 'claude-sonnet-4-20250514',
|
|
11
|
+
* messages: [{ role: 'user', content: 'Hello' }],
|
|
12
|
+
* });
|
|
13
|
+
*
|
|
14
|
+
* for await (const chunk of streamChat({
|
|
15
|
+
* apiKey: 'sk-...',
|
|
16
|
+
* model: 'gpt-4o',
|
|
17
|
+
* messages: [{ role: 'user', content: 'Hello' }],
|
|
18
|
+
* })) {
|
|
19
|
+
* if (chunk.type === 'text') process.stdout.write(chunk.text);
|
|
20
|
+
* }
|
|
21
|
+
*
|
|
22
|
+
* // Explicit provider
|
|
23
|
+
* const reply = await chat({ provider: 'gemini', apiKey: '...', model: 'gemini-2.5-flash', ... });
|
|
24
|
+
*
|
|
25
|
+
* // Reusable client instance
|
|
26
|
+
* const client = createClient({ provider: 'anthropic', apiKey: '...' });
|
|
27
|
+
* const reply = await client.chat({ model: 'claude-sonnet-4-20250514', messages: [...] });
|
|
28
|
+
* for await (const chunk of client.stream({ model: '...', messages: [...] })) { ... }
|
|
29
|
+
*
|
|
30
|
+
* Chunk types (streaming):
|
|
31
|
+
* { type: 'text', text: 'delta', snapshot: 'full text so far' }
|
|
32
|
+
* { type: 'thinking', text: 'thinking delta' }
|
|
33
|
+
* { type: 'done', text: 'full response', usage: { input, output }, stopReason }
|
|
34
|
+
* { type: 'error', error: Error }
|
|
35
|
+
*/
|
|
36
|
+
|
|
37
|
+
import { anthropic } from './anthropic.js';
|
|
38
|
+
import { openai } from './openai.js';
|
|
39
|
+
import { gemini } from './gemini.js';
|
|
40
|
+
|
|
41
|
+
// ── Provider registry ──
|
|
42
|
+
|
|
43
|
+
/** Registry of built-in provider adapters, keyed by provider name. */
const providers = { anthropic, openai, gemini };
|
|
44
|
+
|
|
45
|
+
/**
 * Best-effort provider detection from a model name.
 * Checks anthropic, then openai, then gemini; returns null when unknown.
 * @param {string} [model]
 * @returns {'anthropic'|'openai'|'gemini'|null}
 */
function detectProvider(model) {
  if (!model) return null;
  const name = model.toLowerCase();
  const looksOpenAI = ['gpt', 'o1', 'o3', 'o4'].some((hint) => name.includes(hint));
  if (name.includes('claude') || name.startsWith('anthropic/')) return 'anthropic';
  if (looksOpenAI || name.startsWith('openai/')) return 'openai';
  if (name.includes('gemini') || name.startsWith('google/')) return 'gemini';
  return null;
}
|
|
54
|
+
|
|
55
|
+
/**
 * Resolve the adapter for a request: an explicit opts.provider wins,
 * otherwise the provider is inferred from opts.model.
 * @throws {Error} when no provider can be determined or it is unregistered.
 */
function resolveAdapter(opts) {
  const providerName = opts.provider || detectProvider(opts.model);
  if (!providerName) {
    throw new Error(`Cannot detect provider for model "${opts.model}". Set provider explicitly.`);
  }
  const adapter = providers[providerName];
  if (adapter) return adapter;
  throw new Error(`Unknown provider "${providerName}". Available: ${Object.keys(providers).join(', ')}`);
}
|
|
62
|
+
|
|
63
|
+
// ── Standalone functions ──
|
|
64
|
+
|
|
65
|
+
/**
 * Non-streaming chat completion.
 * Resolves the provider adapter, POSTs the built request, and returns the
 * adapter-normalized response. HTTP failures throw with the provider's own
 * error message when one is present in the JSON body.
 * @returns {Promise<{text: string, usage: {input: number, output: number}, stopReason: string}>}
 */
export async function chat(opts) {
  const adapter = resolveAdapter(opts);
  const request = adapter.buildRequest({ ...opts, stream: false });

  const res = await fetch(request.url, {
    method: 'POST',
    headers: request.headers,
    body: JSON.stringify(request.body),
    signal: opts.signal,
  });

  if (!res.ok) {
    // Body may not be JSON on some failures — fall back to a generic message.
    const payload = await res.json().catch(() => ({}));
    const message = payload?.error?.message || `${adapter.name} API error ${res.status}`;
    throw new Error(message);
  }

  const data = await res.json();
  return adapter.parseResponse(data);
}
|
|
87
|
+
|
|
88
|
+
/**
 * Streaming chat — yields chunks as they arrive.
 * Network and HTTP errors are surfaced as { type: 'error' } chunks rather
 * than thrown, so consumers can handle them inside the iteration loop.
 * @returns {AsyncGenerator<{type: string, text?: string, snapshot?: string, usage?: object, error?: Error}>}
 */
export async function* streamChat(opts) {
  const adapter = resolveAdapter(opts);
  const request = adapter.buildRequest({ ...opts, stream: true });

  let res;
  try {
    res = await fetch(request.url, {
      method: 'POST',
      headers: request.headers,
      body: JSON.stringify(request.body),
      signal: opts.signal,
    });
  } catch (err) {
    yield { type: 'error', error: err };
    return;
  }

  if (!res.ok) {
    // Body may not be JSON on some failures — fall back to a generic message.
    const payload = await res.json().catch(() => ({}));
    const message = payload?.error?.message || `${adapter.name} API error ${res.status}`;
    yield { type: 'error', error: new Error(message) };
    return;
  }

  yield* adapter.parseStream(res);
}
|
|
117
|
+
|
|
118
|
+
// ── Client factory ──
|
|
119
|
+
|
|
120
|
+
/**
 * Create a reusable client instance with defaults baked in.
 * Per-call options override the stored defaults key-by-key.
 *
 * @param {object} defaults
 * @param {string} defaults.provider — 'anthropic' | 'openai' | 'gemini'
 * @param {string} defaults.apiKey
 * @param {string} [defaults.model] — default model
 * @param {string} [defaults.proxyUrl] — proxy URL (for CORS)
 * @param {string} [defaults.system] — default system prompt
 */
export function createClient(defaults = {}) {
  const merge = (opts) => ({ ...defaults, ...opts });
  return {
    chat(opts) { return chat(merge(opts)); },
    stream(opts) { return streamChat(merge(opts)); },
  };
}
|
|
136
|
+
|
|
137
|
+
// Re-export adapters for direct use
|
|
138
|
+
export { anthropic, openai, gemini };
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI Chat Completions API adapter.
|
|
3
|
+
* Endpoint: https://api.openai.com/v1/chat/completions
|
|
4
|
+
* Also compatible with: Groq, Together, Mistral, any OpenAI-compatible API.
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
import { readSSE } from './sse.js';
|
|
8
|
+
|
|
9
|
+
const API_URL = 'https://api.openai.com/v1/chat/completions';

// NOTE: an unused DEFAULT_MAX_TOKENS constant was removed (dead code).
// Unlike the gemini adapter, this one deliberately omits max_tokens from the
// request unless the caller supplies opts.maxTokens, letting the API use its
// own model-dependent default.
export const openai = {
  name: 'openai',

  /**
   * Build a fetch request for the Chat Completions API (also compatible with
   * Groq, Together, Mistral, and other OpenAI-style endpoints).
   *
   * @param {{model: string, messages: {role: string, content: string}[],
   *          system?: string, maxTokens?: number, temperature?: number,
   *          stream?: boolean, apiKey: string, proxyUrl?: string}} opts
   * @returns {{url: string, headers: object, body: object}}
   */
  buildRequest(opts) {
    const messages = [];
    // OpenAI takes the system prompt as a leading 'system' message.
    if (opts.system) messages.push({ role: 'system', content: opts.system });
    for (const msg of opts.messages) {
      messages.push({ role: msg.role, content: msg.content });
    }

    const body = {
      model: opts.model,
      messages,
      stream: !!opts.stream,
    };
    if (opts.maxTokens) body.max_tokens = opts.maxTokens;
    // `!= null` so temperature 0 is still forwarded.
    if (opts.temperature != null) body.temperature = opts.temperature;
    // Ask the API to attach a usage block to the final stream chunk.
    if (opts.stream) body.stream_options = { include_usage: true };

    return {
      url: opts.proxyUrl || API_URL,
      headers: {
        'content-type': 'application/json',
        'authorization': `Bearer ${opts.apiKey}`,
      },
      body,
    };
  },

  /**
   * Parse a non-streaming chat completion response.
   * finish_reason 'stop' is normalized to 'end'; any other reason
   * (e.g. 'length') is passed through so callers can detect truncation.
   */
  parseResponse(data) {
    const choice = data.choices?.[0];
    const text = choice?.message?.content ?? '';
    return {
      text,
      usage: { input: data.usage?.prompt_tokens ?? 0, output: data.usage?.completion_tokens ?? 0 },
      stopReason: choice?.finish_reason === 'stop' ? 'end' : (choice?.finish_reason ?? 'end'),
    };
  },

  /**
   * Consume an SSE streaming response, yielding text / thinking deltas and a
   * final 'done' chunk carrying accumulated text, usage, and stop reason.
   *
   * @param {Response} response — fetch Response whose body is an SSE stream
   */
  async *parseStream(response) {
    let snapshot = '';
    let usage = { input: 0, output: 0 };
    let stopReason = 'end';

    for await (const event of readSSE(response.body)) {
      if (event.done) break;
      let data;
      try { data = JSON.parse(event.data); } catch { continue; }

      // With stream_options.include_usage the final chunk carries usage.
      if (data.usage) {
        usage.input = data.usage.prompt_tokens ?? 0;
        usage.output = data.usage.completion_tokens ?? 0;
      }

      const choice = data.choices?.[0];
      if (!choice) continue;

      if (choice.finish_reason) {
        stopReason = choice.finish_reason === 'stop' ? 'end' : choice.finish_reason;
      }

      const delta = choice.delta;
      if (delta?.content) {
        snapshot += delta.content;
        yield { type: 'text', text: delta.content, snapshot };
      }
      // Some OpenAI-compatible backends stream reasoning separately.
      if (delta?.reasoning_content) {
        yield { type: 'thinking', text: delta.reasoning_content };
      }
    }

    yield { type: 'done', text: snapshot, usage, stopReason };
  },
};
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* SSE Parser — shared by Anthropic, OpenAI, and Gemini adapters.
|
|
3
|
+
* Handles partial line buffering, double-newline splitting, and [DONE] detection.
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
/**
 * Minimal SSE reader shared by the provider adapters.
 * Buffers partial frames across reads, splits on blank lines, and flags the
 * OpenAI-style '[DONE]' sentinel via the `done` property.
 *
 * @param {ReadableStream<Uint8Array>} body — fetch Response body
 * @yields {{event: string|undefined, data: string, done: boolean}}
 */
export async function* readSSE(body) {
  const reader = body.getReader();
  const decoder = new TextDecoder();
  let pending = '';
  try {
    while (true) {
      const chunk = await reader.read();
      if (chunk.done) break;
      pending += decoder.decode(chunk.value, { stream: true });
      const parsed = parse(pending);
      pending = parsed.remainder;
      yield* parsed.events;
    }
    // Flush a trailing frame that never got its blank-line terminator.
    if (pending.trim()) {
      yield* parse(pending + '\n\n').events;
    }
  } finally {
    reader.releaseLock();
  }
}

/**
 * Split buffered text into complete SSE frames plus the unterminated tail.
 * @returns {{events: {event: string|undefined, data: string, done: boolean}[], remainder: string}}
 */
function parse(text) {
  const frames = text.split(/\n\n|\r\n\r\n/);
  const remainder = frames.pop() ?? '';
  const events = [];
  for (const frame of frames) {
    const trimmed = frame.trim();
    if (!trimmed) continue;
    let eventType;
    const dataLines = [];
    for (const line of trimmed.split(/\r?\n/)) {
      if (line.startsWith(':')) continue; // SSE comment line
      if (line.startsWith('event:')) {
        eventType = line.slice(6).trim();
      } else if (line.startsWith('data:')) {
        // Per the SSE spec, strip at most one leading space from the value.
        const raw = line.slice(5);
        dataLines.push(raw.startsWith(' ') ? raw.slice(1) : raw);
      }
    }
    if (dataLines.length === 0) continue;
    const data = dataLines.join('\n');
    events.push({ event: eventType, data, done: data === '[DONE]' });
  }
  return { events, remainder };
}
|
|
@@ -0,0 +1,214 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LLM Bridge — Wraps AdiaUI's llm module into the AdiaUI createAdapter() API.
|
|
3
|
+
*
|
|
4
|
+
* This is the single integration point between the AdiaUI pipeline and the
|
|
5
|
+
* LLM module. It handles:
|
|
6
|
+
* - Env var reading (VITE_* in browser, process.env in Node)
|
|
7
|
+
* - CORS proxy routing in browser (Vite dev server at /api/llm/*)
|
|
8
|
+
* - API translation (AdiaUI's simple { messages, systemPrompt } → llm module's interface)
|
|
9
|
+
*
|
|
10
|
+
* Consumers call createAdapter() and get an object with .complete() and .stream()
|
|
11
|
+
* matching the AdiaUI pipeline interface.
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
import { StubLLMAdapter } from './llm-stub.js';
|
|
15
|
+
|
|
16
|
+
// Lazy-loaded — ../llm/index.js uses Vite aliases that don't resolve in Node
let _createClient = null;

/**
 * Lazily import the adapter module's createClient factory.
 * Returns null when the module cannot be loaded (e.g. plain Node without the
 * bundler aliases); a failed load is retried on the next call because the
 * cache stays null.
 * @returns {Promise<Function|null>}
 */
async function getCreateClient() {
  if (_createClient) return _createClient;
  try {
    const mod = await import('./adapters/index.js');
    _createClient = mod.createClient;
  } catch {
    _createClient = null;
  }
  return _createClient;
}
|
|
29
|
+
|
|
30
|
+
// ── Environment ──────────────────────────────────────────────────────────
|
|
31
|
+
|
|
32
|
+
/**
 * Read a config value from the environment.
 * Checks Vite-style import.meta.env first (VITE_-prefixed key, then the bare
 * key), then falls back to process.env in Node. Returns '' when unset.
 * @param {string} key — env var name without the VITE_ prefix
 * @returns {string}
 */
function getEnv(key) {
  try {
    const viteEnv = import.meta.env;
    if (viteEnv) {
      const fromVite = viteEnv[`VITE_${key}`] || viteEnv[key];
      if (fromVite) return fromVite;
    }
  } catch {}
  const nodeEnv = typeof process !== 'undefined' ? process.env : undefined;
  if (nodeEnv) {
    return nodeEnv[key] || '';
  }
  return '';
}
|
|
45
|
+
|
|
46
|
+
// True when running in a browser (window exists); false in Node.
const IS_BROWSER = typeof window !== 'undefined';

/**
 * In the browser, route provider requests through the Vite dev-server proxy
 * at /api/llm/* to avoid CORS. In Node (or for unknown providers), return
 * undefined so the llm module uses its default endpoints.
 * @param {string} provider — 'anthropic' | 'openai' | 'google'
 * @returns {string|undefined}
 */
function resolveBaseUrl(provider) {
  if (!IS_BROWSER) return undefined; // Let the module use its defaults
  switch (provider) {
    case 'anthropic': return '/api/llm/anthropic/v1/messages';
    case 'openai': return '/api/llm/openai/v1/chat/completions';
    case 'google': return '/api/llm/google';
    default: return undefined;
  }
}
|
|
57
|
+
|
|
58
|
+
// ── Factory ──────────────────────────────────────────────────────────────
|
|
59
|
+
|
|
60
|
+
/**
 * Create an LLM adapter for the AdiaUI pipeline.
 *
 * Auto-detects provider from env vars. Returns an object with .complete()
 * and .stream() that match the AdiaUI interface (simple messages + systemPrompt).
 * Falls back to the stub adapter when no key or no llm module is available.
 *
 * @param {object} [opts]
 * @param {string} [opts.provider] — 'anthropic' | 'openai' | 'google' | 'stub'
 * @param {string} [opts.apiKey] — explicit API key (overrides env)
 * @param {string} [opts.model] — model override
 * @returns {StubLLMAdapter | AdiaUILLMBridge}
 */
export async function createAdapter(opts = {}) {
  const provider = opts.provider || getEnv('LLM_PROVIDER') || detectProvider();
  const model = opts.model || getEnv('LLM_MODEL') || undefined;

  if (provider === 'stub') return new StubLLMAdapter();

  // Resolve API key for the detected provider, then fall back across the
  // known provider keys (best-effort, matches original behavior).
  const apiKey = opts.apiKey
    || getEnv(`${provider.toUpperCase()}_API_KEY`)
    || getEnv('ANTHROPIC_API_KEY')
    || getEnv('OPENAI_API_KEY')
    || getEnv('GOOGLE_API_KEY');

  // No key found → fall back to stub
  if (!apiKey) {
    console.warn('LLM Bridge: No API keys found. Using stub adapter.');
    return new StubLLMAdapter();
  }

  const createClient = await getCreateClient();
  if (!createClient) {
    console.warn('LLM Bridge: LLM module not available. Using stub adapter.');
    return new StubLLMAdapter();
  }

  // BUGFIX: the bridge names the Google provider 'google' (GOOGLE_API_KEY,
  // proxy routes, DEFAULT_MODELS), but the adapter registry in
  // ./adapters/index.js registers it as 'gemini'. Passing 'google' straight
  // through made every chat/stream call throw 'Unknown provider "google"'.
  // Translate the name before handing it to the client.
  const clientProvider = provider === 'google' ? 'gemini' : provider;

  const proxyUrl = resolveBaseUrl(provider);
  const resolvedModel = model || DEFAULT_MODELS[provider] || 'claude-sonnet-4-20250514';
  const client = createClient({
    provider: clientProvider,
    apiKey,
    model: resolvedModel,
    ...(proxyUrl ? { proxyUrl } : {}),
  });

  return new AdiaUILLMBridge(client, resolvedModel, provider);
}
|
|
103
|
+
|
|
104
|
+
/** Pick a provider from whichever API key env var is set; 'stub' when none. */
function detectProvider() {
  const providersByKey = [
    ['ANTHROPIC_API_KEY', 'anthropic'],
    ['OPENAI_API_KEY', 'openai'],
    ['GOOGLE_API_KEY', 'google'],
  ];
  for (const [envKey, provider] of providersByKey) {
    if (getEnv(envKey)) return provider;
  }
  return 'stub';
}
|
|
110
|
+
|
|
111
|
+
// ── Bridge class ─────────────────────────────────────────────────────────
|
|
112
|
+
|
|
113
|
+
/**
 * Default model per provider, used when neither opts.model nor the LLM_MODEL
 * env var is set. Keyed by the bridge's provider names (note: 'google', not
 * 'gemini').
 */
const DEFAULT_MODELS = {
  anthropic: 'claude-sonnet-4-20250514',
  openai: 'gpt-4o',
  google: 'gemini-2.0-flash',
};
|
|
119
|
+
|
|
120
|
+
/**
 * Wraps the AdiaUI llm client to match the AdiaUI pipeline's simpler interface.
 *
 * AdiaUI calls: adapter.complete({ messages, systemPrompt })
 * LLM module expects: client.chat({ model, messages, system, ... })
 */
class AdiaUILLMBridge {
  #client;
  #model;
  #provider;

  constructor(client, model, provider) {
    this.#client = client;
    this.#model = model;
    this.#provider = provider;
  }

  /**
   * Build the request object shared by complete() and stream().
   *
   * 32k max_tokens: A2UI JSON for moderately complex UIs (kanban, dashboard,
   * pricing table) routinely exceeds 8k. Truncation produced silent fallbacks
   * that the validator rubber-stamped at ~89/100 — see diagnosis report
   * 2026-04-19. Modern Claude/GPT/Gemini all support ≥32k output cleanly.
   */
  #buildRequest(messages, systemPrompt) {
    return {
      model: this.#model,
      messages,
      system: systemPrompt,
      maxTokens: 32768,
      // Anthropic-only: mark the system prompt as a cache breakpoint. No-op
      // on other providers (unknown opt silently ignored) and no-op below the
      // model's minimum cacheable size.
      cache: this.#provider === 'anthropic',
    };
  }

  /**
   * Map the llm module's { input, output, cache* } usage shape onto the
   * AdiaUI pipeline's token-count field names, defaulting every field to 0.
   */
  #mapUsage(usage) {
    return {
      inputTokens: usage?.input ?? 0,
      outputTokens: usage?.output ?? 0,
      cacheCreationTokens: usage?.cacheCreation ?? 0,
      cacheReadTokens: usage?.cacheRead ?? 0,
    };
  }

  /**
   * Non-streaming completion. Matches AdiaUI interface.
   *
   * @param {{ messages: { role: string, content: string }[], systemPrompt?: string }} opts
   * @returns {Promise<{ content: string, stopReason: string, usage: { inputTokens: number, outputTokens: number } }>}
   */
  async complete({ messages, systemPrompt }) {
    const response = await this.#client.chat(this.#buildRequest(messages, systemPrompt));
    return {
      content: response.text,
      // 'max_tokens' / 'length' / 'MAX_TOKENS' (Gemini) signal truncation;
      // downstream parser uses this to refuse silent fallback rendering.
      stopReason: response.stopReason ?? 'end',
      usage: this.#mapUsage(response.usage),
    };
  }

  /**
   * Streaming completion. Matches AdiaUI interface.
   *
   * @param {{ messages: { role: string, content: string }[], systemPrompt?: string }} opts
   * @yields {{ type: 'text', content: string } | { type: 'done', stopReason: string, usage: { inputTokens: number, outputTokens: number, cacheCreationTokens: number, cacheReadTokens: number } }}
   */
  async *stream({ messages, systemPrompt }) {
    const chunks = this.#client.stream(this.#buildRequest(messages, systemPrompt));
    for await (const chunk of chunks) {
      if (chunk.type === 'text') {
        yield { type: 'text', content: chunk.text };
      } else if (chunk.type === 'done') {
        // Surface the terminal stopReason + cache telemetry so the consumer
        // can detect max_tokens truncation and the dialog recorder can log
        // cache hit-rate per turn.
        yield {
          type: 'done',
          stopReason: chunk.stopReason ?? 'end',
          usage: this.#mapUsage(chunk.usage),
        };
      }
      // Other chunk types (thinking, error) are still available on the
      // underlying adapter but the AdiaUI pipeline doesn't consume them yet.
    }
  }

  /** Expose the underlying client for advanced use. */
  get adapter() { return this.#client; }

  /** Expose provider name for detection. */
  get provider() { return this.#provider; }
}
|
package/llm/llm-stub.js
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* StubLLMAdapter — Deterministic LLM adapter for testing.
|
|
3
|
+
*
|
|
4
|
+
* Returns canned A2UI responses for known prompts. Implements the same
|
|
5
|
+
* interface that a real LLM adapter would (complete, stream) so pipeline
|
|
6
|
+
* code can develop against it without API keys.
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
export class StubLLMAdapter {
  /**
   * Complete a prompt and return a full A2UI response built from the last
   * user message. The systemPrompt is accepted for interface parity but
   * unused by the stub.
   *
   * @param {object} opts
   * @param {object[]} opts.messages — Chat messages (system + user turns)
   * @param {string} [opts.systemPrompt] — System prompt override (ignored)
   * @returns {Promise<{ content: string, usage: { inputTokens: number, outputTokens: number } }>}
   */
  async complete({ messages }) {
    const intent = messages?.[messages.length - 1]?.content || '';
    const components = this.#buildResponse(intent);
    const payload = [
      {
        type: 'updateComponents',
        surfaceId: 'default',
        components,
      },
    ];
    return {
      content: JSON.stringify(payload),
      usage: {
        inputTokens: estimateTokens(JSON.stringify(messages)),
        outputTokens: estimateTokens(JSON.stringify(components)),
      },
    };
  }

  /**
   * Stream a response as an async iterable of chunks.
   *
   * @param {object} request — Same shape as complete()
   * @yields {{ type: 'text', content: string }}
   */
  async *stream(request) {
    const { content } = await this.complete(request);
    // Simulate progressive streaming by yielding the full response
    yield { type: 'text', content };
  }

  /**
   * Build a canned component tree from the intent text.
   * @param {string} intent
   * @returns {object[]}
   */
  #buildResponse(intent) {
    return [
      { id: 'root', component: 'Card', children: ['hdr', 'sec'] },
      { id: 'hdr', component: 'Header', children: ['title'] },
      { id: 'title', component: 'Text', variant: 'h3', textContent: 'Generated UI' },
      { id: 'sec', component: 'Section', children: ['col'] },
      { id: 'col', component: 'Column', children: ['desc'] },
      { id: 'desc', component: 'Text', variant: 'body', textContent: intent || 'No intent provided' },
    ];
  }
}

/** Rough token estimate (~4 chars per token) */
function estimateTokens(text) {
  const length = text?.length || 0;
  return Math.ceil(length / 4);
}
|
package/package.json
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@adia-ai/a2ui-compose",
|
|
3
|
+
"version": "0.0.1",
|
|
4
|
+
"description": "AdiaUI A2UI compose engine — framework-agnostic. Takes natural-language intents + a catalog and produces A2UI protocol messages. Pairs with `@adia-ai/a2ui-retrieval` (intent classification, catalog lookup) and `@adia-ai/a2ui-validator` (schema + semantic checks).",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"exports": {
|
|
7
|
+
".": "./index.js",
|
|
8
|
+
"./engine": "./engine/generator.js",
|
|
9
|
+
"./engines/zettel": "./engines/zettel/generator-adapter.js",
|
|
10
|
+
"./engines/registry": "./engines/registry.js",
|
|
11
|
+
"./llm": "./llm/llm-bridge.js",
|
|
12
|
+
"./llm/*": "./llm/*.js",
|
|
13
|
+
"./transpiler": "./transpiler/transpiler.js",
|
|
14
|
+
"./evals": "./evals/harness.mjs"
|
|
15
|
+
},
|
|
16
|
+
"files": [
|
|
17
|
+
"engine/",
|
|
18
|
+
"engines/",
|
|
19
|
+
"llm/",
|
|
20
|
+
"evals/",
|
|
21
|
+
"transpiler/",
|
|
22
|
+
"index.js",
|
|
23
|
+
"README.md",
|
|
24
|
+
"CHANGELOG.md"
|
|
25
|
+
],
|
|
26
|
+
"license": "MIT",
|
|
27
|
+
"publishConfig": {
|
|
28
|
+
"access": "public",
|
|
29
|
+
"registry": "https://registry.npmjs.org"
|
|
30
|
+
},
|
|
31
|
+
"repository": {
|
|
32
|
+
"type": "git",
|
|
33
|
+
"url": "git+https://github.com/adiahealth/gen-ui-kit.git",
|
|
34
|
+
"directory": "packages/a2ui/compose"
|
|
35
|
+
},
|
|
36
|
+
"dependencies": {
|
|
37
|
+
"@adia-ai/a2ui-utils": "^0.0.2",
|
|
38
|
+
"@adia-ai/a2ui-retrieval": "^0.0.1",
|
|
39
|
+
"@adia-ai/a2ui-validator": "^0.0.1"
|
|
40
|
+
}
|
|
41
|
+
}
|