@adia-ai/llm 0.3.1 → 0.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/adapters/index.js +38 -8
- package/index.js +6 -0
- package/models.js +38 -0
- package/package.json +5 -3
package/adapters/index.js
CHANGED
|
@@ -62,12 +62,32 @@ function resolveAdapter(opts) {
|
|
|
62
62
|
|
|
63
63
|
// ── Proxy mode ──
|
|
64
64
|
//
|
|
65
|
-
//
|
|
66
|
-
//
|
|
67
|
-
//
|
|
68
|
-
//
|
|
69
|
-
//
|
|
70
|
-
//
|
|
65
|
+
// Two proxy flavors are supported:
|
|
66
|
+
//
|
|
67
|
+
// 1. Smart proxy (e.g. packages/llm/server.js on :3456) — speaks a
|
|
68
|
+
// provider-neutral protocol: { provider, model, messages, system?,
|
|
69
|
+
// maxTokens?, temperature?, thinking?, stream }. The proxy holds
|
|
70
|
+
// the real API key and reformats per upstream provider.
|
|
71
|
+
//
|
|
72
|
+
// 2. Passthrough proxy (e.g. Vite dev server's /api/llm/<provider>/...)
|
|
73
|
+
// — dumb URL rewriter that forwards the request body + headers
|
|
74
|
+
// verbatim to the upstream API. The client must send the real
|
|
75
|
+
// upstream body shape AND the real auth header (x-api-key for
|
|
76
|
+
// Anthropic, Authorization: Bearer for OpenAI/Gemini).
|
|
77
|
+
//
|
|
78
|
+
// We distinguish by URL shape: anything matching `/api/llm/<provider>/`
|
|
79
|
+
// is treated as a passthrough proxy and routed through buildRequest()
|
|
80
|
+
// with the URL replaced. Everything else is assumed to be a smart proxy.
|
|
81
|
+
//
|
|
82
|
+
// Each adapter still parses the upstream's streamed body via its own
|
|
83
|
+
// parseStream — passthrough proxies pipe SSE bytes verbatim, smart
|
|
84
|
+
// proxies must do the same.
|
|
85
|
+
|
|
86
|
+
const PASSTHROUGH_PROXY_RE = /\/api\/llm\/[a-z]+(\/|$)/;
|
|
87
|
+
|
|
88
|
+
function isPassthroughProxy(url) {
|
|
89
|
+
return typeof url === 'string' && PASSTHROUGH_PROXY_RE.test(url);
|
|
90
|
+
}
|
|
71
91
|
|
|
72
92
|
function proxyRequest(opts, stream) {
|
|
73
93
|
const provider = opts.provider || detectProvider(opts.model);
|
|
@@ -88,6 +108,16 @@ function proxyRequest(opts, stream) {
|
|
|
88
108
|
};
|
|
89
109
|
}
|
|
90
110
|
|
|
111
|
+
/**
|
|
112
|
+
* Build a passthrough-proxy request: real upstream body + real auth
|
|
113
|
+
* header, but URL pointed at the proxy. The proxy forwards verbatim.
|
|
114
|
+
*/
|
|
115
|
+
function passthroughRequest(opts, stream) {
|
|
116
|
+
const adapter = resolveAdapter(opts);
|
|
117
|
+
const built = adapter.buildRequest({ ...opts, stream });
|
|
118
|
+
return { ...built, url: opts.proxyUrl };
|
|
119
|
+
}
|
|
120
|
+
|
|
91
121
|
// ── Standalone functions ──
|
|
92
122
|
|
|
93
123
|
/**
|
|
@@ -97,7 +127,7 @@ function proxyRequest(opts, stream) {
|
|
|
97
127
|
export async function chat(opts) {
|
|
98
128
|
const adapter = resolveAdapter(opts);
|
|
99
129
|
const { url, headers, body } = opts.proxyUrl
|
|
100
|
-
? proxyRequest(opts, false)
|
|
130
|
+
? (isPassthroughProxy(opts.proxyUrl) ? passthroughRequest(opts, false) : proxyRequest(opts, false))
|
|
101
131
|
: adapter.buildRequest({ ...opts, stream: false });
|
|
102
132
|
|
|
103
133
|
const res = await fetch(url, {
|
|
@@ -122,7 +152,7 @@ export async function chat(opts) {
|
|
|
122
152
|
export async function* streamChat(opts) {
|
|
123
153
|
const adapter = resolveAdapter(opts);
|
|
124
154
|
const { url, headers, body } = opts.proxyUrl
|
|
125
|
-
? proxyRequest(opts, true)
|
|
155
|
+
? (isPassthroughProxy(opts.proxyUrl) ? passthroughRequest(opts, true) : proxyRequest(opts, true))
|
|
126
156
|
: adapter.buildRequest({ ...opts, stream: true });
|
|
127
157
|
|
|
128
158
|
let res;
|
package/index.js
CHANGED
|
@@ -6,6 +6,7 @@
|
|
|
6
6
|
* consumer that needs to talk to anthropic / openai / gemini.
|
|
7
7
|
*
|
|
8
8
|
* import { chat, streamChat, createClient } from '@adia-ai/llm';
|
|
9
|
+
* import { MODELS, DEFAULT_MODEL } from '@adia-ai/llm/models';
|
|
9
10
|
* import { createAdapter } from '@adia-ai/llm/bridge';
|
|
10
11
|
* import { StubLLMAdapter } from '@adia-ai/llm/stub';
|
|
11
12
|
*/
|
|
@@ -15,3 +16,8 @@ export {
|
|
|
15
16
|
streamChat,
|
|
16
17
|
createClient,
|
|
17
18
|
} from './adapters/index.js';
|
|
19
|
+
|
|
20
|
+
export {
|
|
21
|
+
MODELS,
|
|
22
|
+
DEFAULT_MODEL,
|
|
23
|
+
} from './models.js';
|
package/models.js
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Shared model catalog — chat-input-ui grouped-options shape.
|
|
3
|
+
*
|
|
4
|
+
* Three consumers (as of 2026-05-06): apps/chat/, apps/genui/gen-ui-ux/,
|
|
5
|
+
* apps/genui/gen-ui/. Each previously carried a near-identical literal
|
|
6
|
+
* array; this module promotes them to one source.
|
|
7
|
+
*
|
|
8
|
+
* Format matches `<chat-input-ui>.models` setter (a 2D grouped-options
|
|
9
|
+
* structure consumed by an internal `<select-ui>` with `<optgroup>`s).
|
|
10
|
+
*
|
|
11
|
+
* import { MODELS, DEFAULT_MODEL } from '@adia-ai/llm/models';
|
|
12
|
+
* chatInput.models = MODELS;
|
|
13
|
+
* chatInput.model = DEFAULT_MODEL;
|
|
14
|
+
*/
|
|
15
|
+
|
|
16
|
+
export const MODELS = [
|
|
17
|
+
{
|
|
18
|
+
label: 'Anthropic',
|
|
19
|
+
options: [
|
|
20
|
+
{ value: 'claude-haiku-4-5-20251001', label: 'Haiku 4.5' },
|
|
21
|
+
{ value: 'claude-sonnet-4-6', label: 'Sonnet 4.6' },
|
|
22
|
+
],
|
|
23
|
+
},
|
|
24
|
+
{
|
|
25
|
+
label: 'OpenAI',
|
|
26
|
+
options: [
|
|
27
|
+
{ value: 'gpt-4o-mini', label: 'GPT-4o Mini' },
|
|
28
|
+
],
|
|
29
|
+
},
|
|
30
|
+
{
|
|
31
|
+
label: 'Google',
|
|
32
|
+
options: [
|
|
33
|
+
{ value: 'gemini-2.5-flash', label: 'Gemini 2.5 Flash' },
|
|
34
|
+
],
|
|
35
|
+
},
|
|
36
|
+
];
|
|
37
|
+
|
|
38
|
+
export const DEFAULT_MODEL = 'claude-haiku-4-5-20251001';
|
package/package.json
CHANGED
|
@@ -1,12 +1,13 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@adia-ai/llm",
|
|
3
|
-
"version": "0.3.1",
|
|
4
|
-
"description": "Provider-agnostic LLM client
|
|
3
|
+
"version": "0.3.2",
|
|
4
|
+
"description": "Provider-agnostic LLM client \u2014 anthropic / openai / gemini adapters with a unified chat() + streamChat() facade. Used by AdiaUI's chat-shell and the A2UI generation pipeline; works in browser (with proxyUrl) and Node.",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"exports": {
|
|
7
7
|
".": "./index.js",
|
|
8
8
|
"./adapters/*": "./adapters/*.js",
|
|
9
9
|
"./bridge": "./llm-bridge.js",
|
|
10
|
+
"./models": "./models.js",
|
|
10
11
|
"./stub": "./llm-stub.js",
|
|
11
12
|
"./package.json": "./package.json"
|
|
12
13
|
},
|
|
@@ -14,6 +15,7 @@
|
|
|
14
15
|
"adapters/",
|
|
15
16
|
"llm-bridge.js",
|
|
16
17
|
"llm-stub.js",
|
|
18
|
+
"models.js",
|
|
17
19
|
"index.js",
|
|
18
20
|
"README.md",
|
|
19
21
|
"CHANGELOG.md"
|
|
@@ -29,4 +31,4 @@
|
|
|
29
31
|
"directory": "packages/llm"
|
|
30
32
|
},
|
|
31
33
|
"license": "MIT"
|
|
32
|
-
}
|
|
34
|
+
}
|