nuxt-edge-ai 0.1.2 → 0.1.3

package/README.md CHANGED
@@ -1,17 +1,35 @@
  # nuxt-edge-ai

- `nuxt-edge-ai` is a Nuxt module for building local-first AI applications with a real server-side WASM inference runtime.
+ [![npm version](https://img.shields.io/npm/v/nuxt-edge-ai/latest.svg)](https://www.npmjs.com/package/nuxt-edge-ai)
+ [![npm downloads](https://img.shields.io/npm/dm/nuxt-edge-ai.svg)](https://www.npmjs.com/package/nuxt-edge-ai)
+ [![license](https://img.shields.io/npm/l/nuxt-edge-ai.svg)](./LICENSE)
+ [![nuxt](https://img.shields.io/badge/Nuxt-4.x-00DC82?logo=nuxt.js&logoColor=white)](https://nuxt.com/)
+ [![ci](https://github.com/otadk/nuxt-edge-ai/actions/workflows/ci.yml/badge.svg)](https://github.com/otadk/nuxt-edge-ai/actions/workflows/ci.yml)
+
+ `nuxt-edge-ai` is a Nuxt module for building local-first AI applications with a real server-side WASM inference runtime and an optional remote API fallback.

  It ships:

  - a Nuxt module install surface
  - Nitro API routes for health, model pull, and generation
  - a client composable for app-side usage
+ - an `EdgeAI` SDK with an OpenAI-like `chat.completions.create()` surface
+ - switchable `local`, `remote`, and `mock` providers behind one module API
  - a vendored `transformers.js` + `onnxruntime-web` runtime inside the package
  - no Ollama, no `llama.cpp`, no Rust/C++/native runtime dependency for consumers

  The model weights are not bundled. Users either point the module at a local model directory or allow it to download and cache the model on first run.
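
As a concrete sketch of that local-directory path (the `./models/distilgpt2` folder is hypothetical; `model.localPath` and `model.allowRemote` are documented under Configuration below):

```ts
export default defineNuxtConfig({
  modules: ['nuxt-edge-ai'],
  edgeAI: {
    provider: 'local',
    model: {
      // hypothetical directory containing pre-exported model assets;
      // with allowRemote: false the module never downloads weights at runtime
      localPath: './models/distilgpt2',
      allowRemote: false,
    },
  },
})
```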

+ ## Features
+
+ - Nuxt module install surface designed for app integration
+ - Nitro endpoints for health, pull, and generate workflows
+ - local-first server-side inference with bundled WASM runtime assets
+ - optional OpenAI-compatible remote provider for stronger hosted models
+ - OpenAI-compatible `chat/completions` endpoint for SDK-style integration
+ - published package includes vendored inference runtime files
+ - no consumer requirement for Ollama, Rust, C++, Python, or native AI runtimes
+
  ## Why this exists

  The goal is to make `nuxt-edge-ai` a credible, publishable Nuxt module:
@@ -19,23 +37,20 @@ The goal is to make `nuxt-edge-ai` a credible, publishable Nuxt module:
  - installable in a regular Nuxt app
  - able to run a real local model
  - packaged as JS/TS + WASM only
- - suitable as a strong portfolio / resume project

  ## Current runtime

- Current real runtime path:
+ Current local runtime path:

  - `transformers.js` web build
  - `onnxruntime-web` WASM backend
  - server-side execution through Nitro

- Recommended first demo model:
+ Built-in local preset:

- - `Xenova/distilgpt2` for quick validation
+ - `distilgpt2`

- Recommended next upgrade target:
-
- - `onnx-community/Qwen2.5-0.5B-Instruct-ONNX`
+ The local path is intentionally conservative for now. When local inference is not enough, the module can fall back to a remote OpenAI-compatible API.

  ## Install

@@ -48,20 +63,15 @@ pnpm add nuxt-edge-ai
  export default defineNuxtConfig({
    modules: ['nuxt-edge-ai'],
    edgeAI: {
-     runtime: 'transformers-wasm',
+     provider: 'local',
      cacheDir: './.cache/nuxt-edge-ai',
-     model: {
-       id: 'Xenova/distilgpt2',
-       task: 'text-generation',
-       allowRemote: true,
-       dtype: 'q8',
-       generation: {
-         maxNewTokens: 96,
-         temperature: 0.7,
-         topP: 0.9,
-         doSample: true,
-         repetitionPenalty: 1.05,
-       },
+     preset: 'distilgpt2',
+     remote: {
+       enabled: true,
+       fallback: true,
+       baseUrl: 'https://api.openai.com/v1',
+       apiKey: process.env.OPENAI_API_KEY,
+       model: 'gpt-4o-mini',
      },
    },
  })
@@ -79,6 +89,110 @@ const result = await edgeAI.generate({
  </script>
  ```

+ ## Configuration
+
+ Top-level module options:
+
+ | Option | Type | Default | Notes |
+ | --- | --- | --- | --- |
+ | `routeBase` | `string` | `/api/edge-ai` | Base path for module endpoints |
+ | `provider` | `'local' \| 'remote' \| 'mock'` | `local` | Runtime backend selector |
+ | `runtime` | `'transformers-wasm' \| 'mock'` | `undefined` | Legacy alias for `provider`, kept for older configs |
+ | `cacheDir` | `string` | `./.cache/nuxt-edge-ai` | Cache and model asset directory |
+ | `warmup` | `boolean` | `false` | Warm the runtime on health checks |
+ | `preset` | `string` | `distilgpt2` | Local model preset |
+ | `presets` | `Record<string, ...>` | `undefined` | Register additional local presets |
+ | `model` | `object` | see below | Override the local model preset |
+ | `remote` | `object` | see below | Remote provider and fallback settings |
+
+ Local model options:
+
+ | Option | Type | Default | Notes |
+ | --- | --- | --- | --- |
+ | `id` | `string` | `Xenova/distilgpt2` | Model identifier used when no local path is set |
+ | `task` | `'text-generation'` | `text-generation` | Current supported task |
+ | `localPath` | `string \| undefined` | `undefined` | Local model directory |
+ | `allowRemote` | `boolean` | `true` | Allow first-run download from remote model source |
+ | `dtype` | `string \| undefined` | `q8` | Runtime dtype passed to Transformers.js |
+ | `generation.maxNewTokens` | `number` | `96` | Max generated tokens |
+ | `generation.temperature` | `number` | `0.7` | Sampling temperature |
+ | `generation.topP` | `number` | `0.9` | Top-p sampling |
+ | `generation.doSample` | `boolean` | `true` | Enable sampling |
+ | `generation.repetitionPenalty` | `number` | `1.05` | Repetition penalty |
+
123
+ Remote provider options:
124
+
125
+ | Option | Type | Default | Notes |
126
+ | --- | --- | --- | --- |
127
+ | `enabled` | `boolean` | `false` | Enable remote provider settings |
128
+ | `fallback` | `boolean` | `true` | Fall back to remote if local pull/generate fails |
129
+ | `baseUrl` | `string` | `https://api.openai.com/v1` | Remote API base URL |
130
+ | `path` | `string` | `/chat/completions` | OpenAI-compatible endpoint path |
131
+ | `model` | `string` | `gpt-4o-mini` | Default remote model ID |
132
+ | `apiKey` | `string \| undefined` | `undefined` | Inline API key |
133
+ | `headers` | `Record<string, string> \| undefined` | `undefined` | Extra request headers |
134
+ | `systemPrompt` | `string \| undefined` | `undefined` | Optional system instruction |
135
+
136
+ ## Provider examples
137
+
138
+ Local-only mode:
139
+
140
+ ```ts
141
+ export default defineNuxtConfig({
142
+ modules: ['nuxt-edge-ai'],
143
+ edgeAI: {
144
+ provider: 'local',
145
+ preset: 'distilgpt2',
146
+ remote: {
147
+ enabled: false,
148
+ },
149
+ },
150
+ })
151
+ ```
152
+
153
+ Local with automatic remote fallback:
154
+
155
+ ```ts
156
+ export default defineNuxtConfig({
157
+ modules: ['nuxt-edge-ai'],
158
+ edgeAI: {
159
+ provider: 'local',
160
+ preset: 'distilgpt2',
161
+ remote: {
162
+ enabled: true,
163
+ fallback: true,
164
+ baseUrl: 'https://api.openai.com/v1',
165
+ apiKey: process.env.OPENAI_API_KEY,
166
+ model: 'gpt-4o-mini',
167
+ },
168
+ },
169
+ })
170
+ ```
171
+
172
+ Custom preset registration:
173
+
174
+ ```ts
175
+ export default defineNuxtConfig({
176
+ modules: ['nuxt-edge-ai'],
177
+ edgeAI: {
178
+ presets: {
179
+ 'team-default': {
180
+ label: 'Team Default',
181
+ description: 'Project-specific local preset',
182
+ model: {
183
+ id: 'Xenova/distilgpt2',
184
+ dtype: 'q8',
185
+ generation: {
186
+ maxNewTokens: 120,
187
+ },
188
+ },
189
+ },
190
+ },
191
+ preset: 'team-default',
192
+ },
193
+ })
194
+ ```
195
+
82
196
  ## Consumer runtime guarantees
83
197
 
84
198
  Consumers do not need to install:
@@ -100,9 +214,97 @@ What consumers do need:
  - `GET /api/edge-ai/health`
  - `POST /api/edge-ai/pull`
  - `POST /api/edge-ai/generate`
+ - `POST /api/edge-ai/chat/completions`
  - `useEdgeAI().health()`
  - `useEdgeAI().pull()`
  - `useEdgeAI().generate()`
+ - `useEdgeAI().chatCompletions()`
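
As a sketch, the routes can be exercised directly with `fetch` against a dev server; the port, the default `routeBase`, and the `prompt` field of the generate payload are assumptions here:

```ts
const base = 'http://localhost:3000/api/edge-ai'

// health is a GET route; pull and generate are POST
const health = await fetch(`${base}/health`).then(r => r.json())
await fetch(`${base}/pull`, { method: 'POST' })
const generated = await fetch(`${base}/generate`, {
  method: 'POST',
  headers: { 'content-type': 'application/json' },
  body: JSON.stringify({ prompt: 'Write one sentence about WASM.' }), // request shape assumed
}).then(r => r.json())
```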
+
+ Health responses also expose:
+
+ - `provider`
+ - `presets`
+ - `remoteFallback`
+ - `engine.ready`
+ - `engine.lastError`
+
231
+ ## OpenAI-compatible chat completions
232
+
233
+ You can either point the official OpenAI client at the module's Nitro route, or use the package's own `EdgeAI` client with the same calling style.
234
+
235
+ Using `EdgeAI` directly:
236
+
237
+ ```ts
238
+ import { EdgeAI } from 'nuxt-edge-ai'
239
+
240
+ const client = new EdgeAI({
241
+ baseURL: 'http://localhost:3000/api/edge-ai',
242
+ })
243
+
244
+ const response = await client.chat.completions.create({
245
+ model: 'openai/gpt-oss-20b:free',
246
+ messages: [
247
+ {
248
+ role: 'user',
249
+ content: "How many r's are in strawberry?",
250
+ },
251
+ ],
252
+ reasoning: { enabled: true },
253
+ })
254
+ ```
255
+
256
+ Using the OpenAI SDK against the same route:
257
+
258
+ ```ts
259
+ import OpenAI from 'openai'
260
+
261
+ const client = new OpenAI({
262
+ baseURL: 'http://localhost:3000/api/edge-ai',
263
+ apiKey: 'local-dev-token',
264
+ })
265
+
266
+ const response = await client.chat.completions.create({
267
+ model: 'openai/gpt-oss-20b:free',
268
+ messages: [
269
+ {
270
+ role: 'user',
271
+ content: "How many r's are in strawberry?",
272
+ },
273
+ ],
274
+ reasoning: { enabled: true },
275
+ })
276
+ ```
277
+
278
+ Inside a Nuxt app you can also use `useEdgeAI().client.chat.completions.create(...)`.
279
+
280
+ When the module is using a remote OpenAI-compatible backend, it forwards `messages`, `reasoning`, and any extra `remoteBody` fields. If the upstream provider returns `reasoning_details`, the module preserves them on `choices[0].message`.
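
A sketch of consuming that passthrough from the in-app client; whether `reasoning_details` is present depends entirely on the upstream provider:

```ts
const client = useEdgeAI().client

const response = await client.chat.completions.create({
  model: 'openai/gpt-oss-20b:free',
  messages: [{ role: 'user', content: "How many r's are in strawberry?" }],
  reasoning: { enabled: true },
})

const message = response.choices[0].message
console.log(message.content)
// preserved verbatim from the upstream response when available
console.log((message as { reasoning_details?: unknown }).reasoning_details)
```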
+
+ Example OpenRouter-style config:
+
+ ```ts
+ export default defineNuxtConfig({
+   modules: ['nuxt-edge-ai'],
+   edgeAI: {
+     provider: 'remote',
+     remote: {
+       enabled: true,
+       baseUrl: 'https://openrouter.ai/api/v1',
+       apiKey: process.env.OPENROUTER_API_KEY,
+       model: 'openai/gpt-oss-20b:free',
+     },
+   },
+ })
+ ```
+
+ ## Troubleshooting
+
+ Common checks:
+
+ - Run `GET /api/edge-ai/health` first to confirm route wiring and runtime config.
+ - Use `provider: 'mock'` to separate module wiring issues from model/runtime issues (see the sketch after this list).
+ - Remote fallback requires `edgeAI.remote.enabled: true` plus `edgeAI.remote.apiKey`.
+ - If `pull` fails, inspect server logs first. Most early failures are model-path or packaged-runtime issues.
+ - After changing vendored runtime files, always run `pnpm prepack` before validating a published-style install.
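
A minimal mock-mode config for that isolation step; with `provider: 'mock'` the module serves mock text instead of running a model, so any failure points at module wiring rather than the runtime:

```ts
export default defineNuxtConfig({
  modules: ['nuxt-edge-ai'],
  edgeAI: {
    provider: 'mock',
  },
})
```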

  ## Local development

@@ -125,9 +327,15 @@ pnpm prepack

  See [`docs/index.md`](./docs/index.md) for the project docs tree.

- ## Repository shape
+ Key docs:

- This repository follows a Nuxt modules-style layout:
+ - [`docs/getting-started.md`](./docs/getting-started.md)
+ - [`docs/api.md`](./docs/api.md)
+ - [`docs/models.md`](./docs/models.md)
+ - [`docs/architecture.md`](./docs/architecture.md)
+ - [`docs/third-party.md`](./docs/third-party.md)
+
+ ## Repository shape

  - `src/module.ts`: module entry and runtime config wiring
  - `src/runtime/`: composables, plugin, and Nitro runtime code
@@ -138,4 +346,4 @@ This repository follows a Nuxt modules-style layout:

  ## Status

- This is an MVP, but it now runs a real model instead of mock text when `runtime: 'transformers-wasm'` is enabled.
+ This is still an MVP, but it now supports three execution modes behind one API: `local`, `remote`, and `mock`.
package/dist/module.d.mts CHANGED
@@ -1,23 +1,29 @@
  import * as _nuxt_schema from '@nuxt/schema';
- import { EdgeAIGenerationOptions } from '../dist/runtime/types.js';
- export { EdgeAIGenerateRequest, EdgeAIGenerateResponse, EdgeAIHealthResponse, EdgeAIPullResponse } from '../dist/runtime/types.js';
+ import { EdgeAIModelResolvedConfig, EdgeAIGenerationOptions, EdgeAIRemoteConfig, EdgeAIProvider, EdgeAIModelPresetDefinition } from '../dist/runtime/types.js';
+ export { EdgeAIChatCompletionRequest, EdgeAIChatCompletionResponse, EdgeAIGenerateRequest, EdgeAIGenerateResponse, EdgeAIHealthResponse, EdgeAIPullResponse, EdgeAIRemoteMessage, EdgeAIRemoteReasoningOptions } from '../dist/runtime/types.js';
+ export { EdgeAI, EdgeAIClientOptions } from '../dist/runtime/client.js';

- interface EdgeAIModelOptions {
-   id: string;
-   task: 'text-generation';
-   localPath?: string;
-   allowRemote: boolean;
-   dtype?: string;
-   generation: EdgeAIGenerationOptions;
+ interface EdgeAIModelOptions extends Partial<Omit<EdgeAIModelResolvedConfig, 'generation'>> {
+   generation?: Partial<EdgeAIGenerationOptions>;
+ }
+ interface EdgeAIRemoteOptions extends Partial<EdgeAIRemoteConfig> {
+   baseURL?: string;
+   headers?: Record<string, string>;
  }
  interface ModuleOptions {
-   routeBase: string;
-   runtime: 'transformers-wasm' | 'mock';
-   cacheDir: string;
-   warmup: boolean;
-   model: EdgeAIModelOptions;
+   routeBase?: string;
+   provider?: EdgeAIProvider;
+   runtime?: 'transformers-wasm' | 'mock';
+   cacheDir?: string;
+   warmup?: boolean;
+   preset?: string;
+   presets?: Record<string, Partial<EdgeAIModelPresetDefinition> & {
+     model: EdgeAIModelOptions;
+   }>;
+   model?: EdgeAIModelOptions;
+   remote?: EdgeAIRemoteOptions;
  }
  declare const _default: _nuxt_schema.NuxtModule<ModuleOptions, ModuleOptions, false>;

  export { _default as default };
- export type { EdgeAIModelOptions, ModuleOptions };
+ export type { EdgeAIModelOptions, EdgeAIRemoteOptions, ModuleOptions };
package/dist/module.json CHANGED
@@ -1,7 +1,7 @@
  {
    "name": "nuxt-edge-ai",
    "configKey": "edgeAI",
-   "version": "0.1.2",
+   "version": "0.1.3",
    "builder": {
      "@nuxt/module-builder": "1.0.2",
      "unbuild": "unknown"
package/dist/module.mjs CHANGED
@@ -1,5 +1,7 @@
  import { isAbsolute, resolve } from 'node:path';
  import { defineNuxtModule, createResolver, addImportsDir, addPlugin, addServerHandler, addTypeTemplate } from '@nuxt/kit';
+ import { mergeModelConfig, builtinModelPresets } from '../dist/runtime/presets.js';
+ export { EdgeAI } from '../dist/runtime/client.js';

  function resolveMaybeAbsolute(rootDir, value) {
    if (!value) {
@@ -8,9 +10,56 @@ function resolveMaybeAbsolute(rootDir, value) {
    return isAbsolute(value) ? value : resolve(rootDir, value);
  }
  function normalizeRouteBase(routeBase) {
-   const normalized = routeBase.trim().replace(/\/+$/, "");
+   const normalized = routeBase?.trim().replace(/\/+$/, "");
    return normalized || "/api/edge-ai";
  }
+ function resolveProvider(options) {
+   if (options.runtime === "mock") {
+     return "mock";
+   }
+   return options.provider || "local";
+ }
+ function resolveRuntime(provider) {
+   if (provider === "local") {
+     return "transformers-wasm";
+   }
+   return provider;
+ }
+ function normalizePresetRegistry(presets) {
+   const normalized = { ...builtinModelPresets };
+   for (const [id, preset] of Object.entries(presets || {})) {
+     normalized[id] = {
+       label: preset.label || id,
+       description: preset.description || `Custom preset "${id}".`,
+       model: mergeModelConfig(builtinModelPresets.distilgpt2.model, preset.model)
+     };
+   }
+   return normalized;
+ }
+ function toPresetSummary(id, preset) {
+   return {
+     id,
+     label: preset.label,
+     description: preset.description,
+     model: {
+       id: preset.model.id,
+       task: preset.model.task,
+       dtype: preset.model.dtype
+     }
+   };
+ }
+ function resolveRemoteConfig(remote) {
+   return {
+     enabled: remote?.enabled ?? false,
+     fallback: remote?.fallback ?? true,
+     baseUrl: remote?.baseUrl || remote?.baseURL || "https://api.openai.com/v1",
+     apiKey: remote?.apiKey,
+     path: remote?.path || "/chat/completions",
+     model: remote?.model || "gpt-4o-mini",
+     headers: remote?.headers,
+     systemPrompt: remote?.systemPrompt
+   };
+ }
  const module$1 = defineNuxtModule({
    meta: {
      name: "nuxt-edge-ai",
@@ -18,53 +67,62 @@ const module$1 = defineNuxtModule({
    },
    defaults: {
      routeBase: "/api/edge-ai",
-     runtime: "transformers-wasm",
+     provider: "local",
      cacheDir: "./.cache/nuxt-edge-ai",
      warmup: false,
-     model: {
-       id: "Xenova/distilgpt2",
-       task: "text-generation",
-       allowRemote: true,
-       dtype: "q8",
-       generation: {
-         maxNewTokens: 96,
-         temperature: 0.7,
-         topP: 0.9,
-         doSample: true,
-         repetitionPenalty: 1.05
-       }
+     preset: "distilgpt2",
+     model: {},
+     remote: {
+       enabled: false,
+       fallback: true,
+       baseUrl: "https://api.openai.com/v1",
+       path: "/chat/completions",
+       model: "gpt-4o-mini"
      }
    },
    setup(options, nuxt) {
      const resolver = createResolver(import.meta.url);
      const routeBase = normalizeRouteBase(options.routeBase);
-     const cacheDir = resolveMaybeAbsolute(nuxt.options.rootDir, options.cacheDir) ?? options.cacheDir;
-     const modelLocalPath = resolveMaybeAbsolute(nuxt.options.rootDir, options.model.localPath);
+     const provider = resolveProvider(options);
+     const runtime = resolveRuntime(provider);
+     const cacheDir = resolveMaybeAbsolute(nuxt.options.rootDir, options.cacheDir) ?? "./.cache/nuxt-edge-ai";
+     const presetRegistry = normalizePresetRegistry(options.presets);
+     const presetId = options.preset || "distilgpt2";
+     const preset = presetRegistry[presetId];
+     if (!preset) {
+       throw new Error(
+         `Unknown edgeAI preset "${presetId}". Available presets: ${Object.keys(presetRegistry).join(", ")}`
+       );
+     }
+     const model = mergeModelConfig(preset.model, options.model);
      const runtimeConfig = nuxt.options.runtimeConfig;
-     runtimeConfig.edgeAI = {
+     const presets = Object.entries(presetRegistry).map(([id, entry]) => toPresetSummary(id, entry));
+     const serverModel = {
+       ...model,
+       localPath: resolveMaybeAbsolute(nuxt.options.rootDir, model.localPath)
+     };
+     const remote = resolveRemoteConfig(options.remote);
+     const serverRuntimeConfig = {
        routeBase,
-       runtime: options.runtime,
+       provider,
+       runtime,
        cacheDir,
-       warmup: options.warmup,
-       model: {
-         id: options.model.id,
-         task: options.model.task,
-         localPath: modelLocalPath,
-         allowRemote: options.model.allowRemote,
-         dtype: options.model.dtype,
-         generation: {
-           maxNewTokens: options.model.generation.maxNewTokens,
-           temperature: options.model.generation.temperature,
-           topP: options.model.generation.topP,
-           doSample: options.model.generation.doSample,
-           repetitionPenalty: options.model.generation.repetitionPenalty
-         }
-       }
+       warmup: Boolean(options.warmup),
+       preset: provider === "local" ? presetId : void 0,
+       model: serverModel,
+       remote,
+       presets
      };
+     runtimeConfig.edgeAI = serverRuntimeConfig;
      runtimeConfig.public.edgeAI = {
        routeBase,
-       runtime: options.runtime,
-       defaultModel: options.model.id
+       provider,
+       runtime,
+       defaultModel: provider === "remote" ? remote.model : serverModel.id,
+       remoteModel: remote.model,
+       preset: provider === "local" ? presetId : void 0,
+       presets,
+       remoteFallback: remote.fallback
      };
      addImportsDir(resolver.resolve("./runtime/composables"));
      addPlugin(resolver.resolve("./runtime/plugin"));
@@ -83,24 +141,39 @@ const module$1 = defineNuxtModule({
      method: "post",
      handler: resolver.resolve("./runtime/server/api/generate.post")
    });
+   addServerHandler({
+     route: `${routeBase}/chat/completions`,
+     method: "post",
+     handler: resolver.resolve("./runtime/server/api/chat-completions.post")
+   });
    addTypeTemplate({
      filename: "types/nuxt-edge-ai.d.ts",
      getContents: () => `import type { NuxtApp } from '#app'
  import type {
-   EdgeAIPullResponse,
+   EdgeAIChatCompletionRequest,
+   EdgeAIChatCompletionResponse,
+   EdgeAIClientOptions,
    EdgeAIGenerateRequest,
    EdgeAIGenerateResponse,
-   EdgeAIHealthResponse
+   EdgeAIHealthResponse,
+   EdgeAIPullResponse
  } from 'nuxt-edge-ai'
+ import type { EdgeAI } from 'nuxt-edge-ai'

  declare module '#app' {
    interface NuxtApp {
      $edgeAI: {
        routeBase: string
-       runtime: 'transformers-wasm' | 'mock'
+       provider: 'local' | 'remote' | 'mock'
+       runtime: 'transformers-wasm' | 'remote' | 'mock'
        defaultModel: string
+       remoteModel: string
+       preset?: string
+       remoteFallback: boolean
+       client: EdgeAI
        pull: () => Promise<EdgeAIPullResponse>
        generate: (payload: EdgeAIGenerateRequest) => Promise<EdgeAIGenerateResponse>
+       chatCompletions: (payload: EdgeAIChatCompletionRequest) => Promise<EdgeAIChatCompletionResponse>
        health: () => Promise<EdgeAIHealthResponse>
      }
    }
@@ -0,0 +1,26 @@
+ import type { EdgeAIChatCompletionRequest, EdgeAIChatCompletionResponse, EdgeAIGenerateRequest, EdgeAIGenerateResponse, EdgeAIHealthResponse, EdgeAIPullResponse } from './types.js';
+ export interface EdgeAIClientOptions {
+   baseURL: string;
+   apiKey?: string;
+   headers?: Record<string, string>;
+   fetch?: typeof globalThis.fetch;
+ }
+ export declare class EdgeAI {
+   private readonly baseURL;
+   private readonly apiKey?;
+   private readonly headers?;
+   private readonly fetchImpl;
+   readonly chat: {
+     completions: {
+       create: (payload: EdgeAIChatCompletionRequest) => Promise<EdgeAIChatCompletionResponse>;
+     };
+   };
+   constructor(options: EdgeAIClientOptions);
+   responses: {
+     create: (payload: EdgeAIChatCompletionRequest) => Promise<EdgeAIChatCompletionResponse>;
+   };
+   health(): Promise<EdgeAIHealthResponse>;
+   pull(): Promise<EdgeAIPullResponse>;
+   generate(payload: EdgeAIGenerateRequest): Promise<EdgeAIGenerateResponse>;
+   private request;
+ }
@@ -0,0 +1,62 @@
+ function joinUrl(baseURL, path) {
+   const normalizedBase = baseURL.replace(/\/+$/, "");
+   const normalizedPath = path.startsWith("/") ? path : `/${path}`;
+   return `${normalizedBase}${normalizedPath}`;
+ }
+ export class EdgeAI {
+   baseURL;
+   apiKey;
+   headers;
+   fetchImpl;
+   chat = {
+     completions: {
+       create: (payload) => this.request("/chat/completions", {
+         method: "POST",
+         body: payload
+       })
+     }
+   };
+   constructor(options) {
+     this.baseURL = options.baseURL;
+     this.apiKey = options.apiKey;
+     this.headers = options.headers;
+     const resolvedFetch = options.fetch || globalThis.fetch;
+     if (!resolvedFetch) {
+       throw new Error("Fetch is not available. Provide EdgeAI({ fetch }) in this runtime.");
+     }
+     this.fetchImpl = resolvedFetch.bind(globalThis);
+   }
+   responses = {
+     create: async (payload) => this.chat.completions.create(payload)
+   };
+   health() {
+     return this.request("/health");
+   }
+   pull() {
+     return this.request("/pull", {
+       method: "POST"
+     });
+   }
+   generate(payload) {
+     return this.request("/generate", {
+       method: "POST",
+       body: payload
+     });
+   }
+   async request(path, options = {}) {
+     const response = await this.fetchImpl(joinUrl(this.baseURL, path), {
+       method: options.method || "GET",
+       headers: {
+         "content-type": "application/json",
+         ...this.apiKey ? { authorization: `Bearer ${this.apiKey}` } : {},
+         ...this.headers
+       },
+       body: options.body === void 0 ? void 0 : JSON.stringify(options.body)
+     });
+     if (!response.ok) {
+       const errorText = await response.text();
+       throw new Error(`EdgeAI request failed with ${response.status}: ${errorText}`);
+     }
+     return await response.json();
+   }
+ }