@crazy-goat/nexos-provider 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +127 -0
- package/index.mjs +134 -0
- package/package.json +32 -0
package/README.md
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
# nexos-provider
|
|
2
|
+
|
|
3
|
+
Custom [AI SDK](https://sdk.vercel.ai/) provider for using [nexos.ai](https://nexos.ai) Gemini models with [opencode](https://opencode.ai).
|
|
4
|
+
|
|
5
|
+
## Problem
|
|
6
|
+
|
|
7
|
+
When accessing Gemini models through the nexos.ai API proxy, two issues prevent them from working with opencode (and likely other AI SDK-based tools):
|
|
8
|
+
|
|
9
|
+
1. **Missing `data: [DONE]` in SSE streaming** — Gemini responses via nexos don't emit the standard `data: [DONE]` signal at the end of a streaming response. The AI SDK's `EventSourceParserStream` waits indefinitely for more data, causing opencode to hang forever.
|
|
10
|
+
|
|
11
|
+
2. **`$ref` in tool schemas** — opencode sends JSON Schemas with `$ref` / `$defs` for tool parameters. Gemini (Vertex AI) rejects these with: `Schema.ref was set alongside unsupported fields`.
|
|
12
|
+
|
|
13
|
+
## Solution
|
|
14
|
+
|
|
15
|
+
This provider wraps `@ai-sdk/openai-compatible` and intercepts `fetch` to:
|
|
16
|
+
|
|
17
|
+
- **Append `data: [DONE]\n\n`** to the end of streaming responses from Gemini models (via a `TransformStream` flush handler)
|
|
18
|
+
- **Inline `$ref` references** in tool parameter schemas before sending them to the API
|
|
19
|
+
|
|
20
|
+
No proxy, no extra processes — everything runs inline inside opencode.
|
|
21
|
+
|
|
22
|
+
## Setup
|
|
23
|
+
|
|
24
|
+
### 1. Clone this repo
|
|
25
|
+
|
|
26
|
+
```bash
|
|
27
|
+
git clone <this-repo> ~/nexos-provider
|
|
28
|
+
cd ~/nexos-provider
|
|
29
|
+
npm install
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
### 2. Set your API key
|
|
33
|
+
|
|
34
|
+
```bash
|
|
35
|
+
export NEXOS_API_KEY="your-nexos-api-key"
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
### 3. Configure opencode
|
|
39
|
+
|
|
40
|
+
Add the provider to your `~/.config/opencode/opencode.json`:
|
|
41
|
+
|
|
42
|
+
```json
|
|
43
|
+
{
|
|
44
|
+
"$schema": "https://opencode.ai/config.json",
|
|
45
|
+
"provider": {
|
|
46
|
+
"nexos-gemini": {
|
|
47
|
+
"npm": "file:///absolute/path/to/nexos-provider/index.mjs",
|
|
48
|
+
"name": "Nexos Gemini",
|
|
49
|
+
"env": ["NEXOS_API_KEY"],
|
|
50
|
+
"options": {
|
|
51
|
+
"baseURL": "https://api.nexos.ai/v1/",
|
|
52
|
+
"timeout": 300000
|
|
53
|
+
},
|
|
54
|
+
"models": {
|
|
55
|
+
"Gemini 2.5 Pro": {
|
|
56
|
+
"name": "Gemini 2.5 Pro",
|
|
57
|
+
"limit": { "context": 128000, "output": 64000 }
|
|
58
|
+
},
|
|
59
|
+
"Gemini 3 Flash Preview": {
|
|
60
|
+
"name": "Gemini 3 Flash Preview",
|
|
61
|
+
"limit": { "context": 128000, "output": 64000 }
|
|
62
|
+
},
|
|
63
|
+
"Gemini 3 Pro Preview": {
|
|
64
|
+
"name": "Gemini 3 Pro Preview",
|
|
65
|
+
"limit": { "context": 128000, "output": 64000 }
|
|
66
|
+
}
|
|
67
|
+
}
|
|
68
|
+
}
|
|
69
|
+
}
|
|
70
|
+
}
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
> **Note:** The `npm` path must be an absolute `file://` URL pointing to `index.mjs`.
|
|
74
|
+
|
|
75
|
+
### 4. Use it
|
|
76
|
+
|
|
77
|
+
```bash
|
|
78
|
+
opencode run "hello" -m "nexos-gemini/Gemini 2.5 Pro"
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
Or select the model interactively in opencode with `Ctrl+X M`.
|
|
82
|
+
|
|
83
|
+
## GPT and Claude models
|
|
84
|
+
|
|
85
|
+
GPT and Claude models work fine through nexos.ai without this provider — they correctly emit `data: [DONE]` and handle `$ref` schemas. Use the standard `@ai-sdk/openai-compatible` provider for those:
|
|
86
|
+
|
|
87
|
+
```json
|
|
88
|
+
{
|
|
89
|
+
"nexos-ai": {
|
|
90
|
+
"npm": "@ai-sdk/openai-compatible",
|
|
91
|
+
"name": "Nexos AI",
|
|
92
|
+
"env": ["NEXOS_API_KEY"],
|
|
93
|
+
"options": {
|
|
94
|
+
"baseURL": "https://api.nexos.ai/v1/",
|
|
95
|
+
"timeout": 300000
|
|
96
|
+
},
|
|
97
|
+
"models": {
|
|
98
|
+
"Claude Opus 4.6": {
|
|
99
|
+
"name": "Claude Opus 4.6",
|
|
100
|
+
"limit": { "context": 128000, "output": 64000 }
|
|
101
|
+
},
|
|
102
|
+
"GPT 5.2": {
|
|
103
|
+
"name": "GPT 5.2",
|
|
104
|
+
"limit": { "context": 128000, "output": 64000 }
|
|
105
|
+
}
|
|
106
|
+
}
|
|
107
|
+
}
|
|
108
|
+
}
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
## How it works
|
|
112
|
+
|
|
113
|
+
The provider exports `createNexosAI` which creates a standard AI SDK provider with a custom `fetch` wrapper:
|
|
114
|
+
|
|
115
|
+
```
|
|
116
|
+
Request flow:
|
|
117
|
+
opencode → createNexosAI → fetch wrapper → nexos.ai API
|
|
118
|
+
│
|
|
119
|
+
├─ Resolves $ref in tool schemas (for Gemini)
|
|
120
|
+
└─ Appends data: [DONE] to SSE stream (for Gemini)
|
|
121
|
+
```
|
|
122
|
+
|
|
123
|
+
Only Gemini model requests are modified — all other models pass through unchanged.
|
|
124
|
+
|
|
125
|
+
## License
|
|
126
|
+
|
|
127
|
+
MIT
|
package/index.mjs
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
|
|
2
|
+
|
|
3
|
+
/**
 * Recursively inline JSON Schema `$ref` / `ref` references using the given
 * definitions map, stripping `$defs` / `definitions` / `$ref` / `ref` keys so
 * the result contains no reference keywords (Gemini/Vertex AI rejects them).
 *
 * @param {*} schema - Schema node (object, array, or primitive) to rewrite.
 * @param {Object} defs - Map of definition name -> schema (from `$defs` or `definitions`).
 * @param {Set<string>} [seen] - Ref names currently being expanded; breaks
 *   cycles in self-referential schemas (new optional, defaulted parameter).
 * @returns {*} A new schema with every resolvable reference inlined.
 */
function resolveRefs(schema, defs, seen = new Set()) {
  if (!schema || typeof schema !== "object") return schema;
  if (Array.isArray(schema)) return schema.map((s) => resolveRefs(s, defs, seen));

  const ref = schema.$ref || schema.ref;
  if (ref) {
    const refName = ref
      .replace(/^#\/\$defs\//, "")
      .replace(/^#\/definitions\//, "");
    const resolved = defs?.[refName];
    if (resolved) {
      // Circular reference: the previous implementation recursed forever here
      // (stack overflow). Cut the cycle with a permissive object placeholder.
      if (seen.has(refName)) {
        const placeholder = { type: "object" };
        if (schema.description) placeholder.description = schema.description;
        return placeholder;
      }
      seen.add(refName);
      const merged = { ...resolveRefs(resolved, defs, seen) };
      // Remove after expansion so non-cyclic repeated refs still inline fully.
      seen.delete(refName);
      // The reference site's own annotations win over the definition's.
      if (schema.description) merged.description = schema.description;
      if (schema.default !== undefined) merged.default = schema.default;
      return merged;
    }
  }

  // Copy the node, dropping reference machinery and recursing into children.
  const result = {};
  for (const [k, v] of Object.entries(schema)) {
    if (k === "$defs" || k === "definitions" || k === "$ref" || k === "ref")
      continue;
    result[k] = resolveRefs(v, defs, seen);
  }
  return result;
}
|
|
28
|
+
|
|
29
|
+
/**
 * Rewrite tool definitions so their parameter schemas contain no `$ref`
 * references (which Gemini rejects). Tools that are not function tools, or
 * that have no parameters, are passed through untouched.
 *
 * @param {Object} body - Parsed request body, possibly carrying a `tools` array.
 * @returns {Object} The same body when there are no tools; otherwise a shallow
 *   copy with rewritten tool parameter schemas.
 */
function fixToolSchemas(body) {
  if (!body.tools?.length) return body;

  const rewriteTool = (tool) => {
    const params = tool.function?.parameters;
    if (tool.type !== "function" || !params) return tool;
    // Definitions may live under `$defs` (2020-12) or `definitions` (draft-07).
    const defs = params.$defs || params.definitions || {};
    return {
      ...tool,
      function: { ...tool.function, parameters: resolveRefs(params, defs) },
    };
  };

  return { ...body, tools: body.tools.map(rewriteTool) };
}
|
|
47
|
+
|
|
48
|
+
/**
 * Patch SSE chunk text so that a `finish_reason` of "stop" emitted alongside
 * tool calls is rewritten to "tool_calls" (what AI SDK clients expect).
 * Events that fail to parse, or need no change, are left byte-identical.
 *
 * @param {string} text - Decoded SSE text, possibly containing `data: {...}` lines.
 * @returns {string} The text with qualifying events re-serialized.
 */
function fixFinishReason(text) {
  const SSE_JSON_LINE = /data: ({.*})\n/g;
  return text.replace(SSE_JSON_LINE, (whole, payload) => {
    try {
      const event = JSON.parse(payload);
      let touched = false;
      for (const choice of event.choices ?? []) {
        if (choice.finish_reason === "stop" && choice.delta?.tool_calls?.length) {
          choice.finish_reason = "tool_calls";
          touched = true;
        }
      }
      if (touched) return "data: " + JSON.stringify(event) + "\n";
    } catch {
      // Malformed JSON or unexpected shape: pass the event through untouched.
    }
    return whole;
  });
}
|
|
68
|
+
|
|
69
|
+
/**
 * Build a TransformStream that re-encodes SSE chunks, applies
 * `fixFinishReason`, and appends `data: [DONE]\n\n` at stream end if the
 * upstream never sent it (nexos omits it for Gemini models).
 *
 * Fix: the previous version created a fresh TextDecoder per chunk, which
 * corrupts multi-byte UTF-8 characters split across chunk boundaries. A
 * single stateful decoder with `{ stream: true }` reassembles them.
 *
 * @returns {TransformStream} Byte/string-in, Uint8Array-out transform.
 */
function appendDoneToStream() {
  const encoder = new TextEncoder();
  // One decoder for the whole stream so partial code points carry over
  // between chunks instead of becoming U+FFFD replacement characters.
  const decoder = new TextDecoder();
  let sawDone = false;

  return new TransformStream({
    transform(chunk, controller) {
      let text =
        typeof chunk === "string"
          ? chunk
          : decoder.decode(chunk, { stream: true });
      // NOTE(review): a "[DONE]" marker split across two chunks would still
      // be missed (same limitation as before); servers send it in one chunk.
      if (text.includes("[DONE]")) sawDone = true;
      text = fixFinishReason(text);
      controller.enqueue(encoder.encode(text));
    },
    flush(controller) {
      // Emit any bytes the decoder is still buffering (incomplete sequence).
      const tail = decoder.decode();
      if (tail) controller.enqueue(encoder.encode(tail));
      if (!sawDone) {
        controller.enqueue(encoder.encode("data: [DONE]\n\n"));
      }
    },
  });
}
|
|
88
|
+
|
|
89
|
+
/**
 * Report whether a model identifier names a Gemini model
 * (case-insensitive substring match on "gemini").
 *
 * @param {*} model - Model identifier taken from the request body.
 * @returns {boolean} True only for strings containing "gemini".
 */
function isGeminiModel(model) {
  if (typeof model !== "string") return false;
  return /gemini/i.test(model);
}
|
|
92
|
+
|
|
93
|
+
/**
 * Wrap a fetch implementation with the nexos.ai Gemini workarounds:
 * - inlines `$ref` in tool schemas before sending (Gemini rejects `$ref`)
 * - appends `data: [DONE]` to streaming responses (nexos omits it for Gemini)
 * Non-Gemini requests and responses pass through unchanged.
 *
 * @param {typeof fetch} [baseFetch] - Underlying fetch; defaults to global fetch.
 * @returns {typeof fetch} A fetch-compatible async function.
 */
function createNexosFetch(baseFetch) {
  const realFetch = baseFetch || globalThis.fetch;

  return async function nexosFetch(url, init) {
    // Best-effort parse of the outgoing JSON body; an unparseable body means
    // "unknown model", so the request is forwarded untouched.
    let requestBody;
    try {
      requestBody = init?.body ? JSON.parse(init.body) : {};
    } catch {
      requestBody = {};
    }

    const gemini = isGeminiModel(requestBody.model);

    // Only re-serialize when the payload actually changed (previously every
    // Gemini request body was re-stringified, tools or not).
    if (gemini && requestBody.tools) {
      requestBody = fixToolSchemas(requestBody);
      init = { ...init, body: JSON.stringify(requestBody) };
    }

    const response = await realFetch(url, init);

    // Guard on `response.ok` and a non-null body: error responses carry a
    // JSON error payload (appending [DONE] would corrupt it), and a null
    // body would previously make `pipeThrough` throw a TypeError.
    if (gemini && requestBody.stream && response.ok && response.body) {
      const fixedBody = response.body.pipeThrough(appendDoneToStream());
      return new Response(fixedBody, {
        status: response.status,
        statusText: response.statusText,
        headers: response.headers,
      });
    }

    return response;
  };
}
|
|
127
|
+
|
|
128
|
+
/**
 * Create an AI SDK provider backed by `@ai-sdk/openai-compatible` with the
 * nexos.ai Gemini fetch workarounds installed.
 *
 * @param {Object} [options] - Options forwarded to createOpenAICompatible
 *   (baseURL, apiKey, headers, ...). `options.fetch`, if given, becomes the
 *   underlying fetch; `options.name` defaults to "nexos-ai" when falsy.
 * @returns {Object} The configured provider instance.
 */
export function createNexosAI(options = {}) {
  const { name, fetch: innerFetch, ...rest } = options;
  return createOpenAICompatible({
    ...rest,
    name: name || "nexos-ai",
    fetch: createNexosFetch(innerFetch),
  });
}
|
package/package.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@crazy-goat/nexos-provider",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "Custom AI SDK provider for nexos.ai Gemini models in opencode",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "index.mjs",
|
|
7
|
+
"exports": {
|
|
8
|
+
".": "./index.mjs"
|
|
9
|
+
},
|
|
10
|
+
"files": [
|
|
11
|
+
"index.mjs"
|
|
12
|
+
],
|
|
13
|
+
"keywords": [
|
|
14
|
+
"opencode",
|
|
15
|
+
"ai-sdk",
|
|
16
|
+
"nexos",
|
|
17
|
+
"gemini",
|
|
18
|
+
"provider"
|
|
19
|
+
],
|
|
20
|
+
"license": "MIT",
|
|
21
|
+
"repository": {
|
|
22
|
+
"type": "git",
|
|
23
|
+
"url": "https://github.com/crazy-goat/nexos-provider.git"
|
|
24
|
+
},
|
|
25
|
+
"homepage": "https://github.com/crazy-goat/nexos-provider",
|
|
26
|
+
"bugs": {
|
|
27
|
+
"url": "https://github.com/crazy-goat/nexos-provider/issues"
|
|
28
|
+
},
|
|
29
|
+
"dependencies": {
|
|
30
|
+
"@ai-sdk/openai-compatible": "1.0.32"
|
|
31
|
+
}
|
|
32
|
+
}
|