@serii84/vertex-partner-provider 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +104 -0
- package/index.js +94 -0
- package/package.json +19 -0
- package/test.js +78 -0
package/README.md
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
# Vertex Partner Provider
|
|
2
|
+
|
|
3
|
+
Vercel AI SDK provider for **Vertex AI partner models** (GLM, Kimi, DeepSeek, MiniMax, Qwen) using the OpenAI-compatible format.
|
|
4
|
+
|
|
5
|
+
## The Problem
|
|
6
|
+
|
|
7
|
+
Vertex AI partner models use OpenAI-compatible format, but `@ai-sdk/google-vertex` only supports Gemini format. This provider bridges that gap using the same pattern as `@ai-sdk/google-vertex/anthropic`.
|
|
8
|
+
|
|
9
|
+
## How It Works
|
|
10
|
+
|
|
11
|
+
```
|
|
12
|
+
┌─────────────────────────────────────────────────────────────────┐
|
|
13
|
+
│ @ai-sdk/google-vertex │
|
|
14
|
+
├─────────────────────┬───────────────────────────────────────────┤
|
|
15
|
+
│ Main (Gemini) │ /anthropic submodule │
|
|
16
|
+
│ │ │
|
|
17
|
+
│ Uses: │ Uses: │
|
|
18
|
+
│ GoogleGenerative │ AnthropicMessagesLanguageModel │
|
|
19
|
+
│ AILanguageModel │ (from @ai-sdk/anthropic/internal) │
|
|
20
|
+
│ │ │
|
|
21
|
+
│ Format: Gemini │ Format: Anthropic (native) │
|
|
22
|
+
└─────────────────────┴───────────────────────────────────────────┘
|
|
23
|
+
|
|
24
|
+
┌─────────────────────────────────────────────────────────────────┐
|
|
25
|
+
│ vertex-partner-provider (this package) │
|
|
26
|
+
├─────────────────────────────────────────────────────────────────┤
|
|
27
|
+
│ Uses: │
|
|
28
|
+
│ OpenAICompatibleChatLanguageModel │
|
|
29
|
+
│ (from @ai-sdk/openai-compatible) │
|
|
30
|
+
│ │
|
|
31
|
+
│ Format: OpenAI (native) + Google Cloud Auth │
|
|
32
|
+
│ │
|
|
33
|
+
│ Endpoint: /endpoints/openapi/chat/completions │
|
|
34
|
+
└─────────────────────────────────────────────────────────────────┘
|
|
35
|
+
```
|
|
36
|
+
|
|
37
|
+
## Installation
|
|
38
|
+
|
|
39
|
+
```bash
|
|
40
|
+
npm install @ai-sdk/openai-compatible google-auth-library ai
|
|
41
|
+
# Then copy index.js to your project (e.g. as vertex-partner-provider.js, to match the require path below)
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
## Usage
|
|
45
|
+
|
|
46
|
+
```javascript
|
|
47
|
+
const { createGLM, createDeepSeek, createKimi } = require('./vertex-partner-provider');
|
|
48
|
+
const { generateText } = require('ai');
|
|
49
|
+
|
|
50
|
+
// Create provider for GLM
|
|
51
|
+
const glm = createGLM({
|
|
52
|
+
project: 'your-gcp-project',
|
|
53
|
+
location: 'global', // or specific region
|
|
54
|
+
});
|
|
55
|
+
|
|
56
|
+
// Use it
|
|
57
|
+
const result = await generateText({
|
|
58
|
+
model: glm('glm-4.7-maas'),
|
|
59
|
+
prompt: 'Hello!',
|
|
60
|
+
});
|
|
61
|
+
|
|
62
|
+
console.log(result.text);
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
## Available Factory Functions
|
|
66
|
+
|
|
67
|
+
| Function | Publisher | Models |
|
|
68
|
+
|----------|-----------|--------|
|
|
69
|
+
| `createGLM()` | zai-org | glm-4.7-maas |
|
|
70
|
+
| `createKimi()` | moonshotai | kimi-k2-thinking-maas |
|
|
71
|
+
| `createDeepSeek()` | deepseek-ai | deepseek-v3.2-maas |
|
|
72
|
+
| `createMiniMax()` | minimaxai | minimax-m2-maas |
|
|
73
|
+
| `createQwen()` | qwen | qwen3-coder-480b-a35b-instruct-maas |
|
|
74
|
+
|
|
75
|
+
## Generic Usage
|
|
76
|
+
|
|
77
|
+
```javascript
|
|
78
|
+
const { createVertexPartner } = require('./vertex-partner-provider');
|
|
79
|
+
|
|
80
|
+
const provider = createVertexPartner({
|
|
81
|
+
project: 'your-project',
|
|
82
|
+
location: 'global',
|
|
83
|
+
publisher: 'any-publisher',
|
|
84
|
+
});
|
|
85
|
+
|
|
86
|
+
const model = provider('model-id');
|
|
87
|
+
```
|
|
88
|
+
|
|
89
|
+
## Authentication
|
|
90
|
+
|
|
91
|
+
Uses Google Cloud Application Default Credentials:
|
|
92
|
+
|
|
93
|
+
```bash
|
|
94
|
+
gcloud auth application-default login
|
|
95
|
+
gcloud auth application-default set-quota-project YOUR_PROJECT
|
|
96
|
+
```
|
|
97
|
+
|
|
98
|
+
## Tool Calling Support
|
|
99
|
+
|
|
100
|
+
⚠️ **Note**: Tool calling should work with this provider since it uses the native OpenAI-compatible format. The previous issues were caused by using the Gemini format class with OpenAI-format responses.
|
|
101
|
+
|
|
102
|
+
## License
|
|
103
|
+
|
|
104
|
+
MIT
|
package/index.js
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Vertex Partner Provider for OpenCode
|
|
3
|
+
*
|
|
4
|
+
* Uses custom fetch to inject Google Cloud auth since
|
|
5
|
+
* @ai-sdk/openai-compatible doesn't support async headers.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
const { createOpenAICompatible } = require('@ai-sdk/openai-compatible');
|
|
9
|
+
const { GoogleAuth } = require('google-auth-library');
|
|
10
|
+
|
// Cache of GoogleAuth instances keyed by the serialized auth options.
// The original cached a single module-level client, which silently ignored
// differing `googleAuthOptions` on every call after the first.
const authClients = new Map();

/**
 * Resolve a Google Cloud OAuth2 access token via Application Default
 * Credentials (or whatever `googleAuthOptions` configures).
 *
 * @param {object} [googleAuthOptions] - Extra options forwarded to GoogleAuth
 *   (may override the default cloud-platform scope).
 * @returns {Promise<string>} A bearer token for the Vertex AI endpoint.
 * @throws {Error} If no access token could be obtained.
 */
async function getAuthToken(googleAuthOptions) {
  const cacheKey = JSON.stringify(googleAuthOptions ?? null);
  let auth = authClients.get(cacheKey);
  if (!auth) {
    auth = new GoogleAuth({
      scopes: ['https://www.googleapis.com/auth/cloud-platform'],
      ...googleAuthOptions,
    });
    authClients.set(cacheKey, auth);
  }

  const client = await auth.getClient();
  const { token } = await client.getAccessToken();
  // getAccessToken() may resolve with { token: null }; fail loudly instead of
  // sending "Bearer null" to the API.
  if (!token) {
    throw new Error('Failed to obtain Google Cloud access token');
  }
  return token;
}
|
|
25
|
+
/**
 * Create a Vercel AI SDK provider for Vertex AI partner models that speak the
 * OpenAI-compatible format (GLM, Kimi, DeepSeek, MiniMax, Qwen).
 *
 * Auth is injected through a custom fetch because
 * @ai-sdk/openai-compatible does not support async headers.
 *
 * @param {object} [options]
 * @param {string} [options.project] - GCP project id (defaults to
 *   GOOGLE_VERTEX_PROJECT). Required.
 * @param {string} [options.location] - Vertex location (defaults to
 *   GOOGLE_VERTEX_LOCATION or 'global').
 * @param {string} options.publisher - Model publisher segment (e.g. 'zai-org').
 * @param {object} [options.googleAuthOptions] - Forwarded to GoogleAuth.
 * @returns {Function} Provider callable: (modelId) => language model. Bare
 *   model ids are auto-prefixed with the publisher.
 * @throws {Error} When project or publisher is missing.
 */
function createProvider(options = {}) {
  const {
    project = process.env.GOOGLE_VERTEX_PROJECT,
    location = process.env.GOOGLE_VERTEX_LOCATION || 'global',
    publisher,
    googleAuthOptions,
    ...rest
  } = options;

  if (!project) throw new Error('project is required');
  if (!publisher) throw new Error('publisher is required');

  // The 'global' location uses the bare host; regional locations get a
  // region-prefixed host.
  const baseHost = location === 'global'
    ? 'aiplatform.googleapis.com'
    : `${location}-aiplatform.googleapis.com`;

  const baseURL = `https://${baseHost}/v1/projects/${project}/locations/${location}/endpoints/openapi`;

  // Opt-in debug logging. The original unconditionally wrote to stderr on
  // every request, which pollutes consumers' logs.
  const debug = process.env.VERTEX_PARTNER_DEBUG
    ? (msg) => console.error(`[vertex-partner] ${msg}`)
    : () => {};

  debug(`baseURL: ${baseURL}`);

  // Custom fetch that injects the Google Cloud bearer token per request.
  const authFetch = async (url, init) => {
    debug(`Fetching: ${url}`);

    const token = await getAuthToken(googleAuthOptions);
    // SECURITY: never log the token — the original printed a 20-character
    // prefix of the access token to stderr.

    const headers = new Headers(init?.headers);
    headers.set('Authorization', `Bearer ${token}`);

    return fetch(url, { ...init, headers });
  };

  const provider = createOpenAICompatible({
    name: `vertex-${publisher}`,
    baseURL,
    fetch: authFetch, // Use custom fetch for auth
    ...rest,
  });

  // Wrap so callers can pass bare model ids; the publisher prefix is added
  // automatically ('glm-4.7-maas' -> 'zai-org/glm-4.7-maas').
  const wrappedProvider = (modelId) => {
    const fullModelId = modelId.includes('/') ? modelId : `${publisher}/${modelId}`;
    debug(`Using model: ${fullModelId}`);
    return provider(fullModelId);
  };

  wrappedProvider.specificationVersion = provider.specificationVersion;
  wrappedProvider.languageModel = (id) => wrappedProvider(id);
  wrappedProvider.chatModel = (id) => wrappedProvider(id);
  // NOTE(review): these delegate WITHOUT the publisher prefix — presumably
  // intentional for non-chat model kinds; confirm against actual usage.
  wrappedProvider.completionModel = provider.completionModel;
  wrappedProvider.embeddingModel = provider.embeddingModel;
  wrappedProvider.textEmbeddingModel = provider.textEmbeddingModel;
  wrappedProvider.imageModel = provider.imageModel;

  return wrappedProvider;
}
|
|
86
|
+
// Main export is the generic factory; named aliases are provided for
// convenience and for destructuring imports.
module.exports = createProvider;
module.exports.createProvider = createProvider;
module.exports.createVertexPartner = createProvider;

// Per-publisher convenience factories. The publisher is fixed by the factory
// and cannot be overridden via opts (it is spread in last on purpose).
const PUBLISHER_FACTORIES = {
  createGLM: 'zai-org',
  createKimi: 'moonshotai',
  createDeepSeek: 'deepseek-ai',
  createMiniMax: 'minimaxai',
  createQwen: 'qwen',
};

for (const [factoryName, publisher] of Object.entries(PUBLISHER_FACTORIES)) {
  module.exports[factoryName] = (opts) => createProvider({ ...opts, publisher });
}
|
package/package.json
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@serii84/vertex-partner-provider",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "Vercel AI SDK provider for Vertex AI partner models (GLM, Kimi, DeepSeek, MiniMax, Qwen) using the OpenAI-compatible format",
|
|
5
|
+
"main": "index.js",
|
|
6
|
+
"scripts": {
|
|
7
|
+
"test": "node test.js"
|
|
8
|
+
},
|
|
9
|
+
"keywords": [],
|
|
10
|
+
"author": "",
|
|
11
|
+
"license": "MIT",
|
|
12
|
+
"dependencies": {
|
|
13
|
+
"@ai-sdk/openai-compatible": "^2.0.4",
|
|
14
|
+
"@ai-sdk/provider-utils": "^4.0.4",
|
|
15
|
+
"ai": "^6.0.27",
|
|
16
|
+
"google-auth-library": "^10.5.0",
|
|
17
|
+
"zod": "^4.3.5"
|
|
18
|
+
}
|
|
19
|
+
}
|
package/test.js
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Test script for Vertex Partner Provider
|
|
3
|
+
*
|
|
4
|
+
* Run with: node test.js
|
|
5
|
+
*
|
|
6
|
+
* Requires: gcloud auth application-default login
|
|
7
|
+
*/
|
|
8
|
+
const { createGLM, createDeepSeek, createKimi } = require('./index.js');
const { generateText, stepCountIs, tool } = require('ai');
const { z } = require('zod');
|
|
12
|
+
// Default GCP project for the test run; override with GOOGLE_VERTEX_PROJECT.
// NOTE(review): 'noter-1c2a1' looks like a personal project id committed as a
// fallback — consider removing it before publishing.
const PROJECT = process.env.GOOGLE_VERTEX_PROJECT || 'noter-1c2a1';
|
|
14
|
+
/**
 * Test 1: basic chat. Creates a GLM provider, sends a fixed prompt, and
 * prints the model's reply.
 *
 * @returns {Promise<boolean>} true when the request completed.
 */
async function testBasicChat() {
  console.log('\n📝 Test 1: Basic Chat');
  console.log('-'.repeat(40));

  const provider = createGLM({ project: PROJECT, location: 'global' });
  const model = provider('glm-4.7-maas');
  console.log('Model ID:', model.modelId);

  const { text } = await generateText({
    model,
    prompt: 'Say "Hello from GLM!" and nothing else.',
  });

  console.log('✅ Response:', text);
  return true;
}
|
|
32
|
+
/**
 * Test 2: tool calling. Registers a stub weather tool and lets the model
 * call it for up to 3 steps.
 *
 * FIX: package.json pins ai ^6.0.27; since AI SDK v5 the tool() helper takes
 * `inputSchema` (not `parameters`) and generateText takes
 * `stopWhen: stepCountIs(n)` (not `maxSteps`). With the old option names the
 * tool schema and step limit were silently ignored.
 *
 * @returns {Promise<boolean>} true when the request completed.
 */
async function testToolCalling() {
  console.log('\n🔧 Test 2: Tool Calling');
  console.log('-'.repeat(40));

  const glm = createGLM({ project: PROJECT, location: 'global' });

  const result = await generateText({
    model: glm('glm-4.7-maas'),
    prompt: 'What is the weather in San Francisco? Use the weather tool.',
    tools: {
      weather: tool({
        description: 'Get the weather for a location',
        inputSchema: z.object({
          location: z.string().describe('The city name'),
        }),
        // Stub executor: returns fixed fake weather data.
        execute: async ({ location }) => {
          return { location, temperature: 72, condition: 'sunny' };
        },
      }),
    },
    stopWhen: stepCountIs(3),
  });

  console.log('✅ Response:', result.text);
  console.log('Tool calls:', result.toolCalls?.length || 0);
  return true;
}
|
|
60
|
+
/**
 * Entry point: runs the tests sequentially and exits with code 1 on the
 * first failure, printing the error (and its cause, if any).
 */
async function main() {
  const banner = '='.repeat(50);
  console.log(banner);
  console.log('Vertex Partner Provider Test Suite');
  console.log(banner);
  console.log(`Project: ${PROJECT}`);

  try {
    await testBasicChat();
    await testToolCalling();
    console.log('\n✅ All tests passed!');
  } catch (error) {
    console.error('\n❌ Test failed:', error.message);
    if (error.cause) {
      console.error('Cause:', JSON.stringify(error.cause, null, 2));
    }
    process.exit(1);
  }
}
|
|
77
|
+
// Fire-and-forget entry call; main() handles its own errors and exit code.
main();
|