@serii84/vertex-partner-provider 1.0.17 → 1.0.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/index.js +69 -10
  2. package/package.json +1 -1
package/index.js CHANGED
@@ -1,6 +1,6 @@
1
1
  /**
2
2
  * Vertex Partner Provider for OpenCode
3
- * v1.0.17 - Skip usage-only chunk at end (empty choices)
3
+ * v1.0.20 - Normalize finish_reason to string (some models return object)
4
4
  */
5
5
 
6
6
  const { createOpenAICompatible } = require('@ai-sdk/openai-compatible');
@@ -20,23 +20,38 @@ async function getAuthToken(googleAuthOptions) {
20
20
  return token.token;
21
21
  }
22
22
 
23
- function cleanChunk(parsed) {
23
+ function cleanResponse(parsed) {
24
24
  if (parsed.choices) {
25
25
  for (const choice of parsed.choices) {
26
26
  delete choice.matched_stop;
27
27
  delete choice.logprobs;
28
-
28
+
29
+ // Normalize finish_reason to a string (some models return an object)
30
+ if (choice.finish_reason && typeof choice.finish_reason === 'object') {
31
+ // Extract string value from object, common patterns: {type: "stop"}, {reason: "stop"}
32
+ choice.finish_reason = choice.finish_reason.type
33
+ || choice.finish_reason.reason
34
+ || choice.finish_reason.stop_reason
35
+ || 'stop';
36
+ }
37
+
29
38
  if (choice.delta) {
30
39
  if (!choice.delta.content && choice.delta.reasoning_content) {
31
40
  choice.delta.content = choice.delta.reasoning_content;
32
41
  }
33
42
  delete choice.delta.reasoning_content;
34
43
  }
44
+
45
+ if (choice.message) {
46
+ if (!choice.message.content && choice.message.reasoning_content) {
47
+ choice.message.content = choice.message.reasoning_content;
48
+ }
49
+ delete choice.message.reasoning_content;
50
+ }
35
51
  }
36
52
  }
37
53
 
38
54
  if (parsed.usage) {
39
- // Keep only standard fields
40
55
  const { prompt_tokens, completion_tokens, total_tokens } = parsed.usage;
41
56
  parsed.usage = { prompt_tokens, completion_tokens, total_tokens };
42
57
  }
@@ -83,13 +98,12 @@ function transformStream(response) {
83
98
  try {
84
99
  const parsed = JSON.parse(data);
85
100
 
86
- // Skip the usage-only chunk (empty choices array)
87
- // This chunk causes issues with some parsers
101
+ // Skip empty choices (usage-only chunk)
88
102
  if (parsed.choices && parsed.choices.length === 0) {
89
103
  continue;
90
104
  }
91
105
 
92
- const cleaned = cleanChunk(parsed);
106
+ const cleaned = cleanResponse(parsed);
93
107
  controller.enqueue(encoder.encode('data: ' + JSON.stringify(cleaned) + '\n\n'));
94
108
  } catch (e) {
95
109
  // Skip invalid JSON
@@ -113,6 +127,46 @@ function transformStream(response) {
113
127
  });
114
128
  }
115
129
 
130
+ async function transformNonStreamingResponse(response) {
131
+ const text = await response.text();
132
+
133
+ // Handle empty response
134
+ if (!text || text.trim() === '') {
135
+ return new Response(JSON.stringify({
136
+ id: 'empty',
137
+ object: 'chat.completion',
138
+ choices: [{
139
+ index: 0,
140
+ message: { role: 'assistant', content: '' },
141
+ finish_reason: 'stop'
142
+ }],
143
+ usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 }
144
+ }), {
145
+ headers: { 'content-type': 'application/json' },
146
+ status: response.status,
147
+ statusText: response.statusText,
148
+ });
149
+ }
150
+
151
+ try {
152
+ const data = JSON.parse(text);
153
+ const cleaned = cleanResponse(data);
154
+
155
+ return new Response(JSON.stringify(cleaned), {
156
+ headers: { 'content-type': 'application/json' },
157
+ status: response.status,
158
+ statusText: response.statusText,
159
+ });
160
+ } catch (e) {
161
+ // If JSON parse fails, return original text as-is
162
+ return new Response(text, {
163
+ headers: response.headers,
164
+ status: response.status,
165
+ statusText: response.statusText,
166
+ });
167
+ }
168
+ }
169
+
116
170
  function createVertexPartner(options = {}) {
117
171
  const {
118
172
  project = process.env.GOOGLE_VERTEX_PROJECT,
@@ -145,11 +199,16 @@ function createVertexPartner(options = {}) {
145
199
 
146
200
  const response = await fetch(url, { ...init, headers });
147
201
 
148
- if (isStreaming && response.ok) {
149
- return transformStream(response);
202
+ if (!response.ok) {
203
+ // Clone and return error responses as-is
204
+ return response;
150
205
  }
151
206
 
152
- return response;
207
+ if (isStreaming) {
208
+ return transformStream(response);
209
+ } else {
210
+ return transformNonStreamingResponse(response);
211
+ }
153
212
  };
154
213
 
155
214
  const provider = createOpenAICompatible({
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@serii84/vertex-partner-provider",
3
- "version": "1.0.17",
3
+ "version": "1.0.20",
4
4
  "description": "Vertex AI partner models (GLM, Kimi, DeepSeek) for OpenCode",
5
5
  "main": "index.js",
6
6
  "scripts": {