@serii84/vertex-partner-provider 1.0.15 → 1.0.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/index.js +159 -3
  2. package/package.json +1 -1
package/index.js CHANGED
@@ -1,6 +1,6 @@
1
1
  /**
2
2
  * Vertex Partner Provider for OpenCode
3
- * v1.0.15 - No transform, pass through raw (debug)
3
+ * v1.0.19 - Fix response body consumption issue
4
4
  */
5
5
 
6
6
  const { createOpenAICompatible } = require('@ai-sdk/openai-compatible');
@@ -20,6 +20,144 @@ async function getAuthToken(googleAuthOptions) {
20
20
  return token.token;
21
21
  }
22
22
 
23
/**
 * Normalize a provider payload to the plain OpenAI chat-completion shape.
 * Mutates `parsed` in place and returns it: strips non-standard per-choice
 * fields, promotes `reasoning_content` into `content` when `content` is
 * missing, trims `usage` to the three standard token counters, and drops
 * top-level `metadata`.
 */
function cleanResponse(parsed) {
  const choices = parsed.choices;
  if (choices) {
    for (let i = 0; i < choices.length; i++) {
      const choice = choices[i];
      // Provider-specific extras the OpenAI schema does not define.
      delete choice.matched_stop;
      delete choice.logprobs;

      const delta = choice.delta;
      if (delta) {
        if (delta.reasoning_content && !delta.content) {
          delta.content = delta.reasoning_content;
        }
        delete delta.reasoning_content;
      }

      const message = choice.message;
      if (message) {
        if (message.reasoning_content && !message.content) {
          message.content = message.reasoning_content;
        }
        delete message.reasoning_content;
      }
    }
  }

  // Keep only the three standard usage counters.
  if (parsed.usage) {
    const { prompt_tokens, completion_tokens, total_tokens } = parsed.usage;
    parsed.usage = { prompt_tokens, completion_tokens, total_tokens };
  }

  delete parsed.metadata;

  return parsed;
}
54
+
55
+ function transformStream(response) {
56
+ const reader = response.body.getReader();
57
+ const decoder = new TextDecoder();
58
+ const encoder = new TextEncoder();
59
+
60
+ let buffer = '';
61
+
62
+ const transformedStream = new ReadableStream({
63
+ async pull(controller) {
64
+ try {
65
+ const { done, value } = await reader.read();
66
+
67
+ if (done) {
68
+ controller.close();
69
+ return;
70
+ }
71
+
72
+ buffer += decoder.decode(value, { stream: true });
73
+
74
+ let boundary;
75
+ while ((boundary = buffer.indexOf('\n\n')) !== -1) {
76
+ const message = buffer.slice(0, boundary);
77
+ buffer = buffer.slice(boundary + 2);
78
+
79
+ if (!message.trim()) continue;
80
+
81
+ if (message.startsWith('data: ')) {
82
+ const data = message.slice(6);
83
+
84
+ if (data === '[DONE]') {
85
+ controller.enqueue(encoder.encode('data: [DONE]\n\n'));
86
+ continue;
87
+ }
88
+
89
+ try {
90
+ const parsed = JSON.parse(data);
91
+
92
+ // Skip empty choices (usage-only chunk)
93
+ if (parsed.choices && parsed.choices.length === 0) {
94
+ continue;
95
+ }
96
+
97
+ const cleaned = cleanResponse(parsed);
98
+ controller.enqueue(encoder.encode('data: ' + JSON.stringify(cleaned) + '\n\n'));
99
+ } catch (e) {
100
+ // Skip invalid JSON
101
+ }
102
+ }
103
+ }
104
+ } catch (err) {
105
+ controller.error(err);
106
+ }
107
+ },
108
+
109
+ cancel() {
110
+ reader.cancel();
111
+ }
112
+ });
113
+
114
+ return new Response(transformedStream, {
115
+ headers: response.headers,
116
+ status: response.status,
117
+ statusText: response.statusText,
118
+ });
119
+ }
120
+
121
/**
 * Transform a non-streaming upstream response into a clean OpenAI-shaped
 * chat completion. An empty body is replaced with a minimal synthetic
 * completion; a non-JSON body is passed through unchanged.
 *
 * @param {Response} response - upstream response; its body is consumed here.
 * @returns {Promise<Response>} a fresh Response with the (possibly cleaned) body.
 */
async function transformNonStreamingResponse(response) {
  const raw = await response.text();

  // Some upstreams reply with a completely empty body; synthesize a minimal
  // completion so downstream JSON parsing does not fail.
  if (!raw || raw.trim() === '') {
    const fallback = {
      id: 'empty',
      object: 'chat.completion',
      choices: [{
        index: 0,
        message: { role: 'assistant', content: '' },
        finish_reason: 'stop'
      }],
      usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 }
    };
    return new Response(JSON.stringify(fallback), {
      headers: { 'content-type': 'application/json' },
      status: response.status,
      statusText: response.statusText,
    });
  }

  try {
    const cleaned = cleanResponse(JSON.parse(raw));
    return new Response(JSON.stringify(cleaned), {
      headers: { 'content-type': 'application/json' },
      status: response.status,
      statusText: response.statusText,
    });
  } catch (e) {
    // Body is not JSON — return the original text untouched.
    return new Response(raw, {
      headers: response.headers,
      status: response.status,
      statusText: response.statusText,
    });
  }
}
160
+
23
161
  function createVertexPartner(options = {}) {
24
162
  const {
25
163
  project = process.env.GOOGLE_VERTEX_PROJECT,
@@ -42,8 +180,26 @@ function createVertexPartner(options = {}) {
42
180
  const headers = new Headers(init?.headers);
43
181
  headers.set('Authorization', `Bearer ${token}`);
44
182
 
45
- // Just pass through - no transform
46
- return fetch(url, { ...init, headers });
183
+ let isStreaming = false;
184
+ if (init?.body) {
185
+ try {
186
+ const body = JSON.parse(init.body);
187
+ isStreaming = body.stream === true;
188
+ } catch (e) {}
189
+ }
190
+
191
+ const response = await fetch(url, { ...init, headers });
192
+
193
+ if (!response.ok) {
194
+ // Clone and return error responses as-is
195
+ return response;
196
+ }
197
+
198
+ if (isStreaming) {
199
+ return transformStream(response);
200
+ } else {
201
+ return transformNonStreamingResponse(response);
202
+ }
47
203
  };
48
204
 
49
205
  const provider = createOpenAICompatible({
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@serii84/vertex-partner-provider",
3
- "version": "1.0.15",
3
+ "version": "1.0.19",
4
4
  "description": "Vertex AI partner models (GLM, Kimi, DeepSeek) for OpenCode",
5
5
  "main": "index.js",
6
6
  "scripts": {