@adminforth/completion-adapter-open-ai-chat-gpt 2.0.11 → 2.0.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/dist/index.js +173 -70
  2. package/index.ts +208 -91
  3. package/package.json +2 -1
package/dist/index.js CHANGED
@@ -8,48 +8,111 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
8
8
  });
9
9
  };
10
10
  import { encoding_for_model } from "tiktoken";
11
+ function extractOutputText(data) {
12
+ var _a;
13
+ let text = "";
14
+ for (const item of (_a = data.output) !== null && _a !== void 0 ? _a : []) {
15
+ if (item.type !== "message" || !Array.isArray(item.content))
16
+ continue;
17
+ for (const part of item.content) {
18
+ if (part.type === "output_text" && typeof part.text === "string") {
19
+ text += part.text;
20
+ }
21
+ }
22
+ }
23
+ return text;
24
+ }
25
+ function extractReasoning(data) {
26
+ var _a, _b, _c;
27
+ let reasoning = "";
28
+ for (const item of (_a = data.output) !== null && _a !== void 0 ? _a : []) {
29
+ if (item.type !== "reasoning")
30
+ continue;
31
+ for (const part of (_b = item.summary) !== null && _b !== void 0 ? _b : []) {
32
+ if ((part === null || part === void 0 ? void 0 : part.type) === "summary_text" && typeof part.text === "string") {
33
+ reasoning += part.text;
34
+ }
35
+ }
36
+ if (!reasoning) {
37
+ for (const part of (_c = item.content) !== null && _c !== void 0 ? _c : []) {
38
+ if ((part === null || part === void 0 ? void 0 : part.type) === "reasoning_text" && typeof part.text === "string") {
39
+ reasoning += part.text;
40
+ }
41
+ }
42
+ }
43
+ }
44
+ return reasoning || undefined;
45
+ }
46
+ function parseSseBlock(block) {
47
+ let event;
48
+ let data = "";
49
+ for (const rawLine of block.split("\n")) {
50
+ const line = rawLine.trimEnd();
51
+ if (!line)
52
+ continue;
53
+ if (line.startsWith("event:"))
54
+ event = line.slice(6).trim();
55
+ if (line.startsWith("data:"))
56
+ data += line.slice(5).trim();
57
+ }
58
+ return data ? { event, data } : null;
59
+ }
11
60
  export default class CompletionAdapterOpenAIChatGPT {
12
61
  constructor(options) {
13
62
  //@ts-ignore
14
- this.complete = (content_1, ...args_1) => __awaiter(this, [content_1, ...args_1], void 0, function* (content, maxTokens = 50, outputSchema, onChunk) {
15
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o;
16
- // stop parameter is alredy not supported
17
- // adapter users should explicitely ask model to stop at dot if needed (or "Complete only up to the end of sentence")
63
+ this.complete = (content_1, ...args_1) => __awaiter(this, [content_1, ...args_1], void 0, function* (content, maxTokens = 50, outputSchema, reasoningEffort = "low", onChunk) {
64
+ var _a, _b, _c;
18
65
  const model = this.options.model || "gpt-5-nano";
19
66
  const isStreaming = typeof onChunk === "function";
20
- const resp = yield fetch("https://api.openai.com/v1/chat/completions", {
67
+ const body = {
68
+ model,
69
+ input: content,
70
+ max_output_tokens: maxTokens,
71
+ stream: isStreaming,
72
+ text: outputSchema
73
+ ? {
74
+ format: Object.assign({ type: "json_schema" }, outputSchema),
75
+ }
76
+ : {
77
+ format: {
78
+ type: "text",
79
+ },
80
+ },
81
+ reasoning: {
82
+ effort: reasoningEffort,
83
+ }
84
+ };
85
+ const resp = yield fetch("https://api.openai.com/v1/responses", {
21
86
  method: "POST",
22
87
  headers: {
23
88
  "Content-Type": "application/json",
24
89
  Authorization: `Bearer ${this.options.openAiApiKey}`,
25
90
  },
26
- body: JSON.stringify(Object.assign({ model, messages: [
27
- {
28
- role: "user",
29
- content,
30
- },
31
- ], max_completion_tokens: maxTokens, response_format: outputSchema
32
- ? Object.assign({ type: "json_schema" }, outputSchema) : undefined, stream: isStreaming }, this.options.extraRequestBodyParameters)),
91
+ body: JSON.stringify(body),
33
92
  });
34
93
  if (!resp.ok) {
35
94
  let errorMessage = `OpenAI request failed with status ${resp.status}`;
36
95
  try {
37
- const errorData = yield resp.json();
38
- if ((_a = errorData === null || errorData === void 0 ? void 0 : errorData.error) === null || _a === void 0 ? void 0 : _a.message) {
96
+ const errorData = (yield resp.json());
97
+ if ((_a = errorData.error) === null || _a === void 0 ? void 0 : _a.message)
39
98
  errorMessage = errorData.error.message;
40
- }
41
99
  }
42
- catch (_p) { }
100
+ catch (_d) { }
43
101
  return { error: errorMessage };
44
102
  }
45
103
  if (!isStreaming) {
46
- const data = yield resp.json();
104
+ const json = yield resp.json();
105
+ const data = json;
47
106
  if (data.error) {
48
107
  return { error: data.error.message };
49
108
  }
109
+ const parsedContent = extractOutputText(data);
110
+ const reasoning = extractReasoning(data);
50
111
  return {
51
- content: (_d = (_c = (_b = data.choices) === null || _b === void 0 ? void 0 : _b[0]) === null || _c === void 0 ? void 0 : _c.message) === null || _d === void 0 ? void 0 : _d.content,
52
- finishReason: (_f = (_e = data.choices) === null || _e === void 0 ? void 0 : _e[0]) === null || _f === void 0 ? void 0 : _f.finish_reason,
112
+ content: parsedContent,
113
+ finishReason: ((_b = data.incomplete_details) === null || _b === void 0 ? void 0 : _b.reason)
114
+ ? data.incomplete_details.reason
115
+ : undefined,
53
116
  };
54
117
  }
55
118
  if (!resp.body) {
@@ -59,79 +122,121 @@ export default class CompletionAdapterOpenAIChatGPT {
59
122
  const decoder = new TextDecoder("utf-8");
60
123
  let buffer = "";
61
124
  let fullContent = "";
125
+ let fullReasoning = "";
62
126
  let finishReason;
127
+ const handleEvent = (event, eventType) => __awaiter(this, void 0, void 0, function* () {
128
+ var _a, _b, _c, _d;
129
+ const type = (event === null || event === void 0 ? void 0 : event.type) || eventType;
130
+ if (type === "response.output_text.delta") {
131
+ const delta = (event === null || event === void 0 ? void 0 : event.delta) || "";
132
+ if (!delta)
133
+ return;
134
+ fullContent += delta;
135
+ yield (onChunk === null || onChunk === void 0 ? void 0 : onChunk(delta, { type: "output", delta, text: fullContent }));
136
+ return;
137
+ }
138
+ if (type === "response.reasoning_summary_text.delta" ||
139
+ type === "response.reasoning_text.delta") {
140
+ const delta = (event === null || event === void 0 ? void 0 : event.delta) || "";
141
+ if (!delta)
142
+ return;
143
+ fullReasoning += delta;
144
+ yield (onChunk === null || onChunk === void 0 ? void 0 : onChunk(delta, {
145
+ type: "reasoning",
146
+ delta,
147
+ text: fullReasoning,
148
+ }));
149
+ return;
150
+ }
151
+ if (type === "response.completed" ||
152
+ type === "response.incomplete") {
153
+ const response = event === null || event === void 0 ? void 0 : event.response;
154
+ if (!response)
155
+ return;
156
+ const finalContent = extractOutputText(response);
157
+ if (finalContent.startsWith(fullContent)) {
158
+ const delta = finalContent.slice(fullContent.length);
159
+ if (delta) {
160
+ fullContent = finalContent;
161
+ yield (onChunk === null || onChunk === void 0 ? void 0 : onChunk(delta, {
162
+ type: "output",
163
+ delta,
164
+ text: fullContent,
165
+ }));
166
+ }
167
+ }
168
+ const finalReasoning = extractReasoning(response) || "";
169
+ if (finalReasoning.startsWith(fullReasoning)) {
170
+ const delta = finalReasoning.slice(fullReasoning.length);
171
+ if (delta) {
172
+ fullReasoning = finalReasoning;
173
+ yield (onChunk === null || onChunk === void 0 ? void 0 : onChunk(delta, {
174
+ type: "reasoning",
175
+ delta,
176
+ text: fullReasoning,
177
+ }));
178
+ }
179
+ }
180
+ finishReason =
181
+ ((_a = response.incomplete_details) === null || _a === void 0 ? void 0 : _a.reason) || response.status || finishReason;
182
+ return;
183
+ }
184
+ if (type === "response.failed") {
185
+ throw new Error(((_c = (_b = event === null || event === void 0 ? void 0 : event.response) === null || _b === void 0 ? void 0 : _b.error) === null || _c === void 0 ? void 0 : _c.message) ||
186
+ ((_d = event === null || event === void 0 ? void 0 : event.error) === null || _d === void 0 ? void 0 : _d.message) ||
187
+ "Response failed");
188
+ }
189
+ });
63
190
  try {
64
191
  while (true) {
65
192
  const { value, done } = yield reader.read();
66
- if (done) {
193
+ if (done)
67
194
  break;
68
- }
69
195
  buffer += decoder.decode(value, { stream: true });
70
- const lines = buffer.split("\n");
71
- buffer = lines.pop() || "";
72
- for (const rawLine of lines) {
73
- const line = rawLine.trim();
74
- if (!line || !line.startsWith("data:")) {
196
+ const blocks = buffer.split("\n\n");
197
+ buffer = blocks.pop() || "";
198
+ for (const block of blocks) {
199
+ const parsedBlock = parseSseBlock(block);
200
+ if (!(parsedBlock === null || parsedBlock === void 0 ? void 0 : parsedBlock.data) || parsedBlock.data === "[DONE]")
75
201
  continue;
76
- }
77
- const dataStr = line.slice(5).trim();
78
- if (dataStr === "[DONE]") {
79
- return {
80
- content: fullContent,
81
- finishReason,
82
- };
83
- }
84
- let parsed;
202
+ let event;
85
203
  try {
86
- parsed = JSON.parse(dataStr);
204
+ event = JSON.parse(parsedBlock.data);
87
205
  }
88
- catch (_q) {
206
+ catch (_e) {
89
207
  continue;
90
208
  }
91
- if ((_g = parsed.error) === null || _g === void 0 ? void 0 : _g.message) {
92
- return { error: parsed.error.message };
209
+ if ((_c = event === null || event === void 0 ? void 0 : event.error) === null || _c === void 0 ? void 0 : _c.message) {
210
+ return { error: event.error.message };
93
211
  }
94
- const choice = (_h = parsed.choices) === null || _h === void 0 ? void 0 : _h[0];
95
- if (!choice) {
96
- continue;
97
- }
98
- if (choice.finish_reason) {
99
- finishReason = choice.finish_reason;
100
- }
101
- const chunk = (_k = (_j = choice.delta) === null || _j === void 0 ? void 0 : _j.content) !== null && _k !== void 0 ? _k : "";
102
- if (!chunk) {
103
- continue;
104
- }
105
- fullContent += chunk;
106
- yield onChunk(chunk);
212
+ yield handleEvent(event, parsedBlock.event);
107
213
  }
108
214
  }
109
- if (buffer.trim().startsWith("data:")) {
110
- const dataStr = buffer.trim().slice(5).trim();
111
- if (dataStr && dataStr !== "[DONE]") {
215
+ if (buffer.trim()) {
216
+ const parsedBlock = parseSseBlock(buffer.trim());
217
+ if ((parsedBlock === null || parsedBlock === void 0 ? void 0 : parsedBlock.data) && parsedBlock.data !== "[DONE]") {
112
218
  try {
113
- const parsed = JSON.parse(dataStr);
114
- const choice = (_l = parsed.choices) === null || _l === void 0 ? void 0 : _l[0];
115
- const chunk = (_o = (_m = choice === null || choice === void 0 ? void 0 : choice.delta) === null || _m === void 0 ? void 0 : _m.content) !== null && _o !== void 0 ? _o : "";
116
- if (chunk) {
117
- fullContent += chunk;
118
- yield onChunk(chunk);
119
- }
120
- if (choice === null || choice === void 0 ? void 0 : choice.finish_reason) {
121
- finishReason = choice.finish_reason;
122
- }
219
+ yield handleEvent(JSON.parse(parsedBlock.data), parsedBlock.event);
220
+ }
221
+ catch (error) {
222
+ return {
223
+ error: (error === null || error === void 0 ? void 0 : error.message) || "Streaming failed",
224
+ content: fullContent || undefined,
225
+ finishReason,
226
+ };
123
227
  }
124
- catch (_r) { }
125
228
  }
126
229
  }
127
230
  return {
128
- content: fullContent,
231
+ content: fullContent || undefined,
129
232
  finishReason,
130
233
  };
131
234
  }
132
235
  catch (error) {
133
236
  return {
134
237
  error: (error === null || error === void 0 ? void 0 : error.message) || "Streaming failed",
238
+ content: fullContent || undefined,
239
+ finishReason,
135
240
  };
136
241
  }
137
242
  finally {
@@ -147,8 +252,6 @@ export default class CompletionAdapterOpenAIChatGPT {
147
252
  }
148
253
  }
149
254
  measureTokensCount(content) {
150
- //TODO: Implement token counting logic
151
- const tokens = this.encoding.encode(content);
152
- return tokens.length;
255
+ return this.encoding.encode(content).length;
153
256
  }
154
257
  }
package/index.ts CHANGED
@@ -1,8 +1,76 @@
1
1
  import type { AdapterOptions } from "./types.js";
2
- import type { CompletionAdapter } from "adminforth";
2
+ import type { CompletionAdapter, CompletionStreamEvent } from "adminforth";
3
3
  import { encoding_for_model, type TiktokenModel } from "tiktoken";
4
+ import type OpenAI from "openai";
4
5
 
5
- type StreamChunkCallback = (chunk: string) => void | Promise<void>;
6
+ type StreamChunkCallback = (
7
+ chunk: string,
8
+ event?: CompletionStreamEvent,
9
+ ) => void | Promise<void>;
10
+
11
+ type ResponseCreateBody = OpenAI.Responses.ResponseCreateParams;
12
+ type OpenAIResponsesSuccess = OpenAI.Responses.Response;
13
+ type OpenAIErrorResponse = {
14
+ error?: {
15
+ message?: string;
16
+ type?: string;
17
+ param?: string | null;
18
+ code?: string | null;
19
+ };
20
+ };
21
+
22
+ function extractOutputText(data: OpenAIResponsesSuccess): string {
23
+ let text = "";
24
+
25
+ for (const item of data.output ?? []) {
26
+ if (item.type !== "message" || !Array.isArray(item.content)) continue;
27
+ for (const part of item.content) {
28
+ if (part.type === "output_text" && typeof part.text === "string") {
29
+ text += part.text;
30
+ }
31
+ }
32
+ }
33
+
34
+ return text;
35
+ }
36
+
37
+ function extractReasoning(data: OpenAIResponsesSuccess): string | undefined {
38
+ let reasoning = "";
39
+
40
+ for (const item of data.output ?? []) {
41
+ if (item.type !== "reasoning") continue;
42
+
43
+ for (const part of item.summary ?? []) {
44
+ if (part?.type === "summary_text" && typeof part.text === "string") {
45
+ reasoning += part.text;
46
+ }
47
+ }
48
+
49
+ if (!reasoning) {
50
+ for (const part of item.content ?? []) {
51
+ if (part?.type === "reasoning_text" && typeof part.text === "string") {
52
+ reasoning += part.text;
53
+ }
54
+ }
55
+ }
56
+ }
57
+
58
+ return reasoning || undefined;
59
+ }
60
+
61
+ function parseSseBlock(block: string) {
62
+ let event: string | undefined;
63
+ let data = "";
64
+
65
+ for (const rawLine of block.split("\n")) {
66
+ const line = rawLine.trimEnd();
67
+ if (!line) continue;
68
+ if (line.startsWith("event:")) event = line.slice(6).trim();
69
+ if (line.startsWith("data:")) data += line.slice(5).trim();
70
+ }
71
+
72
+ return data ? { event, data } : null;
73
+ }
6
74
 
7
75
  export default class CompletionAdapterOpenAIChatGPT
8
76
  implements CompletionAdapter
@@ -13,7 +81,7 @@ export default class CompletionAdapterOpenAIChatGPT
13
81
  constructor(options: AdapterOptions) {
14
82
  this.options = options;
15
83
  this.encoding = encoding_for_model(
16
- (this.options.model || "gpt-5-nano") as TiktokenModel
84
+ (this.options.model || "gpt-5-nano") as TiktokenModel,
17
85
  );
18
86
  }
19
87
 
@@ -24,72 +92,77 @@ export default class CompletionAdapterOpenAIChatGPT
24
92
  }
25
93
 
26
94
  measureTokensCount(content: string): number {
27
- //TODO: Implement token counting logic
28
- const tokens = this.encoding.encode(content);
29
- return tokens.length;
95
+ return this.encoding.encode(content).length;
30
96
  }
31
97
  //@ts-ignore
32
98
  complete = async (
33
99
  content: string,
34
- maxTokens: number = 50,
100
+ maxTokens = 50,
35
101
  outputSchema?: any,
102
+ reasoningEffort: 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh' = "low",
36
103
  onChunk?: StreamChunkCallback,
37
104
  ): Promise<{
38
105
  content?: string;
39
106
  finishReason?: string;
40
107
  error?: string;
41
108
  }> => {
42
- // stop parameter is alredy not supported
43
- // adapter users should explicitely ask model to stop at dot if needed (or "Complete only up to the end of sentence")
44
109
  const model = this.options.model || "gpt-5-nano";
45
110
  const isStreaming = typeof onChunk === "function";
111
+ const body = {
112
+ model,
113
+ input: content,
114
+ max_output_tokens: maxTokens,
115
+ stream: isStreaming,
116
+ text: outputSchema
117
+ ? {
118
+ format: {
119
+ type: "json_schema",
120
+ ...outputSchema,
121
+ },
122
+ }
123
+ : {
124
+ format: {
125
+ type: "text",
126
+ },
127
+ },
128
+ reasoning: {
129
+ effort: reasoningEffort,
130
+ }
131
+ } as ResponseCreateBody;
46
132
 
47
- const resp = await fetch("https://api.openai.com/v1/chat/completions", {
133
+ const resp = await fetch("https://api.openai.com/v1/responses", {
48
134
  method: "POST",
49
135
  headers: {
50
136
  "Content-Type": "application/json",
51
137
  Authorization: `Bearer ${this.options.openAiApiKey}`,
52
138
  },
53
- body: JSON.stringify({
54
- model,
55
- messages: [
56
- {
57
- role: "user",
58
- content,
59
- },
60
- ],
61
- max_completion_tokens: maxTokens,
62
- response_format: outputSchema
63
- ? {
64
- type: "json_schema",
65
- ...outputSchema,
66
- }
67
- : undefined,
68
- stream: isStreaming,
69
- ...this.options.extraRequestBodyParameters,
70
- }),
139
+ body: JSON.stringify(body),
71
140
  });
72
141
 
73
142
  if (!resp.ok) {
74
143
  let errorMessage = `OpenAI request failed with status ${resp.status}`;
75
144
  try {
76
- const errorData = await resp.json();
77
- if (errorData?.error?.message) {
78
- errorMessage = errorData.error.message;
79
- }
145
+ const errorData = (await resp.json()) as OpenAIErrorResponse;
146
+ if (errorData.error?.message) errorMessage = errorData.error.message;
80
147
  } catch {}
81
148
  return { error: errorMessage };
82
149
  }
83
150
 
84
151
  if (!isStreaming) {
85
- const data = await resp.json();
152
+ const json = await resp.json();
153
+ const data = json as OpenAIResponsesSuccess & OpenAIErrorResponse;
86
154
  if (data.error) {
87
155
  return { error: data.error.message };
88
156
  }
89
157
 
158
+ const parsedContent = extractOutputText(data);
159
+ const reasoning = extractReasoning(data);
160
+
90
161
  return {
91
- content: data.choices?.[0]?.message?.content,
92
- finishReason: data.choices?.[0]?.finish_reason,
162
+ content: parsedContent,
163
+ finishReason: data.incomplete_details?.reason
164
+ ? data.incomplete_details.reason
165
+ : undefined,
93
166
  };
94
167
  }
95
168
 
@@ -102,94 +175,138 @@ export default class CompletionAdapterOpenAIChatGPT
102
175
 
103
176
  let buffer = "";
104
177
  let fullContent = "";
178
+ let fullReasoning = "";
105
179
  let finishReason: string | undefined;
106
180
 
107
- try {
108
- while (true) {
109
- const { value, done } = await reader.read();
110
- if (done) {
111
- break;
112
- }
181
+ const handleEvent = async (event: any, eventType?: string) => {
182
+ const type = event?.type || eventType;
113
183
 
114
- buffer += decoder.decode(value, { stream: true });
184
+ if (type === "response.output_text.delta") {
185
+ const delta = event?.delta || "";
186
+ if (!delta) return;
187
+ fullContent += delta;
188
+ await onChunk?.(delta, { type: "output", delta, text: fullContent });
189
+ return;
190
+ }
115
191
 
116
- const lines = buffer.split("\n");
117
- buffer = lines.pop() || "";
192
+ if (
193
+ type === "response.reasoning_summary_text.delta" ||
194
+ type === "response.reasoning_text.delta"
195
+ ) {
196
+ const delta = event?.delta || "";
197
+ if (!delta) return;
198
+ fullReasoning += delta;
199
+ await onChunk?.(delta, {
200
+ type: "reasoning",
201
+ delta,
202
+ text: fullReasoning,
203
+ });
204
+ return;
205
+ }
118
206
 
119
- for (const rawLine of lines) {
120
- const line = rawLine.trim();
207
+ if (
208
+ type === "response.completed" ||
209
+ type === "response.incomplete"
210
+ ) {
211
+ const response = event?.response as OpenAIResponsesSuccess | undefined;
212
+ if (!response) return;
121
213
 
122
- if (!line || !line.startsWith("data:")) {
123
- continue;
214
+ const finalContent = extractOutputText(response);
215
+ if (finalContent.startsWith(fullContent)) {
216
+ const delta = finalContent.slice(fullContent.length);
217
+ if (delta) {
218
+ fullContent = finalContent;
219
+ await onChunk?.(delta, {
220
+ type: "output",
221
+ delta,
222
+ text: fullContent,
223
+ });
124
224
  }
225
+ }
125
226
 
126
- const dataStr = line.slice(5).trim();
127
-
128
- if (dataStr === "[DONE]") {
129
- return {
130
- content: fullContent,
131
- finishReason,
132
- };
227
+ const finalReasoning = extractReasoning(response) || "";
228
+ if (finalReasoning.startsWith(fullReasoning)) {
229
+ const delta = finalReasoning.slice(fullReasoning.length);
230
+ if (delta) {
231
+ fullReasoning = finalReasoning;
232
+ await onChunk?.(delta, {
233
+ type: "reasoning",
234
+ delta,
235
+ text: fullReasoning,
236
+ });
133
237
  }
238
+ }
134
239
 
135
- let parsed: any;
136
- try {
137
- parsed = JSON.parse(dataStr);
138
- } catch {
139
- continue;
140
- }
240
+ finishReason =
241
+ response.incomplete_details?.reason || response.status || finishReason;
242
+ return;
243
+ }
141
244
 
142
- if (parsed.error?.message) {
143
- return { error: parsed.error.message };
144
- }
245
+ if (type === "response.failed") {
246
+ throw new Error(
247
+ event?.response?.error?.message ||
248
+ event?.error?.message ||
249
+ "Response failed",
250
+ );
251
+ }
252
+ };
145
253
 
146
- const choice = parsed.choices?.[0];
147
- if (!choice) {
148
- continue;
149
- }
254
+ try {
255
+ while (true) {
256
+ const { value, done } = await reader.read();
257
+ if (done) break;
150
258
 
151
- if (choice.finish_reason) {
152
- finishReason = choice.finish_reason;
153
- }
259
+ buffer += decoder.decode(value, { stream: true });
260
+
261
+ const blocks = buffer.split("\n\n");
262
+ buffer = blocks.pop() || "";
263
+
264
+ for (const block of blocks) {
265
+ const parsedBlock = parseSseBlock(block);
266
+ if (!parsedBlock?.data || parsedBlock.data === "[DONE]") continue;
154
267
 
155
- const chunk = choice.delta?.content ?? "";
156
- if (!chunk) {
268
+ let event: any;
269
+ try {
270
+ event = JSON.parse(parsedBlock.data);
271
+ } catch {
157
272
  continue;
158
273
  }
159
274
 
160
- fullContent += chunk;
161
- await onChunk(chunk);
275
+ if (event?.error?.message) {
276
+ return { error: event.error.message };
277
+ }
278
+
279
+ await handleEvent(event, parsedBlock.event);
162
280
  }
163
281
  }
164
282
 
165
- if (buffer.trim().startsWith("data:")) {
166
- const dataStr = buffer.trim().slice(5).trim();
167
- if (dataStr && dataStr !== "[DONE]") {
283
+ if (buffer.trim()) {
284
+ const parsedBlock = parseSseBlock(buffer.trim());
285
+ if (parsedBlock?.data && parsedBlock.data !== "[DONE]") {
168
286
  try {
169
- const parsed = JSON.parse(dataStr);
170
- const choice = parsed.choices?.[0];
171
- const chunk = choice?.delta?.content ?? "";
172
- if (chunk) {
173
- fullContent += chunk;
174
- await onChunk(chunk);
175
- }
176
- if (choice?.finish_reason) {
177
- finishReason = choice.finish_reason;
178
- }
179
- } catch {}
287
+ await handleEvent(JSON.parse(parsedBlock.data), parsedBlock.event);
288
+ } catch (error: any) {
289
+ return {
290
+ error: error?.message || "Streaming failed",
291
+ content: fullContent || undefined,
292
+ finishReason,
293
+ };
294
+ }
180
295
  }
181
296
  }
182
297
 
183
298
  return {
184
- content: fullContent,
299
+ content: fullContent || undefined,
185
300
  finishReason,
186
301
  };
187
302
  } catch (error: any) {
188
303
  return {
189
304
  error: error?.message || "Streaming failed",
305
+ content: fullContent || undefined,
306
+ finishReason,
190
307
  };
191
308
  } finally {
192
309
  reader.releaseLock();
193
310
  }
194
311
  };
195
- }
312
+ }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@adminforth/completion-adapter-open-ai-chat-gpt",
3
- "version": "2.0.11",
3
+ "version": "2.0.13",
4
4
  "main": "dist/index.js",
5
5
  "types": "dist/index.d.ts",
6
6
  "type": "module",
@@ -16,6 +16,7 @@
16
16
  "typescript": "^5.9.3"
17
17
  },
18
18
  "dependencies": {
19
+ "openai": "^6.34.0",
19
20
  "tiktoken": "^1.0.22"
20
21
  },
21
22
  "peerDependencies": {