@adminforth/completion-adapter-open-ai-chat-gpt 2.0.10 → 2.0.11

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (3)
  1. package/dist/index.js +113 -11
  2. package/index.ts +143 -16
  3. package/package.json +1 -1
package/dist/index.js CHANGED
@@ -10,10 +10,13 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
 import { encoding_for_model } from "tiktoken";
 export default class CompletionAdapterOpenAIChatGPT {
     constructor(options) {
-        this.complete = (content_1, ...args_1) => __awaiter(this, [content_1, ...args_1], void 0, function* (content, stop = ["."], maxTokens = 50, outputSchema) {
+        //@ts-ignore
+        this.complete = (content_1, ...args_1) => __awaiter(this, [content_1, ...args_1], void 0, function* (content, maxTokens = 50, outputSchema, onChunk) {
+            var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o;
             // stop parameter is alredy not supported
             // adapter users should explicitely ask model to stop at dot if needed (or "Complete only up to the end of sentence")
             const model = this.options.model || "gpt-5-nano";
+            const isStreaming = typeof onChunk === "function";
             const resp = yield fetch("https://api.openai.com/v1/chat/completions", {
                 method: "POST",
                 headers: {
@@ -23,18 +26,117 @@ export default class CompletionAdapterOpenAIChatGPT
                 body: JSON.stringify(Object.assign({ model, messages: [
                     {
                         role: "user",
-                        content, //param
+                        content,
                     },
-                ], max_completion_tokens: maxTokens, response_format: outputSchema ? Object.assign({ type: "json_schema" }, outputSchema) : undefined }, this.options.extraRequestBodyParameters)),
+                ], max_completion_tokens: maxTokens, response_format: outputSchema
+                    ? Object.assign({ type: "json_schema" }, outputSchema) : undefined, stream: isStreaming }, this.options.extraRequestBodyParameters)),
             });
-            const data = yield resp.json();
-            if (data.error) {
-                return { error: data.error.message };
+            if (!resp.ok) {
+                let errorMessage = `OpenAI request failed with status ${resp.status}`;
+                try {
+                    const errorData = yield resp.json();
+                    if ((_a = errorData === null || errorData === void 0 ? void 0 : errorData.error) === null || _a === void 0 ? void 0 : _a.message) {
+                        errorMessage = errorData.error.message;
+                    }
+                }
+                catch (_p) { }
+                return { error: errorMessage };
+            }
+            if (!isStreaming) {
+                const data = yield resp.json();
+                if (data.error) {
+                    return { error: data.error.message };
+                }
+                return {
+                    content: (_d = (_c = (_b = data.choices) === null || _b === void 0 ? void 0 : _b[0]) === null || _c === void 0 ? void 0 : _c.message) === null || _d === void 0 ? void 0 : _d.content,
+                    finishReason: (_f = (_e = data.choices) === null || _e === void 0 ? void 0 : _e[0]) === null || _f === void 0 ? void 0 : _f.finish_reason,
+                };
+            }
+            if (!resp.body) {
+                return { error: "Response body is empty" };
+            }
+            const reader = resp.body.getReader();
+            const decoder = new TextDecoder("utf-8");
+            let buffer = "";
+            let fullContent = "";
+            let finishReason;
+            try {
+                while (true) {
+                    const { value, done } = yield reader.read();
+                    if (done) {
+                        break;
+                    }
+                    buffer += decoder.decode(value, { stream: true });
+                    const lines = buffer.split("\n");
+                    buffer = lines.pop() || "";
+                    for (const rawLine of lines) {
+                        const line = rawLine.trim();
+                        if (!line || !line.startsWith("data:")) {
+                            continue;
+                        }
+                        const dataStr = line.slice(5).trim();
+                        if (dataStr === "[DONE]") {
+                            return {
+                                content: fullContent,
+                                finishReason,
+                            };
+                        }
+                        let parsed;
+                        try {
+                            parsed = JSON.parse(dataStr);
+                        }
+                        catch (_q) {
+                            continue;
+                        }
+                        if ((_g = parsed.error) === null || _g === void 0 ? void 0 : _g.message) {
+                            return { error: parsed.error.message };
+                        }
+                        const choice = (_h = parsed.choices) === null || _h === void 0 ? void 0 : _h[0];
+                        if (!choice) {
+                            continue;
+                        }
+                        if (choice.finish_reason) {
+                            finishReason = choice.finish_reason;
+                        }
+                        const chunk = (_k = (_j = choice.delta) === null || _j === void 0 ? void 0 : _j.content) !== null && _k !== void 0 ? _k : "";
+                        if (!chunk) {
+                            continue;
+                        }
+                        fullContent += chunk;
+                        yield onChunk(chunk);
+                    }
+                }
+                if (buffer.trim().startsWith("data:")) {
+                    const dataStr = buffer.trim().slice(5).trim();
+                    if (dataStr && dataStr !== "[DONE]") {
+                        try {
+                            const parsed = JSON.parse(dataStr);
+                            const choice = (_l = parsed.choices) === null || _l === void 0 ? void 0 : _l[0];
+                            const chunk = (_o = (_m = choice === null || choice === void 0 ? void 0 : choice.delta) === null || _m === void 0 ? void 0 : _m.content) !== null && _o !== void 0 ? _o : "";
+                            if (chunk) {
+                                fullContent += chunk;
+                                yield onChunk(chunk);
+                            }
+                            if (choice === null || choice === void 0 ? void 0 : choice.finish_reason) {
+                                finishReason = choice.finish_reason;
+                            }
+                        }
+                        catch (_r) { }
+                    }
+                }
+                return {
+                    content: fullContent,
+                    finishReason,
+                };
+            }
+            catch (error) {
+                return {
+                    error: (error === null || error === void 0 ? void 0 : error.message) || "Streaming failed",
+                };
+            }
+            finally {
+                reader.releaseLock();
             }
-            return {
-                content: data.choices[0].message.content,
-                finishReason: data.choices[0].finish_reason,
-            };
         });
         this.options = options;
         this.encoding = encoding_for_model((this.options.model || "gpt-5-nano"));
@@ -45,7 +147,7 @@ export default class CompletionAdapterOpenAIChatGPT
         }
     }
     measureTokensCount(content) {
-        // Implement token counting logic here
+        //TODO: Implement token counting logic
        const tokens = this.encoding.encode(content);
        return tokens.length;
    }
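
Note on the stream format consumed by the new branch above: with stream set to true, OpenAI replies with server-sent events, one "data:" line per chunk, terminated by "data: [DONE]". An illustrative sequence (field values are examples, not taken from this package):

    data: {"object":"chat.completion.chunk","choices":[{"index":0,"delta":{"content":"Hel"},"finish_reason":null}]}
    data: {"object":"chat.completion.chunk","choices":[{"index":0,"delta":{"content":"lo."},"finish_reason":"stop"}]}
    data: [DONE]

Each delta.content fragment is appended to fullContent and forwarded to onChunk; the trailing-buffer block after the read loop covers a final event that arrives without a terminating newline.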
package/index.ts CHANGED
@@ -1,6 +1,9 @@
 import type { AdapterOptions } from "./types.js";
 import type { CompletionAdapter } from "adminforth";
 import { encoding_for_model, type TiktokenModel } from "tiktoken";
+
+type StreamChunkCallback = (chunk: string) => void | Promise<void>;
+
 export default class CompletionAdapterOpenAIChatGPT
   implements CompletionAdapter
 {
@@ -21,12 +24,17 @@ export default class CompletionAdapterOpenAIChatGPT
   }
 
   measureTokensCount(content: string): number {
-    // Implement token counting logic here
+    //TODO: Implement token counting logic
     const tokens = this.encoding.encode(content);
     return tokens.length;
   }
-
-  complete = async (content: string, stop = ["."], maxTokens = 50, outputSchema?: any): Promise<{
+  //@ts-ignore
+  complete = async (
+    content: string,
+    maxTokens: number = 50,
+    outputSchema?: any,
+    onChunk?: StreamChunkCallback,
+  ): Promise<{
     content?: string;
     finishReason?: string;
     error?: string;
@@ -34,6 +42,8 @@ export default class CompletionAdapterOpenAIChatGPT
     // stop parameter is alredy not supported
     // adapter users should explicitely ask model to stop at dot if needed (or "Complete only up to the end of sentence")
     const model = this.options.model || "gpt-5-nano";
+    const isStreaming = typeof onChunk === "function";
+
     const resp = await fetch("https://api.openai.com/v1/chat/completions", {
       method: "POST",
       headers: {
@@ -45,24 +55,141 @@ export default class CompletionAdapterOpenAIChatGPT
       messages: [
         {
           role: "user",
-          content, //param
+          content,
         },
       ],
       max_completion_tokens: maxTokens,
-      response_format: outputSchema ? {
-        type: "json_schema",
-        ...outputSchema,
-      } : undefined,
+      response_format: outputSchema
+        ? {
+            type: "json_schema",
+            ...outputSchema,
+          }
+        : undefined,
+      stream: isStreaming,
       ...this.options.extraRequestBodyParameters,
     }),
   });
-    const data = await resp.json();
-    if (data.error) {
-      return { error: data.error.message };
+
+    if (!resp.ok) {
+      let errorMessage = `OpenAI request failed with status ${resp.status}`;
+      try {
+        const errorData = await resp.json();
+        if (errorData?.error?.message) {
+          errorMessage = errorData.error.message;
+        }
+      } catch {}
+      return { error: errorMessage };
+    }
+
+    if (!isStreaming) {
+      const data = await resp.json();
+      if (data.error) {
+        return { error: data.error.message };
+      }
+
+      return {
+        content: data.choices?.[0]?.message?.content,
+        finishReason: data.choices?.[0]?.finish_reason,
+      };
+    }
+
+    if (!resp.body) {
+      return { error: "Response body is empty" };
+    }
+
+    const reader = resp.body.getReader();
+    const decoder = new TextDecoder("utf-8");
+
+    let buffer = "";
+    let fullContent = "";
+    let finishReason: string | undefined;
+
+    try {
+      while (true) {
+        const { value, done } = await reader.read();
+        if (done) {
+          break;
+        }
+
+        buffer += decoder.decode(value, { stream: true });
+
+        const lines = buffer.split("\n");
+        buffer = lines.pop() || "";
+
+        for (const rawLine of lines) {
+          const line = rawLine.trim();
+
+          if (!line || !line.startsWith("data:")) {
+            continue;
+          }
+
+          const dataStr = line.slice(5).trim();
+
+          if (dataStr === "[DONE]") {
+            return {
+              content: fullContent,
+              finishReason,
+            };
+          }
+
+          let parsed: any;
+          try {
+            parsed = JSON.parse(dataStr);
+          } catch {
+            continue;
+          }
+
+          if (parsed.error?.message) {
+            return { error: parsed.error.message };
+          }
+
+          const choice = parsed.choices?.[0];
+          if (!choice) {
+            continue;
+          }
+
+          if (choice.finish_reason) {
+            finishReason = choice.finish_reason;
+          }
+
+          const chunk = choice.delta?.content ?? "";
+          if (!chunk) {
+            continue;
+          }
+
+          fullContent += chunk;
+          await onChunk(chunk);
+        }
+      }
+
+      if (buffer.trim().startsWith("data:")) {
+        const dataStr = buffer.trim().slice(5).trim();
+        if (dataStr && dataStr !== "[DONE]") {
+          try {
+            const parsed = JSON.parse(dataStr);
+            const choice = parsed.choices?.[0];
+            const chunk = choice?.delta?.content ?? "";
+            if (chunk) {
+              fullContent += chunk;
+              await onChunk(chunk);
+            }
+            if (choice?.finish_reason) {
+              finishReason = choice.finish_reason;
+            }
+          } catch {}
+        }
+      }
+
+      return {
+        content: fullContent,
+        finishReason,
+      };
+    } catch (error: any) {
+      return {
+        error: error?.message || "Streaming failed",
+      };
+    } finally {
+      reader.releaseLock();
     }
-    return {
-      content: data.choices[0].message.content,
-      finishReason: data.choices[0].finish_reason,
-    };
   };
-}
+}
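
Net effect of the TypeScript changes: the unsupported stop parameter is removed from the signature, and streaming becomes opt-in via the new optional onChunk callback, while the returned promise still resolves to the concatenated content and finish reason. A minimal usage sketch follows; the constructor option name openAiApiKey is an assumption for illustration, since the real AdapterOptions shape is defined in types.ts, which is not part of this diff:

    import CompletionAdapterOpenAIChatGPT from "@adminforth/completion-adapter-open-ai-chat-gpt";

    const adapter = new CompletionAdapterOpenAIChatGPT({
      openAiApiKey: process.env.OPENAI_API_KEY, // hypothetical option name; see types.ts
      model: "gpt-5-nano",
    });

    // Non-streaming: omit onChunk; the adapter buffers and returns the full reply.
    const full = await adapter.complete("Suggest a tagline for a bike shop.", 60);
    console.log(full.content, full.finishReason);

    // Streaming: pass onChunk as the fourth argument; each SSE delta is forwarded
    // as it arrives, and the resolved value still carries the whole text.
    const streamed = await adapter.complete(
      "Suggest a tagline for a bike shop.",
      60,
      undefined, // outputSchema (JSON-schema response_format) not used here
      (chunk) => process.stdout.write(chunk),
    );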
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@adminforth/completion-adapter-open-ai-chat-gpt",
-  "version": "2.0.10",
+  "version": "2.0.11",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
   "type": "module",
  "type": "module",