@agentica/core 0.32.3-dev.2 → 0.32.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/Agentica.js +24 -78
- package/lib/Agentica.js.map +1 -1
- package/lib/MicroAgentica.d.ts +3 -1
- package/lib/MicroAgentica.js +26 -79
- package/lib/MicroAgentica.js.map +1 -1
- package/lib/context/AgenticaContext.d.ts +1 -1
- package/lib/context/MicroAgenticaContext.d.ts +1 -1
- package/lib/index.mjs +159 -203
- package/lib/index.mjs.map +1 -1
- package/lib/orchestrate/call.js +13 -11
- package/lib/orchestrate/call.js.map +1 -1
- package/lib/orchestrate/cancel.js +5 -4
- package/lib/orchestrate/cancel.js.map +1 -1
- package/lib/orchestrate/select.js +15 -12
- package/lib/orchestrate/select.js.map +1 -1
- package/lib/utils/request.d.ts +12 -0
- package/lib/utils/request.js +80 -0
- package/lib/utils/request.js.map +1 -0
- package/package.json +1 -1
- package/src/Agentica.ts +26 -105
- package/src/MicroAgentica.ts +30 -100
- package/src/context/AgenticaContext.ts +1 -1
- package/src/context/MicroAgenticaContext.ts +1 -1
- package/src/orchestrate/call.ts +9 -7
- package/src/orchestrate/cancel.ts +4 -3
- package/src/orchestrate/select.ts +10 -7
- package/src/utils/request.ts +100 -0
package/src/MicroAgentica.ts
CHANGED

@@ -1,14 +1,11 @@
 import type { ILlmSchema } from "@samchon/openapi";
-import type OpenAI from "openai";
 
 import { Semaphore } from "tstl";
-import { v4 } from "uuid";
 
 import type { AgenticaOperation } from "./context/AgenticaOperation";
 import type { AgenticaOperationCollection } from "./context/AgenticaOperationCollection";
 import type { MicroAgenticaContext } from "./context/MicroAgenticaContext";
 import type { AgenticaUserMessageEvent } from "./events";
-import type { AgenticaRequestEvent } from "./events/AgenticaRequestEvent";
 import type { MicroAgenticaEvent } from "./events/MicroAgenticaEvent";
 import type { AgenticaUserMessageContent } from "./histories";
 import type { AgenticaExecuteHistory } from "./histories/AgenticaExecuteHistory";
@@ -20,13 +17,11 @@ import type { IMicroAgenticaProps } from "./structures/IMicroAgenticaProps";
 
 import { AgenticaTokenUsage } from "./context/AgenticaTokenUsage";
 import { AgenticaOperationComposer } from "./context/internal/AgenticaOperationComposer";
-import {
-import { createRequestEvent, createUserMessageEvent } from "./factory/events";
+import { createUserMessageEvent } from "./factory/events";
 import { call, describe } from "./orchestrate";
 import { transformHistory } from "./transformers/transformHistory";
 import { __map_take } from "./utils/__map_take";
-import {
-import { streamDefaultReaderToAsyncGenerator, StreamUtil } from "./utils/StreamUtil";
+import { getChatCompletionWithStreamingFunction } from "./utils/request";
 
 /**
  * Micro AI chatbot.
@@ -128,21 +123,27 @@ export class MicroAgentica<Model extends ILlmSchema.Model> {
    */
   public async conversate(
     content: string | AgenticaUserMessageContent | Array<AgenticaUserMessageContent>,
+    options: {
+      abortSignal?: AbortSignal;
+    } = {},
   ): Promise<MicroAgenticaHistory<Model>[]> {
     const histories: Array<() => Promise<MicroAgenticaHistory<Model>>> = [];
-    const dispatch = (event: MicroAgenticaEvent<Model>): void => {
-
-
-      if ("
-
-
-
-
-
-
-
+    const dispatch = async (event: MicroAgenticaEvent<Model>): Promise<void> => {
+      try {
+        await this.dispatch(event);
+        if ("toHistory" in event) {
+          if ("join" in event) {
+            histories.push(async () => {
+              await event.join();
+              return event.toHistory();
+            });
+          }
+          else {
+            histories.push(async () => event.toHistory());
+          }
         }
       }
+      catch {}
     };
 
     const prompt: AgenticaUserMessageEvent = createUserMessageEvent({
@@ -155,12 +156,13 @@ export class MicroAgentica<Model extends ILlmSchema.Model> {
           }]
         : [content],
     });
-    dispatch(prompt);
+    void dispatch(prompt).catch(() => {});
 
     const ctx: MicroAgenticaContext<Model> = this.getContext({
       prompt,
       dispatch,
       usage: this.token_usage_,
+      abortSignal: options.abortSignal,
     });
     const executes: AgenticaExecuteHistory<Model>[] = await call(
       ctx,
@@ -244,88 +246,16 @@ export class MicroAgentica<Model extends ILlmSchema.Model> {
   public getContext(props: {
     prompt: AgenticaUserMessageEvent;
     usage: AgenticaTokenUsage;
-    dispatch: (event: MicroAgenticaEvent<Model>) => void
+    dispatch: (event: MicroAgenticaEvent<Model>) => Promise<void>;
+    abortSignal?: AbortSignal;
   }): MicroAgenticaContext<Model> {
-    const request =
-
-
-
-
-
-
-          ...body,
-          model: this.props.vendor.model,
-          stream: true,
-          stream_options: {
-            include_usage: true,
-          },
-        },
-        options: this.props.vendor.options,
-      });
-      props.dispatch(event);
-
-      // completion
-      const backoffStrategy = this.props.config?.backoffStrategy ?? ((props) => {
-        throw props.error;
-      });
-      const completion = await (async () => {
-        let count = 0;
-        while (true) {
-          try {
-            return await this.props.vendor.api.chat.completions.create(
-              event.body,
-              event.options,
-            );
-          }
-          catch (error) {
-            const waiting = backoffStrategy({ count, error });
-            await new Promise(resolve => setTimeout(resolve, waiting));
-            count++;
-          }
-        }
-      })();
-
-      const [streamForEvent, temporaryStream] = StreamUtil.transform(
-        completion.toReadableStream() as ReadableStream<Uint8Array>,
-        value =>
-          ChatGptCompletionMessageUtil.transformCompletionChunk(value),
-      ).tee();
-
-      const [streamForAggregate, streamForReturn] = temporaryStream.tee();
-
-      void (async () => {
-        const reader = streamForAggregate.getReader();
-        while (true) {
-          const chunk = await reader.read();
-          if (chunk.done) {
-            break;
-          }
-          if (chunk.value.usage != null) {
-            AgenticaTokenUsageAggregator.aggregate({
-              kind: source,
-              completionUsage: chunk.value.usage,
-              usage: props.usage,
-            });
-          }
-        }
-      })().catch(() => {});
-
-      const [streamForStream, streamForJoin] = streamForEvent.tee();
-      props.dispatch({
-        id: v4(),
-        type: "response",
-        source,
-        stream: streamDefaultReaderToAsyncGenerator(streamForStream.getReader()),
-        body: event.body,
-        options: event.options,
-        join: async () => {
-          const chunks = await StreamUtil.readAll(streamForJoin);
-          return ChatGptCompletionMessageUtil.merge(chunks);
-        },
-        created_at: new Date().toISOString(),
-      });
-      return streamForReturn;
-    };
+    const request = getChatCompletionWithStreamingFunction<Model>({
+      vendor: this.props.vendor,
+      config: this.props.config,
+      dispatch: props.dispatch,
+      abortSignal: props.abortSignal,
+      usage: this.token_usage_,
+    });
     return {
       operations: this.operations_,
       config: this.props.config,
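The hunks above add an optional `options.abortSignal` argument to `MicroAgentica.conversate()` and pass it through `getContext()` into the underlying chat-completion request. A minimal usage sketch, assuming the constructor props exposed by this package; the vendor, model, and prompt values below are illustrative placeholders, not taken from this diff:

```ts
import { MicroAgentica } from "@agentica/core";
import OpenAI from "openai";

async function main(): Promise<void> {
  // Hypothetical agent wiring; adjust vendor/model/controllers to your setup.
  const agent = new MicroAgentica({
    model: "chatgpt",
    vendor: {
      api: new OpenAI({ apiKey: process.env.OPENAI_API_KEY }),
      model: "gpt-4o-mini",
    },
    controllers: [],
  });

  const controller = new AbortController();
  const timeout = setTimeout(() => controller.abort(), 30_000);
  try {
    // New in this release: a second options argument carrying the abort signal.
    const histories = await agent.conversate("Summarize yesterday's meeting.", {
      abortSignal: controller.signal,
    });
    console.log(histories.length);
  }
  finally {
    clearTimeout(timeout);
  }
}

void main();
```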
package/src/context/AgenticaContext.ts
CHANGED

@@ -115,7 +115,7 @@ export interface AgenticaContext<Model extends ILlmSchema.Model> {
    *
    * @param event Event to deliver
    */
-  dispatch: (event: AgenticaEvent<Model>) => void
+  dispatch: (event: AgenticaEvent<Model>) => Promise<void>;
 
   /**
    * Request to the OpenAI server.
package/src/context/MicroAgenticaContext.ts
CHANGED

@@ -79,7 +79,7 @@ export interface MicroAgenticaContext<Model extends ILlmSchema.Model> {
    *
    * @param event Event to deliver
    */
-  dispatch: (event: MicroAgenticaEvent<Model>) => void
+  dispatch: (event: MicroAgenticaEvent<Model>) => Promise<void>;
 
   /**
    * Request to the OpenAI server.
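Both context interfaces now type `dispatch` as returning `Promise<void>` rather than `void`, so orchestration code can choose between awaiting delivery and firing-and-forgetting. A sketch of a dispatcher that satisfies the new signature, assuming a simple listener map; the bookkeeping below is illustrative, not the package's internal implementation:

```ts
type Listener<Event> = (event: Event) => void | Promise<void>;

// Illustrative event shape; the real AgenticaEvent / MicroAgenticaEvent unions
// are defined by the package.
interface ExampleEvent {
  type: string;
}

function createDispatcher<Event extends ExampleEvent>(
  listeners: Map<string, Set<Listener<Event>>>,
): (event: Event) => Promise<void> {
  // Conforms to the new contract: (event) => Promise<void>.
  return async (event) => {
    const handlers = listeners.get(event.type);
    if (handlers === undefined)
      return;
    // Callers that `await dispatch(event)` observe listener completion;
    // callers that use `void dispatch(event).catch(() => {})` do not block on it.
    await Promise.all([...handlers].map(async handler => handler(event)));
  };
}
```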
package/src/orchestrate/call.ts
CHANGED

@@ -60,12 +60,14 @@ export async function call<Model extends ILlmSchema.Model>(
       role: "user",
       content: ctx.prompt.contents.map(decodeUserMessageContent),
     },
-    ...(prevError instanceof AssistantMessageEmptyWithReasoningError
+    ...(prevError instanceof AssistantMessageEmptyWithReasoningError
+      ? [
           {
             role: "assistant",
             content: prevError.reasoning,
           } satisfies OpenAI.ChatCompletionMessageParam,
-
+        ]
+      : []),
     // SYSTEM PROMPT
     ...(ctx.config?.systemPrompt?.execute === null
       ? []
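The hunk above makes the conditional spread around the `AssistantMessageEmptyWithReasoningError` recovery message explicit: the assistant's prior reasoning is injected only when such an error occurred, and the other branch contributes nothing. The pattern in isolation, with made-up message values:

```ts
import type OpenAI from "openai";

const prevReasoning: string | null = "I planned to call getWeather but produced no text.";

const messages: OpenAI.ChatCompletionMessageParam[] = [
  { role: "user", content: "Please continue." },
  // Spreading a ternary includes the optional element only when the condition holds.
  ...(prevReasoning !== null
    ? [
        {
          role: "assistant",
          content: prevReasoning,
        } satisfies OpenAI.ChatCompletionMessageParam,
      ]
    : []),
  { role: "system", content: "Answer the user, calling tools where appropriate." },
];
```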
@@ -108,7 +110,7 @@ export async function call<Model extends ILlmSchema.Model>(
 
   const completion = await reduceStreamingWithDispatch(stream, (props) => {
     const event: AgenticaAssistantMessageEvent = createAssistantMessageEvent(props);
-    ctx.dispatch(event);
+    void ctx.dispatch(event).catch(() => {});
   });
 
   const allAssistantMessagesEmpty = completion.choices.every(v => v.message.tool_calls == null && v.message.content === "");
@@ -131,7 +133,7 @@ export async function call<Model extends ILlmSchema.Model>(
         return "";
       },
     });
-    ctx.dispatch(event);
+    void ctx.dispatch(event).catch(() => {});
     return [];
   }
 
@@ -154,7 +156,7 @@ export async function call<Model extends ILlmSchema.Model>(
       [],
       retry,
     );
-    ctx.dispatch(event);
+    await ctx.dispatch(event);
     executes.push(event);
     if (isAgenticaContext(ctx)) {
       cancelFunctionFromContext(ctx, {
@@ -181,7 +183,7 @@ async function predicate<Model extends ILlmSchema.Model>(
     operation,
     toolCall,
   );
-  ctx.dispatch(call);
+  await ctx.dispatch(call);
   if (call.type === "jsonParseError") {
     return correctJsonError(ctx, call, previousValidationErrors, life - 1);
   }
@@ -194,7 +196,7 @@ async function predicate<Model extends ILlmSchema.Model>(
       operation,
       result: check,
     });
-    ctx.dispatch(event);
+    await ctx.dispatch(event);
     return correctTypeError(
       ctx,
       call,
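With `ctx.dispatch` now returning a promise, the call sites above fall into two patterns: streaming assistant-message events are dispatched without awaiting (`void ctx.dispatch(event).catch(() => {})`) so the token stream is not blocked and listener failures cannot surface as unhandled rejections, while execute, call, and validate events are awaited so they are delivered before orchestration continues. A stand-alone sketch of the two patterns; the `dispatch` stub below is illustrative:

```ts
// Stand-in for ctx.dispatch; the real one delivers Agentica events to listeners.
async function dispatch(event: { type: string }): Promise<void> {
  console.log("delivered:", event.type);
}

async function orchestrateStep(): Promise<void> {
  // Fire-and-forget: do not block the token stream on listeners, and swallow
  // listener errors so they cannot become unhandled rejections.
  void dispatch({ type: "assistantMessage" }).catch(() => {});

  // Awaited: the execute event must be fully delivered before the next step.
  await dispatch({ type: "execute" });
}

void orchestrateStep();
```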
package/src/orchestrate/cancel.ts
CHANGED

@@ -48,9 +48,8 @@ export async function cancel<Model extends ILlmSchema.Model>(
       {
         ...ctx,
         stack: stacks[i]!,
-        dispatch: (e) => {
+        dispatch: async (e) => {
           events.push(e);
-          return e;
         },
       },
       operations,
@@ -81,7 +80,9 @@ export async function cancel<Model extends ILlmSchema.Model>(
   const cancelled: AgenticaCancelEvent<Model>[]
     = events.filter(e => e.type === "cancel");
   (cancelled.length !== 0 ? cancelled : events)
-    .forEach(
+    .forEach((e) => {
+      void ctx.dispatch(e).catch(() => {});
+    });
   }
 }
 
package/src/orchestrate/select.ts
CHANGED

@@ -51,9 +51,8 @@ export async function select<Model extends ILlmSchema.Model>(
       {
         ...ctx,
         stack: stacks[i]!,
-        dispatch: (e) => {
+        dispatch: async (e) => {
           events.push(e);
-          return e;
         },
       },
       operations,
@@ -84,7 +83,9 @@ export async function select<Model extends ILlmSchema.Model>(
   const selected: AgenticaSelectEvent<Model>[]
     = events.filter(e => e.type === "select");
   (selected.length !== 0 ? selected : events)
-    .forEach(
+    .forEach((e) => {
+      void ctx.dispatch(e).catch(() => {});
+    });
   }
 }
 
@@ -153,12 +154,14 @@ async function step<Model extends ILlmSchema.Model>(
       content: ctx.prompt.contents.map(decodeUserMessageContent),
     },
     // PREVIOUS ERROR
-    ...(prevError instanceof AssistantMessageEmptyWithReasoningError
+    ...(prevError instanceof AssistantMessageEmptyWithReasoningError
+      ? [
           {
             role: "assistant",
             content: prevError.reasoning,
           } satisfies OpenAI.ChatCompletionMessageParam,
-
+        ]
+      : []),
     // SYSTEM PROMPT
     {
       role: "system",
@@ -190,7 +193,7 @@ async function step<Model extends ILlmSchema.Model>(
 
   const completion = await reduceStreamingWithDispatch(stream, (props) => {
     const event: AgenticaAssistantMessageEvent = createAssistantMessageEvent(props);
-    ctx.dispatch(event);
+    void ctx.dispatch(event).catch(() => {});
   });
   const allAssistantMessagesEmpty = completion.choices.every(v => v.message.tool_calls == null && v.message.content === "");
   if (allAssistantMessagesEmpty) {
@@ -212,7 +215,7 @@ async function step<Model extends ILlmSchema.Model>(
         return "";
       },
     });
-    ctx.dispatch(event);
+    void ctx.dispatch(event).catch(() => {});
     return;
   }
   // ----
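In both `cancel.ts` and `select.ts`, the per-candidate `dispatch` handed to the inner step is now an async collector that only records events; the filtered results are then re-dispatched on the real context without awaiting. A generic sketch of that collect-then-redispatch pattern; the event shape and function names here are illustrative:

```ts
interface CollectedEvent {
  type: string;
}

async function collectAndRedispatch(
  run: (dispatch: (e: CollectedEvent) => Promise<void>) => Promise<void>,
  redispatch: (e: CollectedEvent) => Promise<void>,
): Promise<void> {
  const events: CollectedEvent[] = [];

  // The local dispatch only records events, yet still satisfies the
  // (event) => Promise<void> contract expected by the inner step.
  await run(async (e) => {
    events.push(e);
  });

  // Prefer the matching events when any exist; otherwise forward everything.
  const selected = events.filter(e => e.type === "select");
  (selected.length !== 0 ? selected : events)
    .forEach((e) => {
      void redispatch(e).catch(() => {});
    });
}
```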
package/src/utils/request.ts
ADDED

@@ -0,0 +1,100 @@
+import OpenAI from "openai";
+import { AgenticaEventSource, AgenticaRequestEvent, AgenticaResponseEvent } from "../events";
+import { ChatGptCompletionMessageUtil } from "./ChatGptCompletionMessageUtil";
+import { streamDefaultReaderToAsyncGenerator, StreamUtil } from "./StreamUtil";
+import { createRequestEvent } from "../factory";
+import { IAgenticaConfig, IAgenticaVendor, IMicroAgenticaConfig } from "../structures";
+import { ILlmSchema } from "@samchon/openapi";
+import { AgenticaTokenUsageAggregator } from "../context/internal/AgenticaTokenUsageAggregator";
+import { AgenticaTokenUsage } from "../context/AgenticaTokenUsage";
+import { v4 } from "uuid";
+
+export const getChatCompletionWithStreamingFunction = <Model extends ILlmSchema.Model>(props: {
+  vendor: IAgenticaVendor;
+  config?: IAgenticaConfig<Model> | IMicroAgenticaConfig<Model>;
+  dispatch: (event: AgenticaRequestEvent | AgenticaResponseEvent) => Promise<void>;
+  abortSignal?: AbortSignal;
+  usage: AgenticaTokenUsage;
+}) => async (
+  source: AgenticaEventSource,
+  body: Omit<OpenAI.ChatCompletionCreateParamsStreaming, "model" | "stream">,
+) => {
+  const event: AgenticaRequestEvent = createRequestEvent({
+    source,
+    body: {
+      ...body,
+      model: props.vendor.model,
+      stream: true,
+      stream_options: {
+        include_usage: true,
+      },
+    },
+    options: {
+      ...props.vendor.options,
+      signal: props.abortSignal,
+    },
+  });
+  await props.dispatch(event);
+
+  // completion
+  const backoffStrategy = props.config?.backoffStrategy ?? ((props) => {
+    throw props.error;
+  });
+  const completion = await (async () => {
+    let count = 0;
+    while (true) {
+      try {
+        return await props.vendor.api.chat.completions.create(
+          event.body,
+          event.options,
+        );
+      }
+      catch (error) {
+        const waiting = backoffStrategy({ count, error });
+        await new Promise(resolve => setTimeout(resolve, waiting));
+        count++;
+      }
+    }
+  })();
+
+  const [streamForEvent, temporaryStream] = StreamUtil.transform(
+    completion.toReadableStream() as ReadableStream<Uint8Array>,
+    value =>
+      ChatGptCompletionMessageUtil.transformCompletionChunk(value),
+  ).tee();
+
+  const [streamForAggregate, streamForReturn] = temporaryStream.tee();
+
+  (async () => {
+    const reader = streamForAggregate.getReader();
+    while (true) {
+      const chunk = await reader.read();
+      if (chunk.done) {
+        break;
+      }
+      if (chunk.value.usage != null) {
+        AgenticaTokenUsageAggregator.aggregate({
+          kind: source,
+          completionUsage: chunk.value.usage,
+          usage: props.usage,
+        });
+      }
+    }
+  })().catch(() => {});
+
+  const [streamForStream, streamForJoin] = streamForEvent.tee();
+  void props.dispatch({
+    id: v4(),
+    type: "response",
+    source,
+    stream: streamDefaultReaderToAsyncGenerator(streamForStream.getReader()),
+    body: event.body,
+    options: event.options,
+    join: async () => {
+      const chunks = await StreamUtil.readAll(streamForJoin);
+      return ChatGptCompletionMessageUtil.merge(chunks);
+    },
+    created_at: new Date().toISOString(),
+  }).catch(() => {});
+  return streamForReturn;
+};
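The new `utils/request.ts` factors out what the two `getContext()` implementations previously inlined: it emits a request event, retries the completion call according to the configured `backoffStrategy`, tees the stream so token usage can be aggregated while the response event and the caller each receive their own copy, and forwards `abortSignal` as the request's `signal`. A sketch of the retry-with-backoff portion in isolation, with a generic async operation standing in for the vendor call; the default strategy that rethrows mirrors the code above:

```ts
type BackoffStrategy = (props: { count: number; error: unknown }) => number;

// Default mirrors the diff: no retry, the original error is rethrown.
const defaultBackoff: BackoffStrategy = (props) => {
  throw props.error;
};

async function withBackoff<T>(
  operation: () => Promise<T>,
  backoffStrategy: BackoffStrategy = defaultBackoff,
): Promise<T> {
  let count = 0;
  while (true) {
    try {
      return await operation();
    }
    catch (error) {
      // The strategy either throws (give up) or returns a wait time in milliseconds.
      const waiting = backoffStrategy({ count, error });
      await new Promise(resolve => setTimeout(resolve, waiting));
      count += 1;
    }
  }
}

// Example: retry up to three times with linear backoff.
void withBackoff(
  async () => fetch("https://example.com").then(r => r.text()),
  ({ count, error }) => {
    if (count >= 3)
      throw error;
    return 100 * (count + 1);
  },
).catch(() => {});
```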