@agentica/core 0.32.2 → 0.32.3-dev.2
- package/lib/index.mjs +320 -342
- package/lib/index.mjs.map +1 -1
- package/lib/orchestrate/call.js +87 -107
- package/lib/orchestrate/call.js.map +1 -1
- package/lib/orchestrate/describe.js +5 -50
- package/lib/orchestrate/describe.js.map +1 -1
- package/lib/orchestrate/initialize.js +5 -50
- package/lib/orchestrate/initialize.js.map +1 -1
- package/lib/orchestrate/select.js +107 -126
- package/lib/orchestrate/select.js.map +1 -1
- package/lib/utils/AssistantMessageEmptyError.d.ts +7 -0
- package/lib/utils/AssistantMessageEmptyError.js +17 -0
- package/lib/utils/AssistantMessageEmptyError.js.map +1 -0
- package/lib/utils/ChatGptCompletionStreamingUtil.d.ts +8 -0
- package/lib/utils/ChatGptCompletionStreamingUtil.js +86 -0
- package/lib/utils/ChatGptCompletionStreamingUtil.js.map +1 -0
- package/lib/utils/ChatGptCompletionStreamingUtil.spec.d.ts +1 -0
- package/lib/utils/ChatGptCompletionStreamingUtil.spec.js +855 -0
- package/lib/utils/ChatGptCompletionStreamingUtil.spec.js.map +1 -0
- package/lib/utils/MPSC.js +8 -6
- package/lib/utils/MPSC.js.map +1 -1
- package/lib/utils/StreamUtil.d.ts +1 -1
- package/lib/utils/StreamUtil.js +2 -2
- package/lib/utils/StreamUtil.js.map +1 -1
- package/lib/utils/__retry.d.ts +1 -0
- package/lib/utils/__retry.js +30 -0
- package/lib/utils/__retry.js.map +1 -0
- package/lib/utils/__retry.spec.d.ts +1 -0
- package/lib/utils/__retry.spec.js +172 -0
- package/lib/utils/__retry.spec.js.map +1 -0
- package/package.json +1 -1
- package/src/orchestrate/call.ts +88 -114
- package/src/orchestrate/describe.ts +7 -65
- package/src/orchestrate/initialize.ts +4 -64
- package/src/orchestrate/select.ts +111 -138
- package/src/utils/AssistantMessageEmptyError.ts +13 -0
- package/src/utils/ChatGptCompletionMessageUtil.ts +1 -1
- package/src/utils/ChatGptCompletionStreamingUtil.spec.ts +908 -0
- package/src/utils/ChatGptCompletionStreamingUtil.ts +90 -0
- package/src/utils/MPSC.ts +8 -6
- package/src/utils/StreamUtil.ts +2 -2
- package/src/utils/__retry.spec.ts +198 -0
- package/src/utils/__retry.ts +18 -0

package/src/orchestrate/describe.ts
@@ -10,9 +10,7 @@ import { AgenticaDefaultPrompt } from "../constants/AgenticaDefaultPrompt";
 import { AgenticaSystemPrompt } from "../constants/AgenticaSystemPrompt";
 import { createDescribeEvent } from "../factory/events";
 import { decodeHistory } from "../factory/histories";
-import {
-import { MPSC } from "../utils/MPSC";
-import { streamDefaultReaderToAsyncGenerator, StreamUtil } from "../utils/StreamUtil";
+import { reduceStreamingWithDispatch } from "../utils/ChatGptCompletionStreamingUtil";
 
 export async function describe<Model extends ILlmSchema.Model>(
   ctx: AgenticaContext<Model> | MicroAgenticaContext<Model>,
@@ -41,68 +39,12 @@ export async function describe<Model extends ILlmSchema.Model>(
     ],
   });
 
-
-
-
-
-
-
-    OpenAI.ChatCompletionChunk,
-    Promise<OpenAI.ChatCompletion>
-  >(completionStream, async (accPromise, chunk) => {
-    const acc = await accPromise;
-    const registerContext = (
-      choices: OpenAI.ChatCompletionChunk.Choice[],
-    ) => {
-      for (const choice of choices) {
-        /**
-         * @TODO fix it
-         * Sometimes, the complete message arrives along with a finish reason.
-         */
-        if (choice.finish_reason != null) {
-          describeContext[choice.index]!.mpsc.close();
-          continue;
-        }
-
-        if (choice.delta.content == null) {
-          continue;
-        }
-
-        if (describeContext[choice.index] != null) {
-          describeContext[choice.index]!.content += choice.delta.content;
-          describeContext[choice.index]!.mpsc.produce(choice.delta.content);
-          continue;
-        }
-
-        const mpsc = new MPSC<string>();
-
-        describeContext[choice.index] = {
-          content: choice.delta.content,
-          mpsc,
-        };
-        mpsc.produce(choice.delta.content);
-
-        const event: AgenticaDescribeEvent<Model> = createDescribeEvent({
-          executes: histories,
-          stream: streamDefaultReaderToAsyncGenerator(mpsc.consumer.getReader()),
-          done: () => mpsc.done(),
-          get: () => describeContext[choice.index]?.content ?? "",
-          join: async () => {
-            await mpsc.waitClosed();
-            return describeContext[choice.index]!.content;
-          },
-        });
-        ctx.dispatch(event);
-      }
-    };
-
-    if (acc.object === "chat.completion.chunk") {
-      registerContext([acc, chunk].flatMap(v => v.choices));
-      return ChatGptCompletionMessageUtil.merge([acc, chunk]);
-    }
-
-    registerContext(chunk.choices);
-    return ChatGptCompletionMessageUtil.accumulate(acc, chunk);
+  await reduceStreamingWithDispatch(completionStream, (props) => {
+    const event: AgenticaDescribeEvent<Model> = createDescribeEvent({
+      executes: histories,
+      ...props,
+    });
+    ctx.dispatch(event);
   });
 }
 
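All three orchestrate steps previously inlined the same per-choice streaming loop (StreamUtil.reduce plus an MPSC channel plus event dispatch); this release extracts it into a shared reduceStreamingWithDispatch utility. Its source does not appear on this page, so the following is only a sketch reconstructed from the removed code and the new call sites: Chunk, DispatchProps, and Channel are stand-ins for OpenAI's chunk type and the package's MPSC, and the shipped helper evidently also folds the chunks into a final OpenAI.ChatCompletion and returns it (the initialize and select hunks below use that return value).

// Sketch only: stand-in types; not the shipped implementation.
interface Chunk {
  choices: { index: number; finish_reason: string | null; delta: { content?: string | null } }[];
}

interface DispatchProps {
  stream: AsyncGenerator<string>; // incremental text of one choice
  done: () => boolean; // has this choice finished streaming?
  get: () => string; // text accumulated so far
  join: () => Promise<string>; // wait for the end, return the full text
}

// Minimal single-consumer channel standing in for the package's MPSC.
class Channel {
  private queue: string[] = [];
  private closed = false;
  private listeners: (() => void)[] = [];

  produce(text: string): void { this.queue.push(text); this.wake(); }
  close(): void { this.closed = true; this.wake(); }
  isDone(): boolean { return this.closed && this.queue.length === 0; }

  async waitClosed(): Promise<void> {
    while (!this.closed) await new Promise<void>(r => this.listeners.push(r));
  }

  async *consume(): AsyncGenerator<string> {
    while (!this.isDone()) {
      if (this.queue.length > 0) yield this.queue.shift()!;
      else if (!this.closed) await new Promise<void>(r => this.listeners.push(r));
    }
  }

  private wake(): void {
    const pending = this.listeners;
    this.listeners = [];
    pending.forEach(resolve => resolve());
  }
}

async function reduceStreamingWithDispatch(
  stream: AsyncIterable<Chunk>,
  dispatch: (props: DispatchProps) => void,
): Promise<void> {
  // Per-choice accumulation state, keyed by choice index.
  const contexts: ({ content: string; channel: Channel } | undefined)[] = [];

  for await (const chunk of stream) {
    for (const choice of chunk.choices) {
      // A finish reason closes that choice's channel.
      if (choice.finish_reason != null) {
        contexts[choice.index]?.channel.close();
        continue;
      }
      if (choice.delta.content == null || choice.delta.content.length === 0) {
        continue;
      }
      const existing = contexts[choice.index];
      if (existing != null) {
        existing.content += choice.delta.content;
        existing.channel.produce(choice.delta.content);
        continue;
      }
      // First token of a choice: open a channel and dispatch an event
      // whose accessors observe the still-growing text.
      const channel = new Channel();
      const context = { content: choice.delta.content, channel };
      contexts[choice.index] = context;
      channel.produce(choice.delta.content);
      dispatch({
        stream: channel.consume(),
        done: () => channel.isDone(),
        get: () => context.content,
        join: async () => {
          await channel.waitClosed();
          return context.content;
        },
      });
    }
  }
}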

package/src/orchestrate/initialize.ts
@@ -11,9 +11,7 @@ import { AgenticaDefaultPrompt } from "../constants/AgenticaDefaultPrompt";
 import { AgenticaSystemPrompt } from "../constants/AgenticaSystemPrompt";
 import { createAssistantMessageEvent } from "../factory/events";
 import { decodeHistory, decodeUserMessageContent } from "../factory/histories";
-import {
-import { MPSC } from "../utils/MPSC";
-import { streamDefaultReaderToAsyncGenerator, StreamUtil } from "../utils/StreamUtil";
+import { reduceStreamingWithDispatch } from "../utils/ChatGptCompletionStreamingUtil";
 
 const FUNCTION: ILlmFunction<"chatgpt"> = typia.llm.application<
   __IChatInitialApplication,
@@ -65,67 +63,9 @@ export async function initialize<Model extends ILlmSchema.Model>(ctx: AgenticaCo
     // parallel_tool_calls: false,
   });
 
-  const
-
-
-  })[] = [];
-
-  const completion = await StreamUtil.reduce<
-    OpenAI.ChatCompletionChunk,
-    Promise<OpenAI.ChatCompletion>
-  >(completionStream, async (accPromise, chunk) => {
-    const acc = await accPromise;
-    const registerContext = (
-      choices: OpenAI.ChatCompletionChunk.Choice[],
-    ) => {
-      for (const choice of choices) {
-        /**
-         * @TODO fix it
-         * Sometimes, the complete message arrives along with a finish reason.
-         */
-        if (choice.finish_reason != null) {
-          textContext[choice.index]?.mpsc.close();
-          continue;
-        }
-
-        if (choice.delta.content == null || choice.delta.content.length === 0) {
-          continue;
-        }
-
-        if (textContext[choice.index] != null) {
-          textContext[choice.index]!.content += choice.delta.content;
-          textContext[choice.index]!.mpsc.produce(choice.delta.content);
-          continue;
-        }
-
-        const mpsc = new MPSC<string>();
-
-        textContext[choice.index] = {
-          content: choice.delta.content,
-          mpsc,
-        };
-        mpsc.produce(choice.delta.content);
-
-        const event: AgenticaAssistantMessageEvent = createAssistantMessageEvent({
-          stream: streamDefaultReaderToAsyncGenerator(mpsc.consumer.getReader()),
-          done: () => mpsc.done(),
-          get: () => textContext[choice.index]!.content,
-          join: async () => {
-            await mpsc.waitClosed();
-            return textContext[choice.index]!.content;
-          },
-        });
-        ctx.dispatch(event);
-      }
-    };
-
-    if (acc.object === "chat.completion.chunk") {
-      registerContext([acc, chunk].flatMap(v => v.choices));
-      return ChatGptCompletionMessageUtil.merge([acc, chunk]);
-    }
-
-    registerContext(chunk.choices);
-    return ChatGptCompletionMessageUtil.accumulate(acc, chunk);
+  const completion = await reduceStreamingWithDispatch(completionStream, (props) => {
+    const event: AgenticaAssistantMessageEvent = createAssistantMessageEvent(props);
+    ctx.dispatch(event);
   });
 
   if (completion === null) {
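Unlike describe, initialize keeps the helper's return value and still checks it for null afterward, so the extracted utility must preserve the old ChatGptCompletionMessageUtil.merge/accumulate folding of streamed chunks into a completed response. Purely as an illustration of that folding (the util's real signatures are not part of this diff), per-choice accumulation amounts to concatenating each choice's delta.content:

// Illustration only, in the spirit of ChatGptCompletionMessageUtil.accumulate.
interface DeltaChunk {
  choices: { index: number; delta: { content?: string | null } }[];
}

function foldDeltas(chunks: DeltaChunk[]): Map<number, string> {
  const byChoice = new Map<number, string>();
  for (const chunk of chunks) {
    for (const choice of chunk.choices) {
      if (!choice.delta.content) continue;
      byChoice.set(
        choice.index,
        (byChoice.get(choice.index) ?? "") + choice.delta.content,
      );
    }
  }
  return byChoice;
}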

package/src/orchestrate/select.ts
@@ -17,9 +17,10 @@ import { AgenticaDefaultPrompt } from "../constants/AgenticaDefaultPrompt";
 import { AgenticaSystemPrompt } from "../constants/AgenticaSystemPrompt";
 import { createAssistantMessageEvent } from "../factory/events";
 import { decodeHistory, decodeUserMessageContent } from "../factory/histories";
-import {
-import {
-import {
+import { __get_retry } from "../utils/__retry";
+import { AssistantMessageEmptyError, AssistantMessageEmptyWithReasoningError } from "../utils/AssistantMessageEmptyError";
+import { reduceStreamingWithDispatch } from "../utils/ChatGptCompletionStreamingUtil";
+import { toAsyncGenerator } from "../utils/StreamUtil";
 
 import { selectFunctionFromContext } from "./internal/selectFunctionFromContext";
 
@@ -93,154 +94,126 @@ async function step<Model extends ILlmSchema.Model>(
   retry: number,
   failures?: IFailure[],
 ): Promise<void> {
+  const _retryFn = __get_retry(ctx.config?.retry ?? AgenticaConstant.RETRY);
+  const retryFn = async (fn: (prevError?: unknown) => Promise<OpenAI.ChatCompletion>) => {
+    return _retryFn(fn).catch((e) => {
+      if (e instanceof AssistantMessageEmptyError) {
+        return Symbol("emptyAssistantMessage");
+      }
+      throw e;
+    });
+  };
   // ----
   // EXECUTE CHATGPT API
   // ----
-  const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  const completion = await retryFn(async (prevError) => {
+    const stream = await ctx.request("select", {
+      messages: [
+        // COMMON SYSTEM PROMPT
+        {
+          role: "system",
+          content: AgenticaDefaultPrompt.write(ctx.config),
+        } satisfies OpenAI.ChatCompletionSystemMessageParam,
+        // CANDIDATE FUNCTIONS
+        {
+          role: "assistant",
+          tool_calls: [
+            {
+              type: "function",
+              id: "getApiFunctions",
+              function: {
+                name: "getApiFunctions",
+                arguments: JSON.stringify({}),
+              },
             },
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    const acc = await accPromise;
-
-    const registerContext = (
-      choices: OpenAI.ChatCompletionChunk.Choice[],
-    ) => {
-      for (const choice of choices) {
-        /**
-         * @TODO fix it
-         * Sometimes, the complete message arrives along with a finish reason.
-         */
-        if (choice.finish_reason != null) {
-          selectContext[choice.index]?.mpsc.close();
-          continue;
-        }
-
-        if (choice.delta.content == null || choice.delta.content === "") {
-          continue;
-        }
-
-        if (selectContext[choice.index] != null) {
-          selectContext[choice.index]!.content += choice.delta.content;
-          selectContext[choice.index]!.mpsc.produce(choice.delta.content);
-          continue;
-        }
-
-        const mpsc = new MPSC<string>();
-
-        selectContext[choice.index] = {
-          content: choice.delta.content,
-          mpsc,
-        };
-        mpsc.produce(choice.delta.content);
+          ],
+        },
+        {
+          role: "tool",
+          tool_call_id: "getApiFunctions",
+          content: JSON.stringify(
+            operations.map(op => ({
+              name: op.name,
+              description: op.function.description,
+              ...(op.protocol === "http"
+                ? {
+                    method: op.function.method,
+                    path: op.function.path,
+                    tags: op.function.tags,
+                  }
+                : {}),
+            })),
+          ),
+        },
+        // PREVIOUS HISTORIES
+        ...ctx.histories.map(decodeHistory).flat(),
+        // USER INPUT
+        {
+          role: "user",
+          content: ctx.prompt.contents.map(decodeUserMessageContent),
+        },
+        // PREVIOUS ERROR
+        ...(prevError instanceof AssistantMessageEmptyWithReasoningError ? [
+          {
+            role: "assistant",
+            content: prevError.reasoning,
+          } satisfies OpenAI.ChatCompletionMessageParam,
+        ] : []),
+        // SYSTEM PROMPT
+        {
+          role: "system",
+          content:
+            ctx.config?.systemPrompt?.select?.(ctx.histories)
+              ?? AgenticaSystemPrompt.SELECT,
+        },
+        // TYPE CORRECTIONS
+        ...emendMessages(failures ?? []),
+      ],
+      // STACK FUNCTIONS
+      tools: [{
+        type: "function",
+        function: {
+          name: CONTAINER.functions[0]!.name,
+          description: CONTAINER.functions[0]!.description,
+          /**
+           * @TODO fix it
+           * The property and value have a type mismatch, but it works.
+           */
+          parameters: CONTAINER.functions[0]!.parameters as unknown as Record<string, unknown>,
+        },
+      } satisfies OpenAI.ChatCompletionTool],
+      tool_choice: retry === 0
+        ? "auto"
+        : "required",
+      // parallel_tool_calls: false,
+    });
 
-
-
-
-
-
-
-
-
-        });
-        ctx.dispatch(event);
+    const completion = await reduceStreamingWithDispatch(stream, (props) => {
+      const event: AgenticaAssistantMessageEvent = createAssistantMessageEvent(props);
+      ctx.dispatch(event);
+    });
+    const allAssistantMessagesEmpty = completion.choices.every(v => v.message.tool_calls == null && v.message.content === "");
+    if (allAssistantMessagesEmpty) {
+      const firstChoice = completion.choices.at(0);
+      if ((firstChoice?.message as { reasoning?: string })?.reasoning != null) {
+        throw new AssistantMessageEmptyWithReasoningError((firstChoice?.message as { reasoning?: string })?.reasoning ?? "");
       }
-
-    if (acc.object === "chat.completion.chunk") {
-      registerContext([acc, chunk].flatMap(v => v.choices));
-      return ChatGptCompletionMessageUtil.merge([acc, chunk]);
+      throw new AssistantMessageEmptyError();
     }
-
-    return ChatGptCompletionMessageUtil.accumulate(acc, chunk);
+    return completion;
   });
-
-
-  if(emptyAssistantMessages.length > 0) {
-    emptyAssistantMessages.forEach(v => {
+
+  if (typeof completion === "symbol") {
     const event: AgenticaAssistantMessageEvent = createAssistantMessageEvent({
-      stream: toAsyncGenerator(
+      stream: toAsyncGenerator(""),
       done: () => true,
-      get: () =>
+      get: () => "",
      join: async () => {
-        return
+        return "";
       },
-    });
-    ctx.dispatch(event);
    });
+    ctx.dispatch(event);
+    return;
  }
  // ----
  // VALIDATION
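__get_retry is new in this release (see package/src/utils/__retry.ts in the file list above) and its body does not appear on this page. Judging from the call sites, a plausible shape is the sketch below; the generic signature and exhaustion behavior are assumptions, and it presumes at least one attempt. The contract that is visible in the diff is that each attempt receives the previous attempt's error, which is how AssistantMessageEmptyWithReasoningError feeds the model's reasoning back into the next prompt as an assistant message.

// Hypothetical reconstruction from the call sites; not the shipped code.
function __get_retry(count: number) {
  return async <T>(fn: (prevError?: unknown) => Promise<T>): Promise<T> => {
    let prevError: unknown;
    for (let i = 0; i < count; i += 1) {
      try {
        return await fn(prevError);
      }
      catch (error) {
        prevError = error; // handed to the next attempt
      }
    }
    throw prevError; // attempts exhausted: surface the last error
  };
}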

package/src/utils/AssistantMessageEmptyError.ts
@@ -0,0 +1,13 @@
+export class AssistantMessageEmptyError extends Error {
+  constructor() {
+    super();
+  }
+}
+
+export class AssistantMessageEmptyWithReasoningError extends AssistantMessageEmptyError {
+  public readonly reasoning: string;
+  constructor(reasoning: string) {
+    super();
+    this.reasoning = reasoning;
+  }
+}
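Because AssistantMessageEmptyWithReasoningError extends AssistantMessageEmptyError, the single instanceof check inside retryFn in select.ts catches both variants. A quick illustration:

import { AssistantMessageEmptyError, AssistantMessageEmptyWithReasoningError } from "./AssistantMessageEmptyError";

const plain = new AssistantMessageEmptyError();
const reasoned = new AssistantMessageEmptyWithReasoningError("model reasoning text");

console.log(plain instanceof AssistantMessageEmptyError);    // true
console.log(reasoned instanceof AssistantMessageEmptyError); // true
console.log(reasoned.reasoning);                             // "model reasoning text"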