@agentica/core 0.32.1 → 0.32.3-dev.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/lib/index.mjs +308 -334
  2. package/lib/index.mjs.map +1 -1
  3. package/lib/orchestrate/call.js +87 -107
  4. package/lib/orchestrate/call.js.map +1 -1
  5. package/lib/orchestrate/describe.js +5 -50
  6. package/lib/orchestrate/describe.js.map +1 -1
  7. package/lib/orchestrate/initialize.js +5 -50
  8. package/lib/orchestrate/initialize.js.map +1 -1
  9. package/lib/orchestrate/select.js +107 -126
  10. package/lib/orchestrate/select.js.map +1 -1
  11. package/lib/utils/AssistantMessageEmptyError.d.ts +7 -0
  12. package/lib/utils/AssistantMessageEmptyError.js +17 -0
  13. package/lib/utils/AssistantMessageEmptyError.js.map +1 -0
  14. package/lib/utils/ChatGptCompletionMessageUtil.js +15 -8
  15. package/lib/utils/ChatGptCompletionMessageUtil.js.map +1 -1
  16. package/lib/utils/ChatGptCompletionStreamingUtil.d.ts +8 -0
  17. package/lib/utils/ChatGptCompletionStreamingUtil.js +74 -0
  18. package/lib/utils/ChatGptCompletionStreamingUtil.js.map +1 -0
  19. package/lib/utils/__retry.d.ts +1 -0
  20. package/lib/utils/__retry.js +30 -0
  21. package/lib/utils/__retry.js.map +1 -0
  22. package/lib/utils/__retry.spec.d.ts +1 -0
  23. package/lib/utils/__retry.spec.js +172 -0
  24. package/lib/utils/__retry.spec.js.map +1 -0
  25. package/package.json +1 -1
  26. package/src/orchestrate/call.ts +88 -114
  27. package/src/orchestrate/describe.ts +7 -65
  28. package/src/orchestrate/initialize.ts +4 -64
  29. package/src/orchestrate/select.ts +111 -138
  30. package/src/utils/AssistantMessageEmptyError.ts +13 -0
  31. package/src/utils/ChatGptCompletionMessageUtil.ts +14 -0
  32. package/src/utils/ChatGptCompletionStreamingUtil.ts +81 -0
  33. package/src/utils/__retry.spec.ts +198 -0
  34. package/src/utils/__retry.ts +18 -0
@@ -17,9 +17,10 @@ import { AgenticaDefaultPrompt } from "../constants/AgenticaDefaultPrompt";
17
17
  import { AgenticaSystemPrompt } from "../constants/AgenticaSystemPrompt";
18
18
  import { createAssistantMessageEvent } from "../factory/events";
19
19
  import { decodeHistory, decodeUserMessageContent } from "../factory/histories";
20
- import { MPSC } from "../utils";
21
- import { ChatGptCompletionMessageUtil } from "../utils/ChatGptCompletionMessageUtil";
22
- import { streamDefaultReaderToAsyncGenerator, StreamUtil, toAsyncGenerator } from "../utils/StreamUtil";
20
+ import { __get_retry } from "../utils/__retry";
21
+ import { AssistantMessageEmptyError, AssistantMessageEmptyWithReasoningError } from "../utils/AssistantMessageEmptyError";
22
+ import { reduceStreamingWithDispatch } from "../utils/ChatGptCompletionStreamingUtil";
23
+ import { toAsyncGenerator } from "../utils/StreamUtil";
23
24
 
24
25
  import { selectFunctionFromContext } from "./internal/selectFunctionFromContext";
25
26
 
@@ -93,154 +94,126 @@ async function step<Model extends ILlmSchema.Model>(
93
94
  retry: number,
94
95
  failures?: IFailure[],
95
96
  ): Promise<void> {
97
+ const _retryFn = __get_retry(ctx.config?.retry ?? AgenticaConstant.RETRY);
98
+ const retryFn = async (fn: (prevError?: unknown) => Promise<OpenAI.ChatCompletion>) => {
99
+ return _retryFn(fn).catch((e) => {
100
+ if (e instanceof AssistantMessageEmptyError) {
101
+ return Symbol("emptyAssistantMessage");
102
+ }
103
+ throw e;
104
+ });
105
+ };
96
106
  // ----
97
107
  // EXECUTE CHATGPT API
98
108
  // ----
99
- const completionStream = await ctx.request("select", {
100
- messages: [
101
- // COMMON SYSTEM PROMPT
102
- {
103
- role: "system",
104
- content: AgenticaDefaultPrompt.write(ctx.config),
105
- } satisfies OpenAI.ChatCompletionSystemMessageParam,
106
- // CANDIDATE FUNCTIONS
107
- {
108
- role: "assistant",
109
- tool_calls: [
110
- {
111
- type: "function",
112
- id: "getApiFunctions",
113
- function: {
114
- name: "getApiFunctions",
115
- arguments: JSON.stringify({}),
109
+ const completion = await retryFn(async (prevError) => {
110
+ const stream = await ctx.request("select", {
111
+ messages: [
112
+ // COMMON SYSTEM PROMPT
113
+ {
114
+ role: "system",
115
+ content: AgenticaDefaultPrompt.write(ctx.config),
116
+ } satisfies OpenAI.ChatCompletionSystemMessageParam,
117
+ // CANDIDATE FUNCTIONS
118
+ {
119
+ role: "assistant",
120
+ tool_calls: [
121
+ {
122
+ type: "function",
123
+ id: "getApiFunctions",
124
+ function: {
125
+ name: "getApiFunctions",
126
+ arguments: JSON.stringify({}),
127
+ },
116
128
  },
117
- },
118
- ],
119
- },
120
- {
121
- role: "tool",
122
- tool_call_id: "getApiFunctions",
123
- content: JSON.stringify(
124
- operations.map(op => ({
125
- name: op.name,
126
- description: op.function.description,
127
- ...(op.protocol === "http"
128
- ? {
129
- method: op.function.method,
130
- path: op.function.path,
131
- tags: op.function.tags,
132
- }
133
- : {}),
134
- })),
135
- ),
136
- },
137
- // PREVIOUS HISTORIES
138
- ...ctx.histories.map(decodeHistory).flat(),
139
- // USER INPUT
140
- {
141
- role: "user",
142
- content: ctx.prompt.contents.map(decodeUserMessageContent),
143
- },
144
- // SYSTEM PROMPT
145
- {
146
- role: "system",
147
- content:
148
- ctx.config?.systemPrompt?.select?.(ctx.histories)
149
- ?? AgenticaSystemPrompt.SELECT,
150
- },
151
- // TYPE CORRECTIONS
152
- ...emendMessages(failures ?? []),
153
- ],
154
- // STACK FUNCTIONS
155
- tools: [{
156
- type: "function",
157
- function: {
158
- name: CONTAINER.functions[0]!.name,
159
- description: CONTAINER.functions[0]!.description,
160
- /**
161
- * @TODO fix it
162
- * The property and value have a type mismatch, but it works.
163
- */
164
- parameters: CONTAINER.functions[0]!.parameters as unknown as Record<string, unknown>,
165
- },
166
- } satisfies OpenAI.ChatCompletionTool],
167
- tool_choice: retry === 0
168
- ? "auto"
169
- : "required",
170
- // parallel_tool_calls: false,
171
- });
172
-
173
- const selectContext: ({
174
- content: string;
175
- mpsc: MPSC<string>;
176
- })[] = [];
177
- const nullableCompletion = await StreamUtil.reduce<OpenAI.ChatCompletionChunk, Promise<OpenAI.ChatCompletion>>(completionStream, async (accPromise, chunk) => {
178
- const acc = await accPromise;
179
-
180
- const registerContext = (
181
- choices: OpenAI.ChatCompletionChunk.Choice[],
182
- ) => {
183
- for (const choice of choices) {
184
- /**
185
- * @TODO fix it
186
- * Sometimes, the complete message arrives along with a finish reason.
187
- */
188
- if (choice.finish_reason != null) {
189
- selectContext[choice.index]?.mpsc.close();
190
- continue;
191
- }
192
-
193
- if (choice.delta.content == null || choice.delta.content === "") {
194
- continue;
195
- }
196
-
197
- if (selectContext[choice.index] != null) {
198
- selectContext[choice.index]!.content += choice.delta.content;
199
- selectContext[choice.index]!.mpsc.produce(choice.delta.content);
200
- continue;
201
- }
202
-
203
- const mpsc = new MPSC<string>();
204
-
205
- selectContext[choice.index] = {
206
- content: choice.delta.content,
207
- mpsc,
208
- };
209
- mpsc.produce(choice.delta.content);
129
+ ],
130
+ },
131
+ {
132
+ role: "tool",
133
+ tool_call_id: "getApiFunctions",
134
+ content: JSON.stringify(
135
+ operations.map(op => ({
136
+ name: op.name,
137
+ description: op.function.description,
138
+ ...(op.protocol === "http"
139
+ ? {
140
+ method: op.function.method,
141
+ path: op.function.path,
142
+ tags: op.function.tags,
143
+ }
144
+ : {}),
145
+ })),
146
+ ),
147
+ },
148
+ // PREVIOUS HISTORIES
149
+ ...ctx.histories.map(decodeHistory).flat(),
150
+ // USER INPUT
151
+ {
152
+ role: "user",
153
+ content: ctx.prompt.contents.map(decodeUserMessageContent),
154
+ },
155
+ // PREVIOUS ERROR
156
+ ...(prevError instanceof AssistantMessageEmptyWithReasoningError ? [
157
+ {
158
+ role: "assistant",
159
+ content: prevError.reasoning,
160
+ } satisfies OpenAI.ChatCompletionMessageParam,
161
+ ] : []),
162
+ // SYSTEM PROMPT
163
+ {
164
+ role: "system",
165
+ content:
166
+ ctx.config?.systemPrompt?.select?.(ctx.histories)
167
+ ?? AgenticaSystemPrompt.SELECT,
168
+ },
169
+ // TYPE CORRECTIONS
170
+ ...emendMessages(failures ?? []),
171
+ ],
172
+ // STACK FUNCTIONS
173
+ tools: [{
174
+ type: "function",
175
+ function: {
176
+ name: CONTAINER.functions[0]!.name,
177
+ description: CONTAINER.functions[0]!.description,
178
+ /**
179
+ * @TODO fix it
180
+ * The property and value have a type mismatch, but it works.
181
+ */
182
+ parameters: CONTAINER.functions[0]!.parameters as unknown as Record<string, unknown>,
183
+ },
184
+ } satisfies OpenAI.ChatCompletionTool],
185
+ tool_choice: retry === 0
186
+ ? "auto"
187
+ : "required",
188
+ // parallel_tool_calls: false,
189
+ });
210
190
 
211
- const event: AgenticaAssistantMessageEvent = createAssistantMessageEvent({
212
- stream: streamDefaultReaderToAsyncGenerator(mpsc.consumer.getReader()),
213
- done: () => mpsc.done(),
214
- get: () => selectContext[choice.index]?.content ?? "",
215
- join: async () => {
216
- await mpsc.waitClosed();
217
- return selectContext[choice.index]!.content;
218
- },
219
- });
220
- ctx.dispatch(event);
191
+ const completion = await reduceStreamingWithDispatch(stream, (props) => {
192
+ const event: AgenticaAssistantMessageEvent = createAssistantMessageEvent(props);
193
+ ctx.dispatch(event);
194
+ });
195
+ const allAssistantMessagesEmpty = completion.choices.every(v => v.message.tool_calls == null && v.message.content === "");
196
+ if (allAssistantMessagesEmpty) {
197
+ const firstChoice = completion.choices.at(0);
198
+ if ((firstChoice?.message as { reasoning?: string })?.reasoning != null) {
199
+ throw new AssistantMessageEmptyWithReasoningError((firstChoice?.message as { reasoning?: string })?.reasoning ?? "");
221
200
  }
222
- };
223
- if (acc.object === "chat.completion.chunk") {
224
- registerContext([acc, chunk].flatMap(v => v.choices));
225
- return ChatGptCompletionMessageUtil.merge([acc, chunk]);
201
+ throw new AssistantMessageEmptyError();
226
202
  }
227
- registerContext(chunk.choices);
228
- return ChatGptCompletionMessageUtil.accumulate(acc, chunk);
203
+ return completion;
229
204
  });
230
- const completion = nullableCompletion!;
231
- const emptyAssistantMessages = completion.choices.filter(v => v.message.tool_calls == null && v.message.content === "");
232
- if(emptyAssistantMessages.length > 0) {
233
- emptyAssistantMessages.forEach(v => {
205
+
206
+ if (typeof completion === "symbol") {
234
207
  const event: AgenticaAssistantMessageEvent = createAssistantMessageEvent({
235
- stream: toAsyncGenerator(v.message.content ?? ""),
208
+ stream: toAsyncGenerator(""),
236
209
  done: () => true,
237
- get: () => v.message.content ?? "",
210
+ get: () => "",
238
211
  join: async () => {
239
- return v.message.content ?? "";
212
+ return "";
240
213
  },
241
- });
242
- ctx.dispatch(event);
243
214
  });
215
+ ctx.dispatch(event);
216
+ return;
244
217
  }
245
218
  // ----
246
219
  // VALIDATION
@@ -0,0 +1,13 @@
1
+ export class AssistantMessageEmptyError extends Error {
2
+ constructor() {
3
+ super();
4
+ }
5
+ }
6
+
7
+ export class AssistantMessageEmptyWithReasoningError extends AssistantMessageEmptyError {
8
+ public readonly reasoning: string;
9
+ constructor(reasoning: string) {
10
+ super();
11
+ this.reasoning = reasoning;
12
+ }
13
+ }
@@ -54,6 +54,10 @@ function accumulate(origin: ChatCompletion, chunk: ChatCompletionChunk): ChatCom
54
54
  content: choice.delta.content ?? null,
55
55
  refusal: choice.delta.refusal ?? null,
56
56
  role: "assistant",
57
+ ...({
58
+ // for open router
59
+ reasoning: (choice.delta as { reasoning?: string }).reasoning ?? null,
60
+ }),
57
61
  } satisfies ChatCompletionMessage,
58
62
  };
59
63
  });
@@ -133,6 +137,16 @@ function mergeChoice(acc: ChatCompletion.Choice, cur: ChatCompletionChunk.Choice
133
137
  }
134
138
  }
135
139
 
140
+ // for open router
141
+ if ((cur.delta as { reasoning?: string }).reasoning != null) {
142
+ if ((acc.message as { reasoning?: string }).reasoning == null) {
143
+ (acc.message as { reasoning?: string }).reasoning = (cur.delta as { reasoning?: string }).reasoning;
144
+ }
145
+ else {
146
+ (acc.message as unknown as { reasoning: string }).reasoning += (cur.delta as { reasoning: string }).reasoning;
147
+ }
148
+ }
149
+
136
150
  if (cur.delta.tool_calls != null) {
137
151
  acc.message.tool_calls ??= [];
138
152
  const toolCalls = acc.message.tool_calls;
@@ -0,0 +1,81 @@
1
import type { ChatCompletion, ChatCompletionChunk } from "openai/resources";

import { ChatGptCompletionMessageUtil, MPSC, streamDefaultReaderToAsyncGenerator, StreamUtil } from ".";

/**
 * Reduces a stream of `ChatCompletionChunk`s into a single `ChatCompletion`,
 * while dispatching one streaming event per choice index as text deltas arrive.
 *
 * For each choice index the first non-empty content delta creates an
 * MPSC channel and invokes `eventProcessor` exactly once with accessors over
 * that channel; later deltas for the same index are appended and produced into
 * the channel. A `finish_reason` on a choice closes its channel.
 *
 * @param stream         readable stream of completion chunks from the API
 * @param eventProcessor called once per choice; receives a live text stream
 *                       plus `done`/`get`/`join` accessors over the
 *                       accumulated content
 * @returns the fully accumulated `ChatCompletion`
 * @throws Error when the stream yields no chunks at all (reduce returns null)
 */
async function reduceStreamingWithDispatch(stream: ReadableStream<ChatCompletionChunk>, eventProcessor: (props: {
  stream: AsyncGenerator<string, undefined, undefined>;
  done: () => boolean;
  get: () => string;
  join: () => Promise<string>;
}) => void) {
  // Per-choice-index accumulation state: full text so far + its MPSC channel.
  const streamContext = new Map<number, { content: string; mpsc: MPSC<string> }>();

  const nullableCompletion = await StreamUtil.reduce<ChatCompletionChunk, Promise<ChatCompletion>>(stream, async (accPromise, chunk) => {
    const acc = await accPromise;

    const registerContext = (
      choices: ChatCompletionChunk.Choice[],
    ) => {
      for (const choice of choices) {
        /**
         * @TODO fix it
         * Sometimes, the complete message arrives along with a finish reason.
         */
        // A finish reason closes this choice's channel; note that any content
        // delivered in the same chunk is skipped here (see @TODO above).
        if (choice.finish_reason != null) {
          const context = streamContext.get(choice.index);
          if (context != null) {
            context.mpsc.close();
          }
          continue;
        }

        // Ignore chunks that carry no text for this choice.
        if (choice.delta.content == null || choice.delta.content === "") {
          continue;
        }

        // Subsequent delta for an already-registered choice: append + produce.
        if (streamContext.has(choice.index)) {
          const context = streamContext.get(choice.index)!;
          context.content += choice.delta.content;
          context.mpsc.produce(choice.delta.content);
          continue;
        }

        // First delta for this choice: create the channel, seed it with the
        // delta, then hand the consumer side to the event processor.
        const mpsc = new MPSC<string>();

        streamContext.set(choice.index, {
          content: choice.delta.content,
          mpsc,
        });
        mpsc.produce(choice.delta.content);

        eventProcessor({
          stream: streamDefaultReaderToAsyncGenerator(mpsc.consumer.getReader()),
          done: () => mpsc.done(),
          get: () => streamContext.get(choice.index)?.content ?? "",
          join: async () => {
            // Resolves with the full text once the channel is closed
            // (i.e. after this choice's finish_reason arrives).
            await mpsc.waitClosed();
            return streamContext.get(choice.index)!.content;
          },
        });
      }
    };
    // First iteration: the accumulator is still the raw first chunk, so both
    // it and the current chunk must be registered and merged together.
    if (acc.object === "chat.completion.chunk") {
      registerContext([acc, chunk].flatMap(v => v.choices));
      return ChatGptCompletionMessageUtil.merge([acc, chunk]);
    }
    // Later iterations: fold the new chunk into the running completion.
    registerContext(chunk.choices);
    return ChatGptCompletionMessageUtil.accumulate(acc, chunk);
  });

  // StreamUtil.reduce yields null only when the stream produced no chunks.
  if (nullableCompletion == null) {
    throw new Error(
      "StreamUtil.reduce did not produce a ChatCompletion. Possible causes: the input stream was empty, invalid, or closed prematurely. "
      + "To debug: check that the stream is properly initialized and contains valid ChatCompletionChunk data. "
      + "You may also enable verbose logging upstream to inspect the stream contents. "
      + `Stream locked: ${stream.locked}.`,
    );
  }
  return nullableCompletion;
}

export { reduceStreamingWithDispatch };
@@ -0,0 +1,198 @@
1
import { __get_retry } from "./__retry";

// Unit tests for the curried retry helper `__get_retry(limit)`.
// NOTE(review): `describe`/`it`/`expect`/`beforeEach`/`vi` are used without
// imports — assumes vitest is configured with `globals: true`; confirm
// against the vitest config before relying on these specs.
describe("__get_retry", () => {
  beforeEach(() => {
    vi.clearAllMocks();
  });

  // The wrapped function must be called exactly once per attempt, with the
  // previous attempt's error passed as `prevError`.
  describe("success cases", () => {
    it("should not retry when successful on first attempt", async () => {
      const mockFn = vi.fn().mockResolvedValue("success");
      const retryFn = __get_retry(3);

      const result = await retryFn(mockFn);

      expect(result).toBe("success");
      expect(mockFn).toHaveBeenCalledTimes(1);
      expect(mockFn).toHaveBeenCalledWith(undefined);
    });

    it("should call exactly 2 times when successful on second attempt", async () => {
      const mockFn = vi.fn()
        .mockRejectedValueOnce(new Error("First failure"))
        .mockResolvedValue("success");
      const retryFn = __get_retry(3);

      const result = await retryFn(mockFn);

      expect(result).toBe("success");
      expect(mockFn).toHaveBeenCalledTimes(2);
      expect(mockFn).toHaveBeenNthCalledWith(1, undefined);
      expect(mockFn).toHaveBeenNthCalledWith(2, new Error("First failure"));
    });

    it("should call limit times when successful on last attempt", async () => {
      const mockFn = vi.fn()
        .mockRejectedValueOnce(new Error("First failure"))
        .mockRejectedValueOnce(new Error("Second failure"))
        .mockResolvedValue("success");
      const retryFn = __get_retry(3);

      const result = await retryFn(mockFn);

      expect(result).toBe("success");
      expect(mockFn).toHaveBeenCalledTimes(3);
      expect(mockFn).toHaveBeenNthCalledWith(1, undefined);
      expect(mockFn).toHaveBeenNthCalledWith(2, new Error("First failure"));
      expect(mockFn).toHaveBeenNthCalledWith(3, new Error("Second failure"));
    });
  });

  // After `limit` failed attempts the LAST error is rethrown unchanged.
  describe("failure cases", () => {
    it("should throw last error after limit attempts", async () => {
      const error1 = new Error("First failure");
      const error2 = new Error("Second failure");
      const error3 = new Error("Third failure");

      const mockFn = vi.fn()
        .mockRejectedValueOnce(error1)
        .mockRejectedValueOnce(error2)
        .mockRejectedValueOnce(error3);
      const retryFn = __get_retry(3);

      await expect(retryFn(mockFn)).rejects.toThrow("Third failure");
      expect(mockFn).toHaveBeenCalledTimes(3);
      expect(mockFn).toHaveBeenNthCalledWith(1, undefined);
      expect(mockFn).toHaveBeenNthCalledWith(2, error1);
      expect(mockFn).toHaveBeenNthCalledWith(3, error2);
    });

    it("should throw error immediately when limit is 1", async () => {
      const error = new Error("Immediate failure");
      const mockFn = vi.fn().mockRejectedValue(error);
      const retryFn = __get_retry(1);

      await expect(retryFn(mockFn)).rejects.toThrow("Immediate failure");
      expect(mockFn).toHaveBeenCalledTimes(1);
      expect(mockFn).toHaveBeenCalledWith(undefined);
    });
  });

  // `prevError` must thread the previous failure into the next attempt, and
  // an explicitly provided initial prevError must reach the first call.
  describe("prevError propagation", () => {
    it("should pass previous error as prevError correctly", async () => {
      const error1 = new Error("First error");
      const error2 = new Error("Second error");

      const mockFn = vi.fn()
        .mockRejectedValueOnce(error1)
        .mockRejectedValueOnce(error2)
        .mockResolvedValue("success");
      const retryFn = __get_retry(3);

      const result = await retryFn(mockFn);

      expect(result).toBe("success");
      expect(mockFn).toHaveBeenCalledTimes(3);
      expect(mockFn).toHaveBeenNthCalledWith(1, undefined);
      expect(mockFn).toHaveBeenNthCalledWith(2, error1);
      expect(mockFn).toHaveBeenNthCalledWith(3, error2);
    });

    it("should use initial prevError in first call when provided", async () => {
      const initialError = new Error("Initial error");
      const mockFn = vi.fn().mockResolvedValue("success");
      const retryFn = __get_retry(3);

      const result = await retryFn(mockFn, initialError);

      expect(result).toBe("success");
      expect(mockFn).toHaveBeenCalledTimes(1);
      expect(mockFn).toHaveBeenCalledWith(initialError);
    });
  });

  // Thrown values are forwarded as-is: strings, null and undefined are all
  // legal `prevError` values (the helper types it as `unknown`).
  describe("different error types", () => {
    it("should handle string errors correctly", async () => {
      const mockFn = vi.fn()
        .mockRejectedValueOnce("String error")
        .mockResolvedValue("success");
      const retryFn = __get_retry(2);

      const result = await retryFn(mockFn);

      expect(result).toBe("success");
      expect(mockFn).toHaveBeenCalledTimes(2);
      expect(mockFn).toHaveBeenNthCalledWith(2, "String error");
    });

    it("should handle null errors correctly", async () => {
      const mockFn = vi.fn()
        .mockRejectedValueOnce(null)
        .mockResolvedValue("success");
      const retryFn = __get_retry(2);

      const result = await retryFn(mockFn);

      expect(result).toBe("success");
      expect(mockFn).toHaveBeenCalledTimes(2);
      expect(mockFn).toHaveBeenNthCalledWith(2, null);
    });

    it("should handle undefined errors correctly", async () => {
      const mockFn = vi.fn()
        .mockRejectedValueOnce(undefined)
        .mockResolvedValue("success");
      const retryFn = __get_retry(2);

      const result = await retryFn(mockFn);

      expect(result).toBe("success");
      expect(mockFn).toHaveBeenCalledTimes(2);
      expect(mockFn).toHaveBeenNthCalledWith(2, undefined);
    });
  });

  // Attempts execute strictly sequentially, in order.
  describe("recursive call verification", () => {
    it("should occur recursive calls in correct order", async () => {
      const callOrder: string[] = [];
      const mockFn = vi.fn()
        .mockImplementationOnce(() => {
          callOrder.push("first call");
          throw new Error("First failure");
        })
        .mockImplementationOnce(() => {
          callOrder.push("second call");
          throw new Error("Second failure");
        })
        .mockImplementationOnce(async () => {
          callOrder.push("third call");
          return Promise.resolve("success");
        });

      const retryFn = __get_retry(3);
      const result = await retryFn(mockFn);

      expect(result).toBe("success");
      expect(callOrder).toEqual(["first call", "second call", "third call"]);
      expect(mockFn).toHaveBeenCalledTimes(3);
    });
  });

  // The generic return type `T` flows through unchanged.
  describe("type safety", () => {
    it("should handle different return types correctly", async () => {
      const stringFn = vi.fn().mockResolvedValue("string result");
      const numberFn = vi.fn().mockResolvedValue(42);
      const objectFn = vi.fn().mockResolvedValue({ key: "value" });

      const retryFn = __get_retry(3);

      const stringResult = await retryFn(stringFn);
      const numberResult = await retryFn(numberFn);
      const objectResult = await retryFn(objectFn);

      expect(stringResult).toBe("string result");
      expect(numberResult).toBe(42);
      expect(objectResult).toEqual({ key: "value" });
    });
  });
});
@@ -0,0 +1,18 @@
1
+ /**
2
+ * @internal
3
+ */
4
+ export function __get_retry(limit: number) {
5
+ const retryFn = async <T>(fn: (prevError?: unknown) => Promise<T>, prevError?: unknown, attempt: number = 0): Promise<T> => {
6
+ try {
7
+ return await fn(prevError);
8
+ }
9
+ catch (error) {
10
+ if (attempt >= limit - 1) {
11
+ throw error;
12
+ }
13
+ return retryFn(fn, error, attempt + 1);
14
+ }
15
+ };
16
+
17
+ return retryFn;
18
+ }