@aigne/gemini 0.14.10-beta → 0.14.10-beta.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,89 @@
1
1
  # Changelog
2
2
 
3
+ ## [0.14.10-beta.3](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.10-beta.2...gemini-v0.14.10-beta.3) (2025-11-19)
4
+
5
+
6
+ ### Features
7
+
8
+ * **models:** support gemini 3.x thinking level and thoughtSignature ([#760](https://github.com/AIGNE-io/aigne-framework/issues/760)) ([243f2d4](https://github.com/AIGNE-io/aigne-framework/commit/243f2d457792a20ba2b87378576092e6f88e319c))
9
+
10
+
11
+ ### Dependencies
12
+
13
+ * The following workspace dependencies were updated
14
+ * dependencies
15
+ * @aigne/core bumped to 1.69.0-beta.2
16
+ * devDependencies
17
+ * @aigne/test-utils bumped to 0.5.63-beta.3
18
+
19
+ ## [0.14.10-beta.2](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.10-beta.1...gemini-v0.14.10-beta.2) (2025-11-18)
20
+
21
+
22
+ ### Features
23
+
24
+ * add dynamic model options resolution with getter pattern ([#708](https://github.com/AIGNE-io/aigne-framework/issues/708)) ([5ed5085](https://github.com/AIGNE-io/aigne-framework/commit/5ed5085203763c70194853c56edc13acf56d81c6))
25
+ * add modalities support for chat model ([#454](https://github.com/AIGNE-io/aigne-framework/issues/454)) ([70d1bf6](https://github.com/AIGNE-io/aigne-framework/commit/70d1bf631f4e711235d89c6df8ee210a19179b30))
26
+ * add reasoningEffort option for chat model ([#680](https://github.com/AIGNE-io/aigne-framework/issues/680)) ([f69d232](https://github.com/AIGNE-io/aigne-framework/commit/f69d232d714d4a3e4946bdc8c6598747c9bcbd57))
27
+ * add thinking support to Gemini chat models ([#650](https://github.com/AIGNE-io/aigne-framework/issues/650)) ([09b828b](https://github.com/AIGNE-io/aigne-framework/commit/09b828ba668d90cc6aac68a5e8190adb146b5e45))
28
+ * **cli:** add retry functionality and improve error handling for AIGNE Hub ([#348](https://github.com/AIGNE-io/aigne-framework/issues/348)) ([672c93a](https://github.com/AIGNE-io/aigne-framework/commit/672c93abbba8b4b234f6d810536ff4b603a97e1e))
29
+ * improve image model architecture and file handling ([#527](https://github.com/AIGNE-io/aigne-framework/issues/527)) ([4db50aa](https://github.com/AIGNE-io/aigne-framework/commit/4db50aa0387a1a0f045ca11aaa61613e36ca7597))
30
+ * **memory:** support did space memory adapter ([#229](https://github.com/AIGNE-io/aigne-framework/issues/229)) ([6f69b64](https://github.com/AIGNE-io/aigne-framework/commit/6f69b64e98b963db9d6ab5357306b445385eaa68))
31
+ * **models:** support aigne hub models ([#416](https://github.com/AIGNE-io/aigne-framework/issues/416)) ([b4f014c](https://github.com/AIGNE-io/aigne-framework/commit/b4f014cf5ed08ef930d3ddfc278d3610e64c6af3))
32
+ * **models:** support gemini and ideogram images models ([#412](https://github.com/AIGNE-io/aigne-framework/issues/412)) ([6534fec](https://github.com/AIGNE-io/aigne-framework/commit/6534fecb0bdfb4b0a4440d44c0e563b9a029a68f))
33
+ * **models:** support gemini and ideogram images models ([#412](https://github.com/AIGNE-io/aigne-framework/issues/412)) ([6534fec](https://github.com/AIGNE-io/aigne-framework/commit/6534fecb0bdfb4b0a4440d44c0e563b9a029a68f))
34
+ * **model:** support video model ([#647](https://github.com/AIGNE-io/aigne-framework/issues/647)) ([de81742](https://github.com/AIGNE-io/aigne-framework/commit/de817421ef1dd3246d0d8c51ff12f0a855658f9f))
35
+ * support custom prefer input file type ([#469](https://github.com/AIGNE-io/aigne-framework/issues/469)) ([db0161b](https://github.com/AIGNE-io/aigne-framework/commit/db0161bbac52542c771ee2f40f361636b0668075))
36
+
37
+
38
+ ### Bug Fixes
39
+
40
+ * add prefer input file type option for image model ([#536](https://github.com/AIGNE-io/aigne-framework/issues/536)) ([3cba8a5](https://github.com/AIGNE-io/aigne-framework/commit/3cba8a5562233a1567b49b6dd5c446c0760f5c4c))
41
+ * bump version ([93a1c10](https://github.com/AIGNE-io/aigne-framework/commit/93a1c10cf35f88eaafe91092481f5d087bd5b3a9))
42
+ * **core:** make getCredential async for aigne-hub mount point retrieval ([#372](https://github.com/AIGNE-io/aigne-framework/issues/372)) ([34ce7a6](https://github.com/AIGNE-io/aigne-framework/commit/34ce7a645fa83994d3dfe0f29ca70098cfecac9c))
43
+ * correct calculate token usage for gemini model ([7fd1328](https://github.com/AIGNE-io/aigne-framework/commit/7fd13289d3d0f8e062211f7c6dd5cb56e5318c1b))
44
+ * **docs:** update video mode docs ([#695](https://github.com/AIGNE-io/aigne-framework/issues/695)) ([d691001](https://github.com/AIGNE-io/aigne-framework/commit/d69100169457c16c14f2f3e2f7fcd6b2a99330f3))
45
+ * **gemini:** handle empty responses when files are present ([#648](https://github.com/AIGNE-io/aigne-framework/issues/648)) ([f4e259c](https://github.com/AIGNE-io/aigne-framework/commit/f4e259c5e5c687c347bb5cf29cbb0b5bf4d0d4a1))
46
+ * **gemini:** implement retry mechanism for empty responses with structured output fallback ([#638](https://github.com/AIGNE-io/aigne-framework/issues/638)) ([d33c8bb](https://github.com/AIGNE-io/aigne-framework/commit/d33c8bb9711aadddef9687d6cf472a179cd8ed9c))
47
+ * **gemini:** include thoughts token count in output token usage ([#669](https://github.com/AIGNE-io/aigne-framework/issues/669)) ([f6ff10c](https://github.com/AIGNE-io/aigne-framework/commit/f6ff10c33b0612a0bc416842c5a5bec3850a3fe6))
48
+ * **gemini:** should include at least one user message ([#521](https://github.com/AIGNE-io/aigne-framework/issues/521)) ([eb2752e](https://github.com/AIGNE-io/aigne-framework/commit/eb2752ed7d78f59c435ecc3ccb7227e804e3781e))
49
+ * **gemini:** use StructuredOutputError to trigger retry for missing JSON response ([#660](https://github.com/AIGNE-io/aigne-framework/issues/660)) ([e8826ed](https://github.com/AIGNE-io/aigne-framework/commit/e8826ed96db57bfcce0b577881bf0d2fd828c269))
50
+ * improve image model parameters ([#530](https://github.com/AIGNE-io/aigne-framework/issues/530)) ([d66b5ca](https://github.com/AIGNE-io/aigne-framework/commit/d66b5ca01e14baad2712cc1a84930cdb63703232))
51
+ * **models:** add image parameters support for video generation ([#684](https://github.com/AIGNE-io/aigne-framework/issues/684)) ([b048b7f](https://github.com/AIGNE-io/aigne-framework/commit/b048b7f92bd7a532dbdbeb6fb5fa5499bae6b953))
52
+ * **models:** add imageConfig to gemini image model ([#621](https://github.com/AIGNE-io/aigne-framework/issues/621)) ([252de7a](https://github.com/AIGNE-io/aigne-framework/commit/252de7a10701c4f5302c2fff977c88e5e833b7b1))
53
+ * **models:** add mineType for transform file ([#667](https://github.com/AIGNE-io/aigne-framework/issues/667)) ([155a173](https://github.com/AIGNE-io/aigne-framework/commit/155a173e75aff1dbe870a1305455a4300942e07a))
54
+ * **models:** aigne hub video params ([#665](https://github.com/AIGNE-io/aigne-framework/issues/665)) ([d00f836](https://github.com/AIGNE-io/aigne-framework/commit/d00f8368422d8e3707b974e1aff06714731ebb28))
55
+ * **models:** auto retry when got empty response from gemini ([#636](https://github.com/AIGNE-io/aigne-framework/issues/636)) ([9367cef](https://github.com/AIGNE-io/aigne-framework/commit/9367cef49ea4c0c87b8a36b454deb2efaee6886f))
56
+ * **models:** enhance gemini model tool use with status fields ([#634](https://github.com/AIGNE-io/aigne-framework/issues/634)) ([067b175](https://github.com/AIGNE-io/aigne-framework/commit/067b175c8e31bb5b1a6d0fc5a5cfb2d070d8d709))
57
+ * **models:** improve message structure handling and enable auto-message options ([#657](https://github.com/AIGNE-io/aigne-framework/issues/657)) ([233d70c](https://github.com/AIGNE-io/aigne-framework/commit/233d70cb292b937200fada8434f33d957d766ad6))
58
+ * **model:** transform local file to base64 before request llm ([#462](https://github.com/AIGNE-io/aigne-framework/issues/462)) ([58ef5d7](https://github.com/AIGNE-io/aigne-framework/commit/58ef5d77046c49f3c4eed15b7f0cc283cbbcd74a))
59
+ * **model:** updated default video duration settings for AI video models ([#663](https://github.com/AIGNE-io/aigne-framework/issues/663)) ([1203941](https://github.com/AIGNE-io/aigne-framework/commit/12039411aaef77ba665e8edfb0fe6f8097c43e39))
60
+ * should not return local path from aigne hub service ([#460](https://github.com/AIGNE-io/aigne-framework/issues/460)) ([c959717](https://github.com/AIGNE-io/aigne-framework/commit/c95971774f7e84dbeb3313f60b3e6464e2bb22e4))
61
+ * standardize file parameter naming across models ([#534](https://github.com/AIGNE-io/aigne-framework/issues/534)) ([f159a9d](https://github.com/AIGNE-io/aigne-framework/commit/f159a9d6af21ec0e99641996b150560929845845))
62
+ * support gemini-2.0-flash model for image model ([#429](https://github.com/AIGNE-io/aigne-framework/issues/429)) ([5a0bba1](https://github.com/AIGNE-io/aigne-framework/commit/5a0bba197cf8785384b70302f86cf702d04b7fc4))
63
+ * support optional field structured output for gemini ([#468](https://github.com/AIGNE-io/aigne-framework/issues/468)) ([70c6279](https://github.com/AIGNE-io/aigne-framework/commit/70c62795039a2862e3333f26707329489bf938de))
64
+ * **transport:** improve HTTP client option handling and error serialization ([#445](https://github.com/AIGNE-io/aigne-framework/issues/445)) ([d3bcdd2](https://github.com/AIGNE-io/aigne-framework/commit/d3bcdd23ab8011a7d40fc157fd61eb240494c7a5))
65
+ * update deps compatibility in CommonJS environment ([#580](https://github.com/AIGNE-io/aigne-framework/issues/580)) ([a1e35d0](https://github.com/AIGNE-io/aigne-framework/commit/a1e35d016405accb51c1aeb6a544503a1c78e912))
66
+
67
+
68
+ ### Dependencies
69
+
70
+ * The following workspace dependencies were updated
71
+ * dependencies
72
+ * @aigne/core bumped to 1.69.0-beta.1
73
+ * devDependencies
74
+ * @aigne/test-utils bumped to 0.5.63-beta.2
75
+
76
+ ## [0.14.10-beta.1](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.10-beta...gemini-v0.14.10-beta.1) (2025-11-18)
77
+
78
+
79
+ ### Dependencies
80
+
81
+ * The following workspace dependencies were updated
82
+ * dependencies
83
+ * @aigne/core bumped to 1.68.3-beta.1
84
+ * devDependencies
85
+ * @aigne/test-utils bumped to 0.5.63-beta.1
86
+
3
87
  ## [0.14.10-beta](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.9...gemini-v0.14.10-beta) (2025-11-17)
4
88
 
5
89
 
@@ -57,10 +57,10 @@ export declare class GeminiChatModel extends ChatModel {
57
57
  modalities?: import("@aigne/core").Modality[] | {
58
58
  $get: string;
59
59
  } | undefined;
60
- preferInputFileType?: "file" | "url" | {
60
+ preferInputFileType?: "url" | "file" | {
61
61
  $get: string;
62
62
  } | undefined;
63
- reasoningEffort?: number | "minimal" | "low" | "medium" | "high" | {
63
+ reasoningEffort?: number | "high" | "low" | "minimal" | "medium" | {
64
64
  $get: string;
65
65
  } | undefined;
66
66
  }> | undefined;
@@ -82,6 +82,12 @@ export declare class GeminiChatModel extends ChatModel {
82
82
  low: number;
83
83
  minimal: number;
84
84
  };
85
+ protected thinkingLevelMap: {
86
+ high: string;
87
+ medium: string;
88
+ low: string;
89
+ minimal: string;
90
+ };
85
91
  protected getThinkingBudget(model: string, effort: ChatModelInputOptions["reasoningEffort"]): {
86
92
  support: boolean;
87
93
  budget?: number;
@@ -92,6 +92,12 @@ class GeminiChatModel extends core_1.ChatModel {
92
92
  low: 5000,
93
93
  minimal: 200,
94
94
  };
95
+ thinkingLevelMap = {
96
+ high: "high",
97
+ medium: "high",
98
+ low: "low",
99
+ minimal: "high",
100
+ };
95
101
  getThinkingBudget(model, effort) {
96
102
  const m = this.thinkingBudgetModelMap.find((i) => i.pattern.test(model));
97
103
  if (!m?.support)
@@ -110,16 +116,29 @@ class GeminiChatModel extends core_1.ChatModel {
110
116
  const model = modelOptions.model || this.credential.model;
111
117
  const { contents, config } = await this.buildContents(input);
112
118
  const thinkingBudget = this.getThinkingBudget(model, modelOptions.reasoningEffort);
119
+ const getThinkingConfig = () => {
120
+ if (thinkingBudget.support) {
121
+ return {
122
+ includeThoughts: true,
123
+ thinkingBudget: thinkingBudget.budget,
124
+ };
125
+ }
126
+ if (model.includes("gemini-3")) {
127
+ const thinkingLevel = typeof modelOptions.reasoningEffort === "string"
128
+ ? this.thinkingLevelMap[modelOptions.reasoningEffort] || this.thinkingLevelMap["high"]
129
+ : this.thinkingLevelMap["high"];
130
+ return {
131
+ includeThoughts: true,
132
+ thinkingLevel: thinkingLevel,
133
+ };
134
+ }
135
+ return undefined;
136
+ };
113
137
  const parameters = {
114
138
  model,
115
139
  contents,
116
140
  config: {
117
- thinkingConfig: thinkingBudget.support
118
- ? {
119
- includeThoughts: true,
120
- thinkingBudget: thinkingBudget.budget,
121
- }
122
- : undefined,
141
+ thinkingConfig: getThinkingConfig(),
123
142
  responseModalities: modelOptions.modalities,
124
143
  temperature: modelOptions.temperature,
125
144
  topP: modelOptions.topP,
@@ -171,14 +190,21 @@ class GeminiChatModel extends core_1.ChatModel {
171
190
  json = part.functionCall.args;
172
191
  }
173
192
  else {
174
- toolCalls.push({
193
+ const toolCall = {
175
194
  id: part.functionCall.id || (0, uuid_1.v7)(),
176
195
  type: "function",
177
196
  function: {
178
197
  name: part.functionCall.name,
179
198
  arguments: part.functionCall.args || {},
180
199
  },
181
- });
200
+ };
201
+ // Preserve thought_signature for 3.x models
202
+ if (part.thoughtSignature && model.includes("gemini-3")) {
203
+ toolCall.metadata = {
204
+ thoughtSignature: part.thoughtSignature,
205
+ };
206
+ }
207
+ toolCalls.push(toolCall);
182
208
  yield { delta: { json: { toolCalls } } };
183
209
  }
184
210
  }
@@ -340,13 +366,20 @@ class GeminiChatModel extends core_1.ChatModel {
340
366
  role: msg.role === "agent" ? "model" : msg.role === "user" ? "user" : undefined,
341
367
  };
342
368
  if (msg.toolCalls) {
343
- content.parts = msg.toolCalls.map((call) => ({
344
- functionCall: {
345
- id: call.id,
346
- name: call.function.name,
347
- args: call.function.arguments,
348
- },
349
- }));
369
+ content.parts = msg.toolCalls.map((call) => {
370
+ const part = {
371
+ functionCall: {
372
+ id: call.id,
373
+ name: call.function.name,
374
+ args: call.function.arguments,
375
+ },
376
+ };
377
+ // Restore thought_signature for 3.x models
378
+ if (call.metadata?.thoughtSignature) {
379
+ part.thoughtSignature = call.metadata.thoughtSignature;
380
+ }
381
+ return part;
382
+ });
350
383
  }
351
384
  else if (msg.toolCallId) {
352
385
  const call = input.messages
@@ -57,10 +57,10 @@ export declare class GeminiChatModel extends ChatModel {
57
57
  modalities?: import("@aigne/core").Modality[] | {
58
58
  $get: string;
59
59
  } | undefined;
60
- preferInputFileType?: "file" | "url" | {
60
+ preferInputFileType?: "url" | "file" | {
61
61
  $get: string;
62
62
  } | undefined;
63
- reasoningEffort?: number | "minimal" | "low" | "medium" | "high" | {
63
+ reasoningEffort?: number | "high" | "low" | "minimal" | "medium" | {
64
64
  $get: string;
65
65
  } | undefined;
66
66
  }> | undefined;
@@ -82,6 +82,12 @@ export declare class GeminiChatModel extends ChatModel {
82
82
  low: number;
83
83
  minimal: number;
84
84
  };
85
+ protected thinkingLevelMap: {
86
+ high: string;
87
+ medium: string;
88
+ low: string;
89
+ minimal: string;
90
+ };
85
91
  protected getThinkingBudget(model: string, effort: ChatModelInputOptions["reasoningEffort"]): {
86
92
  support: boolean;
87
93
  budget?: number;
@@ -57,10 +57,10 @@ export declare class GeminiChatModel extends ChatModel {
57
57
  modalities?: import("@aigne/core").Modality[] | {
58
58
  $get: string;
59
59
  } | undefined;
60
- preferInputFileType?: "file" | "url" | {
60
+ preferInputFileType?: "url" | "file" | {
61
61
  $get: string;
62
62
  } | undefined;
63
- reasoningEffort?: number | "minimal" | "low" | "medium" | "high" | {
63
+ reasoningEffort?: number | "high" | "low" | "minimal" | "medium" | {
64
64
  $get: string;
65
65
  } | undefined;
66
66
  }> | undefined;
@@ -82,6 +82,12 @@ export declare class GeminiChatModel extends ChatModel {
82
82
  low: number;
83
83
  minimal: number;
84
84
  };
85
+ protected thinkingLevelMap: {
86
+ high: string;
87
+ medium: string;
88
+ low: string;
89
+ minimal: string;
90
+ };
85
91
  protected getThinkingBudget(model: string, effort: ChatModelInputOptions["reasoningEffort"]): {
86
92
  support: boolean;
87
93
  budget?: number;
@@ -89,6 +89,12 @@ export class GeminiChatModel extends ChatModel {
89
89
  low: 5000,
90
90
  minimal: 200,
91
91
  };
92
+ thinkingLevelMap = {
93
+ high: "high",
94
+ medium: "high",
95
+ low: "low",
96
+ minimal: "high",
97
+ };
92
98
  getThinkingBudget(model, effort) {
93
99
  const m = this.thinkingBudgetModelMap.find((i) => i.pattern.test(model));
94
100
  if (!m?.support)
@@ -107,16 +113,29 @@ export class GeminiChatModel extends ChatModel {
107
113
  const model = modelOptions.model || this.credential.model;
108
114
  const { contents, config } = await this.buildContents(input);
109
115
  const thinkingBudget = this.getThinkingBudget(model, modelOptions.reasoningEffort);
116
+ const getThinkingConfig = () => {
117
+ if (thinkingBudget.support) {
118
+ return {
119
+ includeThoughts: true,
120
+ thinkingBudget: thinkingBudget.budget,
121
+ };
122
+ }
123
+ if (model.includes("gemini-3")) {
124
+ const thinkingLevel = typeof modelOptions.reasoningEffort === "string"
125
+ ? this.thinkingLevelMap[modelOptions.reasoningEffort] || this.thinkingLevelMap["high"]
126
+ : this.thinkingLevelMap["high"];
127
+ return {
128
+ includeThoughts: true,
129
+ thinkingLevel: thinkingLevel,
130
+ };
131
+ }
132
+ return undefined;
133
+ };
110
134
  const parameters = {
111
135
  model,
112
136
  contents,
113
137
  config: {
114
- thinkingConfig: thinkingBudget.support
115
- ? {
116
- includeThoughts: true,
117
- thinkingBudget: thinkingBudget.budget,
118
- }
119
- : undefined,
138
+ thinkingConfig: getThinkingConfig(),
120
139
  responseModalities: modelOptions.modalities,
121
140
  temperature: modelOptions.temperature,
122
141
  topP: modelOptions.topP,
@@ -168,14 +187,21 @@ export class GeminiChatModel extends ChatModel {
168
187
  json = part.functionCall.args;
169
188
  }
170
189
  else {
171
- toolCalls.push({
190
+ const toolCall = {
172
191
  id: part.functionCall.id || v7(),
173
192
  type: "function",
174
193
  function: {
175
194
  name: part.functionCall.name,
176
195
  arguments: part.functionCall.args || {},
177
196
  },
178
- });
197
+ };
198
+ // Preserve thought_signature for 3.x models
199
+ if (part.thoughtSignature && model.includes("gemini-3")) {
200
+ toolCall.metadata = {
201
+ thoughtSignature: part.thoughtSignature,
202
+ };
203
+ }
204
+ toolCalls.push(toolCall);
179
205
  yield { delta: { json: { toolCalls } } };
180
206
  }
181
207
  }
@@ -337,13 +363,20 @@ export class GeminiChatModel extends ChatModel {
337
363
  role: msg.role === "agent" ? "model" : msg.role === "user" ? "user" : undefined,
338
364
  };
339
365
  if (msg.toolCalls) {
340
- content.parts = msg.toolCalls.map((call) => ({
341
- functionCall: {
342
- id: call.id,
343
- name: call.function.name,
344
- args: call.function.arguments,
345
- },
346
- }));
366
+ content.parts = msg.toolCalls.map((call) => {
367
+ const part = {
368
+ functionCall: {
369
+ id: call.id,
370
+ name: call.function.name,
371
+ args: call.function.arguments,
372
+ },
373
+ };
374
+ // Restore thought_signature for 3.x models
375
+ if (call.metadata?.thoughtSignature) {
376
+ part.thoughtSignature = call.metadata.thoughtSignature;
377
+ }
378
+ return part;
379
+ });
347
380
  }
348
381
  else if (msg.toolCallId) {
349
382
  const call = input.messages
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@aigne/gemini",
3
- "version": "0.14.10-beta",
3
+ "version": "0.14.10-beta.3",
4
4
  "description": "AIGNE Gemini SDK for integrating with Google's Gemini AI models",
5
5
  "publishConfig": {
6
6
  "access": "public"
@@ -36,10 +36,10 @@
36
36
  },
37
37
  "dependencies": {
38
38
  "@aigne/uuid": "^13.0.1",
39
- "@google/genai": "^1.24.0",
39
+ "@google/genai": "^1.30.0",
40
40
  "zod": "^3.25.67",
41
41
  "zod-to-json-schema": "^3.24.6",
42
- "@aigne/core": "^1.68.3-beta",
42
+ "@aigne/core": "^1.69.0-beta.2",
43
43
  "@aigne/platform-helpers": "^0.6.4"
44
44
  },
45
45
  "devDependencies": {
@@ -48,7 +48,7 @@
48
48
  "npm-run-all": "^4.1.5",
49
49
  "rimraf": "^6.0.1",
50
50
  "typescript": "^5.9.2",
51
- "@aigne/test-utils": "^0.5.63-beta"
51
+ "@aigne/test-utils": "^0.5.63-beta.3"
52
52
  },
53
53
  "scripts": {
54
54
  "lint": "tsc --noEmit",