@aigne/gemini 0.14.16-beta.9 → 0.14.17-beta

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,293 @@
1
1
  # Changelog
2
2
 
3
+ ## [0.14.17-beta](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16...gemini-v0.14.17-beta) (2026-01-20)
4
+
5
+
6
+ ### Dependencies
7
+
8
+ * The following workspace dependencies were updated
9
+ * dependencies
10
+ * @aigne/core bumped to 1.73.0-beta
11
+ * devDependencies
12
+ * @aigne/test-utils bumped to 0.5.70-beta
13
+
14
+ ## [0.14.16](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.26...gemini-v0.14.16) (2026-01-16)
15
+
16
+
17
+ ### Dependencies
18
+
19
+ * The following workspace dependencies were updated
20
+ * dependencies
21
+ * @aigne/core bumped to 1.72.0
22
+ * @aigne/platform-helpers bumped to 0.6.7
23
+ * devDependencies
24
+ * @aigne/test-utils bumped to 0.5.69
25
+
26
+ ## [0.14.16-beta.26](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.25...gemini-v0.14.16-beta.26) (2026-01-16)
27
+
28
+
29
+ ### Features
30
+
31
+ * add dynamic model options resolution with getter pattern ([#708](https://github.com/AIGNE-io/aigne-framework/issues/708)) ([5ed5085](https://github.com/AIGNE-io/aigne-framework/commit/5ed5085203763c70194853c56edc13acf56d81c6))
32
+ * add modalities support for chat model ([#454](https://github.com/AIGNE-io/aigne-framework/issues/454)) ([70d1bf6](https://github.com/AIGNE-io/aigne-framework/commit/70d1bf631f4e711235d89c6df8ee210a19179b30))
33
+ * add prompt caching for OpenAI/Gemini/Anthropic and cache token display ([#838](https://github.com/AIGNE-io/aigne-framework/issues/838)) ([46c628f](https://github.com/AIGNE-io/aigne-framework/commit/46c628f180572ea1b955d1a9888aad6145204842))
34
+ * add reasoningEffort option for chat model ([#680](https://github.com/AIGNE-io/aigne-framework/issues/680)) ([f69d232](https://github.com/AIGNE-io/aigne-framework/commit/f69d232d714d4a3e4946bdc8c6598747c9bcbd57))
35
+ * add thinking support to Gemini chat models ([#650](https://github.com/AIGNE-io/aigne-framework/issues/650)) ([09b828b](https://github.com/AIGNE-io/aigne-framework/commit/09b828ba668d90cc6aac68a5e8190adb146b5e45))
36
+ * **core:** add nested getter pattern support for model options ([#796](https://github.com/AIGNE-io/aigne-framework/issues/796)) ([824b2fe](https://github.com/AIGNE-io/aigne-framework/commit/824b2fe55cb2a24620e2bb73b470532918fa2996))
37
+ * improve image model architecture and file handling ([#527](https://github.com/AIGNE-io/aigne-framework/issues/527)) ([4db50aa](https://github.com/AIGNE-io/aigne-framework/commit/4db50aa0387a1a0f045ca11aaa61613e36ca7597))
38
+ * **models:** support gemini 3.x thinking level and thoughtSignature ([#760](https://github.com/AIGNE-io/aigne-framework/issues/760)) ([243f2d4](https://github.com/AIGNE-io/aigne-framework/commit/243f2d457792a20ba2b87378576092e6f88e319c))
39
+ * **model:** support video model ([#647](https://github.com/AIGNE-io/aigne-framework/issues/647)) ([de81742](https://github.com/AIGNE-io/aigne-framework/commit/de817421ef1dd3246d0d8c51ff12f0a855658f9f))
40
+ * support custom prefer input file type ([#469](https://github.com/AIGNE-io/aigne-framework/issues/469)) ([db0161b](https://github.com/AIGNE-io/aigne-framework/commit/db0161bbac52542c771ee2f40f361636b0668075))
41
+ * support define agent by third library & orchestrator agent refactor ([#799](https://github.com/AIGNE-io/aigne-framework/issues/799)) ([7264b11](https://github.com/AIGNE-io/aigne-framework/commit/7264b11ab6eed787e928367f09aa08d254968d40))
42
+
43
+
44
+ ### Bug Fixes
45
+
46
+ * add prefer input file type option for image model ([#536](https://github.com/AIGNE-io/aigne-framework/issues/536)) ([3cba8a5](https://github.com/AIGNE-io/aigne-framework/commit/3cba8a5562233a1567b49b6dd5c446c0760f5c4c))
47
+ * bump version ([696560f](https://github.com/AIGNE-io/aigne-framework/commit/696560fa2673eddcb4d00ac0523fbbbde7273cb3))
48
+ * bump version ([70d217c](https://github.com/AIGNE-io/aigne-framework/commit/70d217c8360dd0dda7f5f17011c4e92ec836e801))
49
+ * bump version ([af04b69](https://github.com/AIGNE-io/aigne-framework/commit/af04b6931951afa35d52065430acc7fef4b10087))
50
+ * bump version ([ba7ad18](https://github.com/AIGNE-io/aigne-framework/commit/ba7ad184fcf32b49bf0507a3cb638d20fb00690d))
51
+ * bump version ([93a1c10](https://github.com/AIGNE-io/aigne-framework/commit/93a1c10cf35f88eaafe91092481f5d087bd5b3a9))
52
+ * **core:** preserve Agent Skill in session compact and support complex tool result content ([#876](https://github.com/AIGNE-io/aigne-framework/issues/876)) ([edb86ae](https://github.com/AIGNE-io/aigne-framework/commit/edb86ae2b9cfe56a8f08b276f843606e310566cf))
53
+ * **core:** simplify token-estimator logic for remaining characters ([45d43cc](https://github.com/AIGNE-io/aigne-framework/commit/45d43ccd3afd636cfb459eea2e6551e8f9c53765))
54
+ * correctly calculate token usage for gemini model ([7fd1328](https://github.com/AIGNE-io/aigne-framework/commit/7fd13289d3d0f8e062211f7c6dd5cb56e5318c1b))
55
+ * correct run example & doc improvements ([#707](https://github.com/AIGNE-io/aigne-framework/issues/707)) ([f98fc5d](https://github.com/AIGNE-io/aigne-framework/commit/f98fc5df28fd6ce6134128c2f0e5395c1554b740))
56
+ * **docs:** update video mode docs ([#695](https://github.com/AIGNE-io/aigne-framework/issues/695)) ([d691001](https://github.com/AIGNE-io/aigne-framework/commit/d69100169457c16c14f2f3e2f7fcd6b2a99330f3))
57
+ * **gemini:** handle empty responses when files are present ([#648](https://github.com/AIGNE-io/aigne-framework/issues/648)) ([f4e259c](https://github.com/AIGNE-io/aigne-framework/commit/f4e259c5e5c687c347bb5cf29cbb0b5bf4d0d4a1))
58
+ * **gemini:** implement retry mechanism for empty responses with structured output fallback ([#638](https://github.com/AIGNE-io/aigne-framework/issues/638)) ([d33c8bb](https://github.com/AIGNE-io/aigne-framework/commit/d33c8bb9711aadddef9687d6cf472a179cd8ed9c))
59
+ * **gemini:** include thoughts token count in output token usage ([#669](https://github.com/AIGNE-io/aigne-framework/issues/669)) ([f6ff10c](https://github.com/AIGNE-io/aigne-framework/commit/f6ff10c33b0612a0bc416842c5a5bec3850a3fe6))
60
+ * **gemini:** properly handle thinking level for gemini 3.x models ([#763](https://github.com/AIGNE-io/aigne-framework/issues/763)) ([a5dc892](https://github.com/AIGNE-io/aigne-framework/commit/a5dc8921635811ed9ca2ff9e3e0699006f79cf22))
61
+ * **gemini:** return reasoningEffort in model options for gemini-3 ([#765](https://github.com/AIGNE-io/aigne-framework/issues/765)) ([682bfda](https://github.com/AIGNE-io/aigne-framework/commit/682bfda353b31fd432232baa57f8e0b0838eb76d))
62
+ * **gemini:** should include at least one user message ([#521](https://github.com/AIGNE-io/aigne-framework/issues/521)) ([eb2752e](https://github.com/AIGNE-io/aigne-framework/commit/eb2752ed7d78f59c435ecc3ccb7227e804e3781e))
63
+ * **gemini:** use StructuredOutputError to trigger retry for missing JSON response ([#660](https://github.com/AIGNE-io/aigne-framework/issues/660)) ([e8826ed](https://github.com/AIGNE-io/aigne-framework/commit/e8826ed96db57bfcce0b577881bf0d2fd828c269))
64
+ * improve image model parameters ([#530](https://github.com/AIGNE-io/aigne-framework/issues/530)) ([d66b5ca](https://github.com/AIGNE-io/aigne-framework/commit/d66b5ca01e14baad2712cc1a84930cdb63703232))
65
+ * improve test coverage tracking and reporting ([#903](https://github.com/AIGNE-io/aigne-framework/issues/903)) ([031144e](https://github.com/AIGNE-io/aigne-framework/commit/031144e74f29e882cffe52ffda8f7a18c76ace7f))
66
+ * **model:** handle large video files by uploading to Files API ([#769](https://github.com/AIGNE-io/aigne-framework/issues/769)) ([5fd7661](https://github.com/AIGNE-io/aigne-framework/commit/5fd76613bd7301cc76bde933de2095a6d86f8c7e))
67
+ * **models:** add image parameters support for video generation ([#684](https://github.com/AIGNE-io/aigne-framework/issues/684)) ([b048b7f](https://github.com/AIGNE-io/aigne-framework/commit/b048b7f92bd7a532dbdbeb6fb5fa5499bae6b953))
68
+ * **models:** add imageConfig to gemini image model ([#621](https://github.com/AIGNE-io/aigne-framework/issues/621)) ([252de7a](https://github.com/AIGNE-io/aigne-framework/commit/252de7a10701c4f5302c2fff977c88e5e833b7b1))
69
+ * **models:** add mimeType for transform file ([#667](https://github.com/AIGNE-io/aigne-framework/issues/667)) ([155a173](https://github.com/AIGNE-io/aigne-framework/commit/155a173e75aff1dbe870a1305455a4300942e07a))
70
+ * **models:** aigne hub video params ([#665](https://github.com/AIGNE-io/aigne-framework/issues/665)) ([d00f836](https://github.com/AIGNE-io/aigne-framework/commit/d00f8368422d8e3707b974e1aff06714731ebb28))
71
+ * **models:** auto retry when got empty response from gemini ([#636](https://github.com/AIGNE-io/aigne-framework/issues/636)) ([9367cef](https://github.com/AIGNE-io/aigne-framework/commit/9367cef49ea4c0c87b8a36b454deb2efaee6886f))
72
+ * **models:** enhance gemini model tool use with status fields ([#634](https://github.com/AIGNE-io/aigne-framework/issues/634)) ([067b175](https://github.com/AIGNE-io/aigne-framework/commit/067b175c8e31bb5b1a6d0fc5a5cfb2d070d8d709))
73
+ * **models:** improve message structure handling and enable auto-message options ([#657](https://github.com/AIGNE-io/aigne-framework/issues/657)) ([233d70c](https://github.com/AIGNE-io/aigne-framework/commit/233d70cb292b937200fada8434f33d957d766ad6))
74
+ * **models:** parallel tool calls for gemini model ([#844](https://github.com/AIGNE-io/aigne-framework/issues/844)) ([adfae33](https://github.com/AIGNE-io/aigne-framework/commit/adfae337709295b594a8f5da61213535d2ef61aa))
75
+ * **model:** transform local file to base64 before request llm ([#462](https://github.com/AIGNE-io/aigne-framework/issues/462)) ([58ef5d7](https://github.com/AIGNE-io/aigne-framework/commit/58ef5d77046c49f3c4eed15b7f0cc283cbbcd74a))
76
+ * **model:** updated default video duration settings for AI video models ([#663](https://github.com/AIGNE-io/aigne-framework/issues/663)) ([1203941](https://github.com/AIGNE-io/aigne-framework/commit/12039411aaef77ba665e8edfb0fe6f8097c43e39))
77
+ * should not return local path from aigne hub service ([#460](https://github.com/AIGNE-io/aigne-framework/issues/460)) ([c959717](https://github.com/AIGNE-io/aigne-framework/commit/c95971774f7e84dbeb3313f60b3e6464e2bb22e4))
78
+ * standardize file parameter naming across models ([#534](https://github.com/AIGNE-io/aigne-framework/issues/534)) ([f159a9d](https://github.com/AIGNE-io/aigne-framework/commit/f159a9d6af21ec0e99641996b150560929845845))
79
+ * support gemini-2.0-flash model for image model ([#429](https://github.com/AIGNE-io/aigne-framework/issues/429)) ([5a0bba1](https://github.com/AIGNE-io/aigne-framework/commit/5a0bba197cf8785384b70302f86cf702d04b7fc4))
80
+ * support optional field structured output for gemini ([#468](https://github.com/AIGNE-io/aigne-framework/issues/468)) ([70c6279](https://github.com/AIGNE-io/aigne-framework/commit/70c62795039a2862e3333f26707329489bf938de))
81
+ * **transport:** improve HTTP client option handling and error serialization ([#445](https://github.com/AIGNE-io/aigne-framework/issues/445)) ([d3bcdd2](https://github.com/AIGNE-io/aigne-framework/commit/d3bcdd23ab8011a7d40fc157fd61eb240494c7a5))
82
+ * update deps compatibility in CommonJS environment ([#580](https://github.com/AIGNE-io/aigne-framework/issues/580)) ([a1e35d0](https://github.com/AIGNE-io/aigne-framework/commit/a1e35d016405accb51c1aeb6a544503a1c78e912))
83
+
84
+
85
+ ### Dependencies
86
+
87
+ * The following workspace dependencies were updated
88
+ * dependencies
89
+ * @aigne/core bumped to 1.72.0-beta.25
90
+ * devDependencies
91
+ * @aigne/test-utils bumped to 0.5.69-beta.25
92
+
93
+ ## [0.14.16-beta.25](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.24...gemini-v0.14.16-beta.25) (2026-01-16)
94
+
95
+
96
+ ### Dependencies
97
+
98
+ * The following workspace dependencies were updated
99
+ * dependencies
100
+ * @aigne/core bumped to 1.72.0-beta.24
101
+ * devDependencies
102
+ * @aigne/test-utils bumped to 0.5.69-beta.24
103
+
104
+ ## [0.14.16-beta.24](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.23...gemini-v0.14.16-beta.24) (2026-01-15)
105
+
106
+
107
+ ### Dependencies
108
+
109
+ * The following workspace dependencies were updated
110
+ * dependencies
111
+ * @aigne/core bumped to 1.72.0-beta.23
112
+ * devDependencies
113
+ * @aigne/test-utils bumped to 0.5.69-beta.23
114
+
115
+ ## [0.14.16-beta.23](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.22...gemini-v0.14.16-beta.23) (2026-01-15)
116
+
117
+
118
+ ### Dependencies
119
+
120
+ * The following workspace dependencies were updated
121
+ * dependencies
122
+ * @aigne/core bumped to 1.72.0-beta.22
123
+ * devDependencies
124
+ * @aigne/test-utils bumped to 0.5.69-beta.22
125
+
126
+ ## [0.14.16-beta.22](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.21...gemini-v0.14.16-beta.22) (2026-01-15)
127
+
128
+
129
+ ### Dependencies
130
+
131
+ * The following workspace dependencies were updated
132
+ * dependencies
133
+ * @aigne/core bumped to 1.72.0-beta.21
134
+ * devDependencies
135
+ * @aigne/test-utils bumped to 0.5.69-beta.21
136
+
137
+ ## [0.14.16-beta.21](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.20...gemini-v0.14.16-beta.21) (2026-01-15)
138
+
139
+
140
+ ### Dependencies
141
+
142
+ * The following workspace dependencies were updated
143
+ * dependencies
144
+ * @aigne/core bumped to 1.72.0-beta.20
145
+ * devDependencies
146
+ * @aigne/test-utils bumped to 0.5.69-beta.20
147
+
148
+ ## [0.14.16-beta.20](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.19...gemini-v0.14.16-beta.20) (2026-01-14)
149
+
150
+
151
+ ### Bug Fixes
152
+
153
+ * improve test coverage tracking and reporting ([#903](https://github.com/AIGNE-io/aigne-framework/issues/903)) ([031144e](https://github.com/AIGNE-io/aigne-framework/commit/031144e74f29e882cffe52ffda8f7a18c76ace7f))
154
+
155
+
156
+ ### Dependencies
157
+
158
+ * The following workspace dependencies were updated
159
+ * dependencies
160
+ * @aigne/core bumped to 1.72.0-beta.19
161
+ * @aigne/platform-helpers bumped to 0.6.7-beta.2
162
+ * devDependencies
163
+ * @aigne/test-utils bumped to 0.5.69-beta.19
164
+
165
+ ## [0.14.16-beta.19](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.18...gemini-v0.14.16-beta.19) (2026-01-13)
166
+
167
+
168
+ ### Dependencies
169
+
170
+ * The following workspace dependencies were updated
171
+ * dependencies
172
+ * @aigne/core bumped to 1.72.0-beta.18
173
+ * devDependencies
174
+ * @aigne/test-utils bumped to 0.5.69-beta.18
175
+
176
+ ## [0.14.16-beta.18](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.17...gemini-v0.14.16-beta.18) (2026-01-12)
177
+
178
+
179
+ ### Dependencies
180
+
181
+ * The following workspace dependencies were updated
182
+ * dependencies
183
+ * @aigne/core bumped to 1.72.0-beta.17
184
+ * devDependencies
185
+ * @aigne/test-utils bumped to 0.5.69-beta.17
186
+
187
+ ## [0.14.16-beta.17](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.16...gemini-v0.14.16-beta.17) (2026-01-12)
188
+
189
+
190
+ ### Dependencies
191
+
192
+ * The following workspace dependencies were updated
193
+ * dependencies
194
+ * @aigne/core bumped to 1.72.0-beta.16
195
+ * devDependencies
196
+ * @aigne/test-utils bumped to 0.5.69-beta.16
197
+
198
+ ## [0.14.16-beta.16](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.15...gemini-v0.14.16-beta.16) (2026-01-10)
199
+
200
+
201
+ ### Bug Fixes
202
+
203
+ * **core:** simplify token-estimator logic for remaining characters ([45d43cc](https://github.com/AIGNE-io/aigne-framework/commit/45d43ccd3afd636cfb459eea2e6551e8f9c53765))
204
+
205
+
206
+ ### Dependencies
207
+
208
+ * The following workspace dependencies were updated
209
+ * dependencies
210
+ * @aigne/core bumped to 1.72.0-beta.15
211
+ * devDependencies
212
+ * @aigne/test-utils bumped to 0.5.69-beta.15
213
+
214
+ ## [0.14.16-beta.15](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.14...gemini-v0.14.16-beta.15) (2026-01-09)
215
+
216
+
217
+ ### Dependencies
218
+
219
+ * The following workspace dependencies were updated
220
+ * dependencies
221
+ * @aigne/core bumped to 1.72.0-beta.14
222
+ * devDependencies
223
+ * @aigne/test-utils bumped to 0.5.69-beta.14
224
+
225
+ ## [0.14.16-beta.14](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.13...gemini-v0.14.16-beta.14) (2026-01-08)
226
+
227
+
228
+ ### Bug Fixes
229
+
230
+ * bump version ([696560f](https://github.com/AIGNE-io/aigne-framework/commit/696560fa2673eddcb4d00ac0523fbbbde7273cb3))
231
+
232
+
233
+ ### Dependencies
234
+
235
+ * The following workspace dependencies were updated
236
+ * dependencies
237
+ * @aigne/core bumped to 1.72.0-beta.13
238
+ * @aigne/platform-helpers bumped to 0.6.7-beta.1
239
+ * devDependencies
240
+ * @aigne/test-utils bumped to 0.5.69-beta.13
241
+
242
+ ## [0.14.16-beta.13](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.12...gemini-v0.14.16-beta.13) (2026-01-07)
243
+
244
+
245
+ ### Dependencies
246
+
247
+ * The following workspace dependencies were updated
248
+ * dependencies
249
+ * @aigne/core bumped to 1.72.0-beta.12
250
+ * devDependencies
251
+ * @aigne/test-utils bumped to 0.5.69-beta.12
252
+
253
+ ## [0.14.16-beta.12](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.11...gemini-v0.14.16-beta.12) (2026-01-06)
254
+
255
+
256
+ ### Bug Fixes
257
+
258
+ * **core:** preserve Agent Skill in session compact and support complex tool result content ([#876](https://github.com/AIGNE-io/aigne-framework/issues/876)) ([edb86ae](https://github.com/AIGNE-io/aigne-framework/commit/edb86ae2b9cfe56a8f08b276f843606e310566cf))
259
+
260
+
261
+ ### Dependencies
262
+
263
+ * The following workspace dependencies were updated
264
+ * dependencies
265
+ * @aigne/core bumped to 1.72.0-beta.11
266
+ * devDependencies
267
+ * @aigne/test-utils bumped to 0.5.69-beta.11
268
+
269
+ ## [0.14.16-beta.11](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.10...gemini-v0.14.16-beta.11) (2026-01-06)
270
+
271
+
272
+ ### Dependencies
273
+
274
+ * The following workspace dependencies were updated
275
+ * dependencies
276
+ * @aigne/core bumped to 1.72.0-beta.10
277
+ * devDependencies
278
+ * @aigne/test-utils bumped to 0.5.69-beta.10
279
+
280
+ ## [0.14.16-beta.10](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.9...gemini-v0.14.16-beta.10) (2026-01-02)
281
+
282
+
283
+ ### Dependencies
284
+
285
+ * The following workspace dependencies were updated
286
+ * dependencies
287
+ * @aigne/core bumped to 1.72.0-beta.9
288
+ * devDependencies
289
+ * @aigne/test-utils bumped to 0.5.69-beta.9
290
+
3
291
  ## [0.14.16-beta.9](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.16-beta.8...gemini-v0.14.16-beta.9) (2025-12-31)
4
292
 
5
293
 
@@ -67,6 +67,8 @@ export declare class GeminiChatModel extends ChatModel {
67
67
  $get: string;
68
68
  } | undefined;
69
69
  }> | undefined;
70
+ countTokens(input: ChatModelInput): Promise<number>;
71
+ private contentUnionToContent;
70
72
  process(input: ChatModelInput, options: AgentInvokeOptions): PromiseOrValue<AgentProcessResult<ChatModelOutput>>;
71
73
  protected thinkingBudgetModelMap: ({
72
74
  pattern: RegExp;
@@ -104,10 +106,12 @@ export declare class GeminiChatModel extends ChatModel {
104
106
  budget?: number;
105
107
  level?: ThinkingLevel;
106
108
  };
109
+ private getParameters;
107
110
  private processInput;
108
111
  private buildConfig;
109
112
  private buildTools;
110
113
  private buildVideoContentParts;
111
114
  private buildContents;
115
+ private contentToParts;
112
116
  private ensureMessagesHasUserMessage;
113
117
  }
@@ -61,6 +61,40 @@ class GeminiChatModel extends core_1.ChatModel {
61
61
  get modelOptions() {
62
62
  return this.options?.modelOptions;
63
63
  }
64
+ async countTokens(input) {
65
+ const { model, ...request } = await this.getParameters(input);
66
+ const contents = [];
67
+ const { systemInstruction, tools } = request.config ?? {};
68
+ if (systemInstruction)
69
+ contents.push(this.contentUnionToContent(systemInstruction));
70
+ if (tools?.length)
71
+ contents.push({ role: "system", parts: [{ text: JSON.stringify(tools) }] });
72
+ contents.push(...[request.contents].flat().map(this.contentUnionToContent));
73
+ const tokens = (await this.googleClient.models.countTokens({
74
+ model,
75
+ contents,
76
+ })).totalTokens;
77
+ if (!(0, type_utils_js_1.isNil)(tokens))
78
+ return tokens;
79
+ return super.countTokens(input);
80
+ }
81
+ contentUnionToContent(content) {
82
+ if (typeof content === "object" && "parts" in content) {
83
+ return { role: "system", parts: content.parts };
84
+ }
85
+ else if (typeof content === "string") {
86
+ return { role: "system", parts: [{ text: content }] };
87
+ }
88
+ else if (Array.isArray(content)) {
89
+ return {
90
+ role: "system",
91
+ parts: content.map((i) => (typeof i === "string" ? { text: i } : i)),
92
+ };
93
+ }
94
+ else {
95
+ return { role: "system", parts: [content] };
96
+ }
97
+ }
64
98
  process(input, options) {
65
99
  return this.processInput(input, options);
66
100
  }
@@ -135,10 +169,10 @@ class GeminiChatModel extends core_1.ChatModel {
135
169
  budget = Math.min(m.max, budget);
136
170
  return { support: true, budget };
137
171
  }
138
- async *processInput(input, options) {
172
+ async getParameters(input) {
139
173
  const { modelOptions = {} } = input;
140
174
  const model = modelOptions.model || this.credential.model;
141
- const { contents, config } = await this.buildContents(input, options);
175
+ const { contents, config } = await this.buildContents(input);
142
176
  const thinkingBudget = this.getThinkingBudget(model, modelOptions.reasoningEffort);
143
177
  const parameters = {
144
178
  model,
@@ -160,6 +194,10 @@ class GeminiChatModel extends core_1.ChatModel {
160
194
  ...(await this.buildConfig(input)),
161
195
  },
162
196
  };
197
+ return parameters;
198
+ }
199
+ async *processInput(input, options) {
200
+ const parameters = await this.getParameters(input);
163
201
  const response = await this.googleClient.models.generateContentStream(parameters);
164
202
  let usage = {
165
203
  inputTokens: 0,
@@ -211,7 +249,7 @@ class GeminiChatModel extends core_1.ChatModel {
211
249
  },
212
250
  };
213
251
  // Preserve thought_signature for 3.x models
214
- if (part.thoughtSignature && model.includes("gemini-3")) {
252
+ if (part.thoughtSignature && parameters.model.includes("gemini-3")) {
215
253
  toolCall.metadata = {
216
254
  thoughtSignature: part.thoughtSignature,
217
255
  };
@@ -362,8 +400,8 @@ class GeminiChatModel extends core_1.ChatModel {
362
400
  };
363
401
  return { tools, toolConfig: { functionCallingConfig } };
364
402
  }
365
- async buildVideoContentParts(media, options) {
366
- const { path: filePath, mimeType: fileMimeType } = await this.transformFileType("local", media, options);
403
+ async buildVideoContentParts(media) {
404
+ const { path: filePath, mimeType: fileMimeType } = await this.transformFileType("local", media);
367
405
  if (filePath) {
368
406
  const stats = await index_js_1.nodejs.fs.stat(filePath);
369
407
  const fileSizeInBytes = stats.size;
@@ -394,7 +432,7 @@ class GeminiChatModel extends core_1.ChatModel {
394
432
  }
395
433
  }
396
434
  }
397
- async buildContents(input, options) {
435
+ async buildContents(input) {
398
436
  const result = {
399
437
  contents: [],
400
438
  };
@@ -438,55 +476,46 @@ class GeminiChatModel extends core_1.ChatModel {
438
476
  .find((c) => c?.id === msg.toolCallId);
439
477
  if (!call)
440
478
  throw new Error(`Tool call not found: ${msg.toolCallId}`);
441
- const output = (0, yaml_1.parse)(msg.content);
442
- const isError = "error" in output && Boolean(input.error);
443
- const response = {
444
- tool: call.function.name,
479
+ if (!msg.content)
480
+ throw new Error("Tool call must have content");
481
+ // parse tool result as a record
482
+ let toolResult;
483
+ {
484
+ let text;
485
+ if (typeof msg.content === "string")
486
+ text = msg.content;
487
+ else if (msg.content?.length === 1) {
488
+ const first = msg.content[0];
489
+ if (first?.type === "text")
490
+ text = first.text;
491
+ }
492
+ if (text) {
493
+ try {
494
+ const obj = (0, yaml_1.parse)(text);
495
+ if ((0, type_utils_js_1.isRecord)(obj))
496
+ toolResult = obj;
497
+ }
498
+ catch {
499
+ // ignore
500
+ }
501
+ if (!toolResult)
502
+ toolResult = { result: text };
503
+ }
504
+ }
505
+ const functionResponse = {
506
+ id: msg.toolCallId,
507
+ name: call.function.name,
445
508
  };
446
- // NOTE: base on the documentation of gemini api, the content should include `output` field for successful result or `error` field for failed result,
447
- // and base on the actual test, add a tool field presenting the tool name can improve the LLM understanding that which tool is called.
448
- if (isError) {
449
- Object.assign(response, { status: "error" }, output);
509
+ if (toolResult) {
510
+ functionResponse.response = toolResult;
450
511
  }
451
512
  else {
452
- Object.assign(response, { status: "success" });
453
- if ("output" in output) {
454
- Object.assign(response, output);
455
- }
456
- else {
457
- Object.assign(response, { output });
458
- }
513
+ functionResponse.parts = await this.contentToParts(msg.content);
459
514
  }
460
- content.parts = [
461
- {
462
- functionResponse: {
463
- id: msg.toolCallId,
464
- name: call.function.name,
465
- response,
466
- },
467
- },
468
- ];
515
+ content.parts = [{ functionResponse }];
469
516
  }
470
- else if (typeof msg.content === "string") {
471
- content.parts = [{ text: msg.content }];
472
- }
473
- else if (Array.isArray(msg.content)) {
474
- content.parts = await Promise.all(msg.content.map(async (item) => {
475
- switch (item.type) {
476
- case "text":
477
- return { text: item.text };
478
- case "url":
479
- return { fileData: { fileUri: item.url, mimeType: item.mimeType } };
480
- case "file": {
481
- const part = await this.buildVideoContentParts(item, options);
482
- if (part)
483
- return part;
484
- return { inlineData: { data: item.data, mimeType: item.mimeType } };
485
- }
486
- case "local":
487
- throw new Error(`Unsupported local file: ${item.path}, it should be converted to base64 at ChatModel`);
488
- }
489
- }));
517
+ else if (msg.content) {
518
+ content.parts = await this.contentToParts(msg.content);
490
519
  }
491
520
  return content;
492
521
  }))).filter(type_utils_js_1.isNonNullable);
@@ -497,6 +526,26 @@ class GeminiChatModel extends core_1.ChatModel {
497
526
  }
498
527
  return result;
499
528
  }
529
+ async contentToParts(content) {
530
+ if (typeof content === "string")
531
+ return [{ text: content }];
532
+ return Promise.all(content.map(async (item) => {
533
+ switch (item.type) {
534
+ case "text":
535
+ return { text: item.text };
536
+ case "url":
537
+ return { fileData: { fileUri: item.url, mimeType: item.mimeType } };
538
+ case "file": {
539
+ const part = await this.buildVideoContentParts(item);
540
+ if (part)
541
+ return part;
542
+ return { inlineData: { data: item.data, mimeType: item.mimeType } };
543
+ }
544
+ case "local":
545
+ throw new Error(`Unsupported local file: ${item.path}, it should be converted to base64 at ChatModel`);
546
+ }
547
+ }));
548
+ }
500
549
  ensureMessagesHasUserMessage(systems, contents) {
501
550
  // no messages but system messages
502
551
  if (!contents.length && systems.length) {
@@ -28,7 +28,7 @@ export declare class GeminiImageModel extends ImageModel<GeminiImageModelInput,
28
28
  * @param input The input to process
29
29
  * @returns The generated response
30
30
  */
31
- process(input: GeminiImageModelInput, options: AgentInvokeOptions): Promise<ImageModelOutput>;
31
+ process(input: GeminiImageModelInput, _options: AgentInvokeOptions): Promise<ImageModelOutput>;
32
32
  private generateImageByImagenModel;
33
33
  private generateImageByGeminiModel;
34
34
  }
@@ -52,7 +52,7 @@ class GeminiImageModel extends core_1.ImageModel {
52
52
  * @param input The input to process
53
53
  * @returns The generated response
54
54
  */
55
- async process(input, options) {
55
+ async process(input, _options) {
56
56
  const model = input.modelOptions?.model || this.credential.model;
57
57
  const responseFormat = input.responseFormat || "base64";
58
58
  if (responseFormat === "url") {
@@ -61,7 +61,7 @@ class GeminiImageModel extends core_1.ImageModel {
61
61
  if (model.includes("imagen")) {
62
62
  return this.generateImageByImagenModel(input);
63
63
  }
64
- return this.generateImageByGeminiModel(input, options);
64
+ return this.generateImageByGeminiModel(input);
65
65
  }
66
66
  async generateImageByImagenModel(input) {
67
67
  const model = input.modelOptions?.model || this.credential.model;
@@ -100,7 +100,7 @@ class GeminiImageModel extends core_1.ImageModel {
100
100
  model,
101
101
  };
102
102
  }
103
- async generateImageByGeminiModel(input, options) {
103
+ async generateImageByGeminiModel(input) {
104
104
  const model = input.modelOptions?.model || this.credential.model;
105
105
  const mergedInput = { ...this.modelOptions, ...input.modelOptions, ...input };
106
106
  const inputKeys = [
@@ -135,7 +135,7 @@ class GeminiImageModel extends core_1.ImageModel {
135
135
  "imageConfig",
136
136
  ];
137
137
  const images = await Promise.all((0, type_utils_js_1.flat)(input.image).map(async (image) => {
138
- const { data, mimeType } = await this.transformFileType("file", image, options);
138
+ const { data, mimeType } = await this.transformFileType("file", image);
139
139
  return { inlineData: { data, mimeType } };
140
140
  }));
141
141
  const response = await this.client.models.generateContent({
@@ -88,7 +88,7 @@ class GeminiVideoModel extends core_1.VideoModel {
88
88
  if (mergedInput.personGeneration)
89
89
  config.personGeneration = mergedInput.personGeneration;
90
90
  if (mergedInput.lastFrame) {
91
- config.lastFrame = await this.transformFileType("file", mergedInput.lastFrame, options).then((file) => {
91
+ config.lastFrame = await this.transformFileType("file", mergedInput.lastFrame).then((file) => {
92
92
  return {
93
93
  imageBytes: file.data,
94
94
  mimeType: file.mimeType,
@@ -97,7 +97,7 @@ class GeminiVideoModel extends core_1.VideoModel {
97
97
  }
98
98
  if (mergedInput.referenceImages) {
99
99
  config.referenceImages = await Promise.all(mergedInput.referenceImages.map(async (image) => {
100
- return await this.transformFileType("file", image, options).then((file) => {
100
+ return await this.transformFileType("file", image).then((file) => {
101
101
  return {
102
102
  image: {
103
103
  imageBytes: file.data,
@@ -113,7 +113,7 @@ class GeminiVideoModel extends core_1.VideoModel {
113
113
  config,
114
114
  };
115
115
  if (mergedInput.image) {
116
- params.image = await this.transformFileType("file", mergedInput.image, options).then((file) => {
116
+ params.image = await this.transformFileType("file", mergedInput.image).then((file) => {
117
117
  return {
118
118
  imageBytes: file.data,
119
119
  mimeType: file.mimeType,
@@ -67,6 +67,8 @@ export declare class GeminiChatModel extends ChatModel {
67
67
  $get: string;
68
68
  } | undefined;
69
69
  }> | undefined;
70
+ countTokens(input: ChatModelInput): Promise<number>;
71
+ private contentUnionToContent;
70
72
  process(input: ChatModelInput, options: AgentInvokeOptions): PromiseOrValue<AgentProcessResult<ChatModelOutput>>;
71
73
  protected thinkingBudgetModelMap: ({
72
74
  pattern: RegExp;
@@ -104,10 +106,12 @@ export declare class GeminiChatModel extends ChatModel {
104
106
  budget?: number;
105
107
  level?: ThinkingLevel;
106
108
  };
109
+ private getParameters;
107
110
  private processInput;
108
111
  private buildConfig;
109
112
  private buildTools;
110
113
  private buildVideoContentParts;
111
114
  private buildContents;
115
+ private contentToParts;
112
116
  private ensureMessagesHasUserMessage;
113
117
  }
@@ -28,7 +28,7 @@ export declare class GeminiImageModel extends ImageModel<GeminiImageModelInput,
28
28
  * @param input The input to process
29
29
  * @returns The generated response
30
30
  */
31
- process(input: GeminiImageModelInput, options: AgentInvokeOptions): Promise<ImageModelOutput>;
31
+ process(input: GeminiImageModelInput, _options: AgentInvokeOptions): Promise<ImageModelOutput>;
32
32
  private generateImageByImagenModel;
33
33
  private generateImageByGeminiModel;
34
34
  }
@@ -67,6 +67,8 @@ export declare class GeminiChatModel extends ChatModel {
67
67
  $get: string;
68
68
  } | undefined;
69
69
  }> | undefined;
70
+ countTokens(input: ChatModelInput): Promise<number>;
71
+ private contentUnionToContent;
70
72
  process(input: ChatModelInput, options: AgentInvokeOptions): PromiseOrValue<AgentProcessResult<ChatModelOutput>>;
71
73
  protected thinkingBudgetModelMap: ({
72
74
  pattern: RegExp;
@@ -104,10 +106,12 @@ export declare class GeminiChatModel extends ChatModel {
104
106
  budget?: number;
105
107
  level?: ThinkingLevel;
106
108
  };
109
+ private getParameters;
107
110
  private processInput;
108
111
  private buildConfig;
109
112
  private buildTools;
110
113
  private buildVideoContentParts;
111
114
  private buildContents;
115
+ private contentToParts;
112
116
  private ensureMessagesHasUserMessage;
113
117
  }
@@ -1,7 +1,7 @@
1
1
  import { agentProcessResultToObject, ChatModel, StructuredOutputError, safeParseJSON, } from "@aigne/core";
2
2
  import { logger } from "@aigne/core/utils/logger.js";
3
3
  import { mergeUsage } from "@aigne/core/utils/model-utils.js";
4
- import { isNonNullable } from "@aigne/core/utils/type-utils.js";
4
+ import { isNil, isNonNullable, isRecord, } from "@aigne/core/utils/type-utils.js";
5
5
  import { nodejs } from "@aigne/platform-helpers/nodejs/index.js";
6
6
  import { v7 } from "@aigne/uuid";
7
7
  import { createPartFromUri, createUserContent, FunctionCallingConfigMode, GoogleGenAI, ThinkingLevel, } from "@google/genai";
@@ -58,6 +58,40 @@ export class GeminiChatModel extends ChatModel {
58
58
  get modelOptions() {
59
59
  return this.options?.modelOptions;
60
60
  }
61
+ async countTokens(input) {
62
+ const { model, ...request } = await this.getParameters(input);
63
+ const contents = [];
64
+ const { systemInstruction, tools } = request.config ?? {};
65
+ if (systemInstruction)
66
+ contents.push(this.contentUnionToContent(systemInstruction));
67
+ if (tools?.length)
68
+ contents.push({ role: "system", parts: [{ text: JSON.stringify(tools) }] });
69
+ contents.push(...[request.contents].flat().map(this.contentUnionToContent));
70
+ const tokens = (await this.googleClient.models.countTokens({
71
+ model,
72
+ contents,
73
+ })).totalTokens;
74
+ if (!isNil(tokens))
75
+ return tokens;
76
+ return super.countTokens(input);
77
+ }
78
+ contentUnionToContent(content) {
79
+ if (typeof content === "object" && "parts" in content) {
80
+ return { role: "system", parts: content.parts };
81
+ }
82
+ else if (typeof content === "string") {
83
+ return { role: "system", parts: [{ text: content }] };
84
+ }
85
+ else if (Array.isArray(content)) {
86
+ return {
87
+ role: "system",
88
+ parts: content.map((i) => (typeof i === "string" ? { text: i } : i)),
89
+ };
90
+ }
91
+ else {
92
+ return { role: "system", parts: [content] };
93
+ }
94
+ }
61
95
  process(input, options) {
62
96
  return this.processInput(input, options);
63
97
  }
@@ -132,10 +166,10 @@ export class GeminiChatModel extends ChatModel {
132
166
  budget = Math.min(m.max, budget);
133
167
  return { support: true, budget };
134
168
  }
135
- async *processInput(input, options) {
169
+ async getParameters(input) {
136
170
  const { modelOptions = {} } = input;
137
171
  const model = modelOptions.model || this.credential.model;
138
- const { contents, config } = await this.buildContents(input, options);
172
+ const { contents, config } = await this.buildContents(input);
139
173
  const thinkingBudget = this.getThinkingBudget(model, modelOptions.reasoningEffort);
140
174
  const parameters = {
141
175
  model,
@@ -157,6 +191,10 @@ export class GeminiChatModel extends ChatModel {
157
191
  ...(await this.buildConfig(input)),
158
192
  },
159
193
  };
194
+ return parameters;
195
+ }
196
+ async *processInput(input, options) {
197
+ const parameters = await this.getParameters(input);
160
198
  const response = await this.googleClient.models.generateContentStream(parameters);
161
199
  let usage = {
162
200
  inputTokens: 0,
@@ -208,7 +246,7 @@ export class GeminiChatModel extends ChatModel {
208
246
  },
209
247
  };
210
248
  // Preserve thought_signature for 3.x models
211
- if (part.thoughtSignature && model.includes("gemini-3")) {
249
+ if (part.thoughtSignature && parameters.model.includes("gemini-3")) {
212
250
  toolCall.metadata = {
213
251
  thoughtSignature: part.thoughtSignature,
214
252
  };
@@ -359,8 +397,8 @@ export class GeminiChatModel extends ChatModel {
359
397
  };
360
398
  return { tools, toolConfig: { functionCallingConfig } };
361
399
  }
362
- async buildVideoContentParts(media, options) {
363
- const { path: filePath, mimeType: fileMimeType } = await this.transformFileType("local", media, options);
400
+ async buildVideoContentParts(media) {
401
+ const { path: filePath, mimeType: fileMimeType } = await this.transformFileType("local", media);
364
402
  if (filePath) {
365
403
  const stats = await nodejs.fs.stat(filePath);
366
404
  const fileSizeInBytes = stats.size;
@@ -391,7 +429,7 @@ export class GeminiChatModel extends ChatModel {
391
429
  }
392
430
  }
393
431
  }
394
- async buildContents(input, options) {
432
+ async buildContents(input) {
395
433
  const result = {
396
434
  contents: [],
397
435
  };
@@ -435,55 +473,46 @@ export class GeminiChatModel extends ChatModel {
435
473
  .find((c) => c?.id === msg.toolCallId);
436
474
  if (!call)
437
475
  throw new Error(`Tool call not found: ${msg.toolCallId}`);
438
- const output = parse(msg.content);
439
- const isError = "error" in output && Boolean(input.error);
440
- const response = {
441
- tool: call.function.name,
476
+ if (!msg.content)
477
+ throw new Error("Tool call must have content");
478
+ // parse tool result as a record
479
+ let toolResult;
480
+ {
481
+ let text;
482
+ if (typeof msg.content === "string")
483
+ text = msg.content;
484
+ else if (msg.content?.length === 1) {
485
+ const first = msg.content[0];
486
+ if (first?.type === "text")
487
+ text = first.text;
488
+ }
489
+ if (text) {
490
+ try {
491
+ const obj = parse(text);
492
+ if (isRecord(obj))
493
+ toolResult = obj;
494
+ }
495
+ catch {
496
+ // ignore
497
+ }
498
+ if (!toolResult)
499
+ toolResult = { result: text };
500
+ }
501
+ }
502
+ const functionResponse = {
503
+ id: msg.toolCallId,
504
+ name: call.function.name,
442
505
  };
443
- // NOTE: base on the documentation of gemini api, the content should include `output` field for successful result or `error` field for failed result,
444
- // and base on the actual test, add a tool field presenting the tool name can improve the LLM understanding that which tool is called.
445
- if (isError) {
446
- Object.assign(response, { status: "error" }, output);
506
+ if (toolResult) {
507
+ functionResponse.response = toolResult;
447
508
  }
448
509
  else {
449
- Object.assign(response, { status: "success" });
450
- if ("output" in output) {
451
- Object.assign(response, output);
452
- }
453
- else {
454
- Object.assign(response, { output });
455
- }
510
+ functionResponse.parts = await this.contentToParts(msg.content);
456
511
  }
457
- content.parts = [
458
- {
459
- functionResponse: {
460
- id: msg.toolCallId,
461
- name: call.function.name,
462
- response,
463
- },
464
- },
465
- ];
512
+ content.parts = [{ functionResponse }];
466
513
  }
467
- else if (typeof msg.content === "string") {
468
- content.parts = [{ text: msg.content }];
469
- }
470
- else if (Array.isArray(msg.content)) {
471
- content.parts = await Promise.all(msg.content.map(async (item) => {
472
- switch (item.type) {
473
- case "text":
474
- return { text: item.text };
475
- case "url":
476
- return { fileData: { fileUri: item.url, mimeType: item.mimeType } };
477
- case "file": {
478
- const part = await this.buildVideoContentParts(item, options);
479
- if (part)
480
- return part;
481
- return { inlineData: { data: item.data, mimeType: item.mimeType } };
482
- }
483
- case "local":
484
- throw new Error(`Unsupported local file: ${item.path}, it should be converted to base64 at ChatModel`);
485
- }
486
- }));
514
+ else if (msg.content) {
515
+ content.parts = await this.contentToParts(msg.content);
487
516
  }
488
517
  return content;
489
518
  }))).filter(isNonNullable);
@@ -494,6 +523,26 @@ export class GeminiChatModel extends ChatModel {
494
523
  }
495
524
  return result;
496
525
  }
526
+ async contentToParts(content) {
527
+ if (typeof content === "string")
528
+ return [{ text: content }];
529
+ return Promise.all(content.map(async (item) => {
530
+ switch (item.type) {
531
+ case "text":
532
+ return { text: item.text };
533
+ case "url":
534
+ return { fileData: { fileUri: item.url, mimeType: item.mimeType } };
535
+ case "file": {
536
+ const part = await this.buildVideoContentParts(item);
537
+ if (part)
538
+ return part;
539
+ return { inlineData: { data: item.data, mimeType: item.mimeType } };
540
+ }
541
+ case "local":
542
+ throw new Error(`Unsupported local file: ${item.path}, it should be converted to base64 at ChatModel`);
543
+ }
544
+ }));
545
+ }
497
546
  ensureMessagesHasUserMessage(systems, contents) {
498
547
  // no messages but system messages
499
548
  if (!contents.length && systems.length) {
@@ -28,7 +28,7 @@ export declare class GeminiImageModel extends ImageModel<GeminiImageModelInput,
28
28
  * @param input The input to process
29
29
  * @returns The generated response
30
30
  */
31
- process(input: GeminiImageModelInput, options: AgentInvokeOptions): Promise<ImageModelOutput>;
31
+ process(input: GeminiImageModelInput, _options: AgentInvokeOptions): Promise<ImageModelOutput>;
32
32
  private generateImageByImagenModel;
33
33
  private generateImageByGeminiModel;
34
34
  }
@@ -49,7 +49,7 @@ export class GeminiImageModel extends ImageModel {
49
49
  * @param input The input to process
50
50
  * @returns The generated response
51
51
  */
52
- async process(input, options) {
52
+ async process(input, _options) {
53
53
  const model = input.modelOptions?.model || this.credential.model;
54
54
  const responseFormat = input.responseFormat || "base64";
55
55
  if (responseFormat === "url") {
@@ -58,7 +58,7 @@ export class GeminiImageModel extends ImageModel {
58
58
  if (model.includes("imagen")) {
59
59
  return this.generateImageByImagenModel(input);
60
60
  }
61
- return this.generateImageByGeminiModel(input, options);
61
+ return this.generateImageByGeminiModel(input);
62
62
  }
63
63
  async generateImageByImagenModel(input) {
64
64
  const model = input.modelOptions?.model || this.credential.model;
@@ -97,7 +97,7 @@ export class GeminiImageModel extends ImageModel {
97
97
  model,
98
98
  };
99
99
  }
100
- async generateImageByGeminiModel(input, options) {
100
+ async generateImageByGeminiModel(input) {
101
101
  const model = input.modelOptions?.model || this.credential.model;
102
102
  const mergedInput = { ...this.modelOptions, ...input.modelOptions, ...input };
103
103
  const inputKeys = [
@@ -132,7 +132,7 @@ export class GeminiImageModel extends ImageModel {
132
132
  "imageConfig",
133
133
  ];
134
134
  const images = await Promise.all(flat(input.image).map(async (image) => {
135
- const { data, mimeType } = await this.transformFileType("file", image, options);
135
+ const { data, mimeType } = await this.transformFileType("file", image);
136
136
  return { inlineData: { data, mimeType } };
137
137
  }));
138
138
  const response = await this.client.models.generateContent({
@@ -85,7 +85,7 @@ export class GeminiVideoModel extends VideoModel {
85
85
  if (mergedInput.personGeneration)
86
86
  config.personGeneration = mergedInput.personGeneration;
87
87
  if (mergedInput.lastFrame) {
88
- config.lastFrame = await this.transformFileType("file", mergedInput.lastFrame, options).then((file) => {
88
+ config.lastFrame = await this.transformFileType("file", mergedInput.lastFrame).then((file) => {
89
89
  return {
90
90
  imageBytes: file.data,
91
91
  mimeType: file.mimeType,
@@ -94,7 +94,7 @@ export class GeminiVideoModel extends VideoModel {
94
94
  }
95
95
  if (mergedInput.referenceImages) {
96
96
  config.referenceImages = await Promise.all(mergedInput.referenceImages.map(async (image) => {
97
- return await this.transformFileType("file", image, options).then((file) => {
97
+ return await this.transformFileType("file", image).then((file) => {
98
98
  return {
99
99
  image: {
100
100
  imageBytes: file.data,
@@ -110,7 +110,7 @@ export class GeminiVideoModel extends VideoModel {
110
110
  config,
111
111
  };
112
112
  if (mergedInput.image) {
113
- params.image = await this.transformFileType("file", mergedInput.image, options).then((file) => {
113
+ params.image = await this.transformFileType("file", mergedInput.image).then((file) => {
114
114
  return {
115
115
  imageBytes: file.data,
116
116
  mimeType: file.mimeType,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@aigne/gemini",
3
- "version": "0.14.16-beta.9",
3
+ "version": "0.14.17-beta",
4
4
  "description": "AIGNE Gemini SDK for integrating with Google's Gemini AI models",
5
5
  "publishConfig": {
6
6
  "access": "public"
@@ -40,8 +40,8 @@
40
40
  "yaml": "^2.8.1",
41
41
  "zod": "^3.25.67",
42
42
  "zod-to-json-schema": "^3.24.6",
43
- "@aigne/platform-helpers": "^0.6.7-beta",
44
- "@aigne/core": "^1.72.0-beta.8"
43
+ "@aigne/core": "^1.73.0-beta",
44
+ "@aigne/platform-helpers": "^0.6.7"
45
45
  },
46
46
  "devDependencies": {
47
47
  "@types/bun": "^1.2.22",
@@ -49,7 +49,7 @@
49
49
  "npm-run-all": "^4.1.5",
50
50
  "rimraf": "^6.0.1",
51
51
  "typescript": "^5.9.2",
52
- "@aigne/test-utils": "^0.5.69-beta.8"
52
+ "@aigne/test-utils": "^0.5.70-beta"
53
53
  },
54
54
  "scripts": {
55
55
  "lint": "tsc --noEmit",