@aigne/gemini 0.14.2-beta.9 → 0.14.2
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
- package/CHANGELOG.md +54 -0
- package/lib/cjs/gemini-chat-model.js +55 -13
- package/lib/esm/gemini-chat-model.js +56 -14
- package/package.json +5 -4
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,59 @@
 # Changelog
 
+## [0.14.2](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.2-beta.12...gemini-v0.14.2) (2025-10-19)
+
+
+### Dependencies
+
+* The following workspace dependencies were updated
+  * dependencies
+    * @aigne/core bumped to 1.63.0
+  * devDependencies
+    * @aigne/test-utils bumped to 0.5.55
+
+## [0.14.2-beta.12](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.2-beta.11...gemini-v0.14.2-beta.12) (2025-10-17)
+
+
+### Dependencies
+
+* The following workspace dependencies were updated
+  * dependencies
+    * @aigne/core bumped to 1.63.0-beta.12
+  * devDependencies
+    * @aigne/test-utils bumped to 0.5.55-beta.12
+
+## [0.14.2-beta.11](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.2-beta.10...gemini-v0.14.2-beta.11) (2025-10-17)
+
+
+### Bug Fixes
+
+* **gemini:** implement retry mechanism for empty responses with structured output fallback ([#638](https://github.com/AIGNE-io/aigne-framework/issues/638)) ([d33c8bb](https://github.com/AIGNE-io/aigne-framework/commit/d33c8bb9711aadddef9687d6cf472a179cd8ed9c))
+
+
+### Dependencies
+
+* The following workspace dependencies were updated
+  * dependencies
+    * @aigne/core bumped to 1.63.0-beta.11
+  * devDependencies
+    * @aigne/test-utils bumped to 0.5.55-beta.11
+
+## [0.14.2-beta.10](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.2-beta.9...gemini-v0.14.2-beta.10) (2025-10-16)
+
+
+### Bug Fixes
+
+* correct calculate token usage for gemini model ([7fd1328](https://github.com/AIGNE-io/aigne-framework/commit/7fd13289d3d0f8e062211f7c6dd5cb56e5318c1b))
+
+
+### Dependencies
+
+* The following workspace dependencies were updated
+  * dependencies
+    * @aigne/core bumped to 1.63.0-beta.10
+  * devDependencies
+    * @aigne/test-utils bumped to 0.5.55-beta.10
+
 ## [0.14.2-beta.9](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.14.2-beta.8...gemini-v0.14.2-beta.9) (2025-10-16)
 
 
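The beta.11 entry above ("retry mechanism for empty responses with structured output fallback") is the main behavioral change in this release. The sketch below restates that pattern outside the model class, assuming a simplified callModel helper in place of the package's internal this.process() call; the zod wrapper schema and the json_schema response format mirror the implementation shown in the lib diffs further down. A minimal sketch under those assumptions, not the package's exact code path:

import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";

// Hypothetical stand-in for a single model call; the real code re-enters
// this.process(input) on the chat model itself.
type CallModel = (options: {
  responseFormat?: { type: "json_schema"; jsonSchema: { name: string; schema: unknown } };
}) => Promise<{ text?: string; json?: unknown }>;

// Wrapper schema used by the fallback call: one required "output" string.
const outputSchema = z.object({
  output: z.string().describe("The final answer from the model"),
});

async function generateWithEmptyResponseFallback(callModel: CallModel): Promise<string> {
  const first = await callModel({});
  if (first.text) return first.text;
  // Empty response: retry once in structured output mode so the model must
  // return JSON matching outputSchema, then unwrap the "output" field.
  const retry = await callModel({
    responseFormat: {
      type: "json_schema",
      jsonSchema: { name: "output", schema: zodToJsonSchema(outputSchema) },
    },
  });
  const parsed = outputSchema.safeParse(retry.json);
  if (!parsed.success) throw new Error("No response from the model");
  return parsed.data.output;
}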
package/lib/cjs/gemini-chat-model.js
CHANGED
@@ -3,11 +3,14 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.GeminiChatModel = void 0;
 const core_1 = require("@aigne/core");
 const logger_js_1 = require("@aigne/core/utils/logger.js");
+const model_utils_js_1 = require("@aigne/core/utils/model-utils.js");
 const type_utils_js_1 = require("@aigne/core/utils/type-utils.js");
 const uuid_1 = require("@aigne/uuid");
 const genai_1 = require("@google/genai");
+const zod_1 = require("zod");
+const zod_to_json_schema_1 = require("zod-to-json-schema");
 const GEMINI_DEFAULT_CHAT_MODEL = "gemini-2.0-flash";
-const …
+const OUTPUT_FUNCTION_NAME = "output";
 /**
  * Implementation of the ChatModel interface for Google's Gemini API
  *
@@ -75,7 +78,7 @@ class GeminiChatModel extends core_1.ChatModel {
             },
         };
         const response = await this.googleClient.models.generateContentStream(parameters);
-        …
+        let usage = {
             inputTokens: 0,
             outputTokens: 0,
         };
@@ -107,7 +110,7 @@ class GeminiChatModel extends core_1.ChatModel {
                 });
             }
             if (part.functionCall?.name) {
-                if (part.functionCall.name === …
+                if (part.functionCall.name === OUTPUT_FUNCTION_NAME) {
                     json = part.functionCall.args;
                 }
                 else {
@@ -126,8 +129,10 @@ class GeminiChatModel extends core_1.ChatModel {
                 }
             }
             if (chunk.usageMetadata) {
-                …
-                …
+                if (chunk.usageMetadata.promptTokenCount)
+                    usage.inputTokens = chunk.usageMetadata.promptTokenCount;
+                if (chunk.usageMetadata.candidatesTokenCount)
+                    usage.outputTokens = chunk.usageMetadata.candidatesTokenCount;
             }
         }
         if (input.responseFormat?.type === "json_schema") {
@@ -137,16 +142,53 @@ class GeminiChatModel extends core_1.ChatModel {
             else if (text) {
                 yield { delta: { json: { json: (0, core_1.safeParseJSON)(text) } } };
             }
-            else {
-                …
-                throw new core_1.StructuredOutputError("No JSON response from the model");
+            else if (!toolCalls.length) {
+                throw new Error("No JSON response from the model");
             }
         }
         else if (!toolCalls.length) {
+            // NOTE: gemini-2.5-pro sometimes returns an empty response,
+            // so we check here and retry with structured output mode (empty responses occur less frequently with tool calls)
             if (!text) {
-                logger_js_1.logger.…
-                …
-                …
+                logger_js_1.logger.warn("Empty response from Gemini, retrying with structured output mode");
+                try {
+                    const outputSchema = zod_1.z.object({
+                        output: zod_1.z.string().describe("The final answer from the model"),
+                    });
+                    const response = await this.process({
+                        ...input,
+                        responseFormat: {
+                            type: "json_schema",
+                            jsonSchema: {
+                                name: "output",
+                                schema: (0, zod_to_json_schema_1.zodToJsonSchema)(outputSchema),
+                            },
+                        },
+                    });
+                    const result = await (0, core_1.agentProcessResultToObject)(response);
+                    // Merge retry usage with the original usage
+                    usage = (0, model_utils_js_1.mergeUsage)(usage, result.usage);
+                    // Return the tool calls if retry has tool calls
+                    if (result.toolCalls?.length) {
+                        toolCalls.push(...result.toolCalls);
+                        yield { delta: { json: { toolCalls } } };
+                    }
+                    // Return the text from structured output of retry
+                    else {
+                        if (!result.json)
+                            throw new Error("Retrying with structured output mode got no json response");
+                        const parsed = outputSchema.safeParse(result.json);
+                        if (!parsed.success)
+                            throw new Error("Retrying with structured output mode got invalid json response");
+                        text = parsed.data.output;
+                        yield { delta: { text: { text } } };
+                        logger_js_1.logger.warn("Empty response from Gemini, retried with structured output mode successfully");
+                    }
+                }
+                catch (error) {
+                    logger_js_1.logger.error("Empty response from Gemini, retrying with structured output mode failed", error);
+                    throw new core_1.StructuredOutputError("No response from the model");
+                }
             }
         }
         yield { delta: { json: { usage, files: files.length ? files : undefined } } };
@@ -161,8 +203,8 @@ class GeminiChatModel extends core_1.ChatModel {
             config.tools.push({
                 functionDeclarations: [
                     {
-                        name: …
-                        description: "Output the final response…
+                        name: OUTPUT_FUNCTION_NAME,
+                        description: "Output the final response",
                         parametersJsonSchema: input.responseFormat.jsonSchema.schema,
                     },
                 ],
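The CJS build above also carries the beta.10 token-accounting fix: streamed chunks from @google/genai may report usageMetadata, and the model now copies promptTokenCount and candidatesTokenCount into the framework's inputTokens and outputTokens counters. A minimal sketch of that bookkeeping, with the chunk and usage shapes reduced to just the fields the diff reads:

// Chunk shape reduced to the usageMetadata fields used in the diff above;
// Usage matches the { inputTokens, outputTokens } pair the model yields at
// the end of the stream.
interface UsageMetadata {
  promptTokenCount?: number;
  candidatesTokenCount?: number;
}

interface Usage {
  inputTokens: number;
  outputTokens: number;
}

function accumulateUsage(chunks: Array<{ usageMetadata?: UsageMetadata }>): Usage {
  const usage: Usage = { inputTokens: 0, outputTokens: 0 };
  for (const chunk of chunks) {
    if (!chunk.usageMetadata) continue;
    // Keep the most recent non-zero counts reported on the stream.
    if (chunk.usageMetadata.promptTokenCount)
      usage.inputTokens = chunk.usageMetadata.promptTokenCount;
    if (chunk.usageMetadata.candidatesTokenCount)
      usage.outputTokens = chunk.usageMetadata.candidatesTokenCount;
  }
  return usage;
}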
package/lib/esm/gemini-chat-model.js
CHANGED
@@ -1,10 +1,13 @@
-import { ChatModel, StructuredOutputError, safeParseJSON, } from "@aigne/core";
+import { agentProcessResultToObject, ChatModel, StructuredOutputError, safeParseJSON, } from "@aigne/core";
 import { logger } from "@aigne/core/utils/logger.js";
+import { mergeUsage } from "@aigne/core/utils/model-utils.js";
 import { isNonNullable } from "@aigne/core/utils/type-utils.js";
 import { v7 } from "@aigne/uuid";
 import { FunctionCallingConfigMode, GoogleGenAI, } from "@google/genai";
+import { z } from "zod";
+import { zodToJsonSchema } from "zod-to-json-schema";
 const GEMINI_DEFAULT_CHAT_MODEL = "gemini-2.0-flash";
-const …
+const OUTPUT_FUNCTION_NAME = "output";
 /**
  * Implementation of the ChatModel interface for Google's Gemini API
  *
@@ -72,7 +75,7 @@ export class GeminiChatModel extends ChatModel {
             },
         };
         const response = await this.googleClient.models.generateContentStream(parameters);
-        …
+        let usage = {
             inputTokens: 0,
             outputTokens: 0,
         };
@@ -104,7 +107,7 @@ export class GeminiChatModel extends ChatModel {
                 });
             }
             if (part.functionCall?.name) {
-                if (part.functionCall.name === …
+                if (part.functionCall.name === OUTPUT_FUNCTION_NAME) {
                     json = part.functionCall.args;
                 }
                 else {
@@ -123,8 +126,10 @@ export class GeminiChatModel extends ChatModel {
                 }
             }
             if (chunk.usageMetadata) {
-                …
-                …
+                if (chunk.usageMetadata.promptTokenCount)
+                    usage.inputTokens = chunk.usageMetadata.promptTokenCount;
+                if (chunk.usageMetadata.candidatesTokenCount)
+                    usage.outputTokens = chunk.usageMetadata.candidatesTokenCount;
             }
         }
         if (input.responseFormat?.type === "json_schema") {
@@ -134,16 +139,53 @@ export class GeminiChatModel extends ChatModel {
             else if (text) {
                 yield { delta: { json: { json: safeParseJSON(text) } } };
             }
-            else {
-                …
-                throw new StructuredOutputError("No JSON response from the model");
+            else if (!toolCalls.length) {
+                throw new Error("No JSON response from the model");
             }
         }
         else if (!toolCalls.length) {
+            // NOTE: gemini-2.5-pro sometimes returns an empty response,
+            // so we check here and retry with structured output mode (empty responses occur less frequently with tool calls)
             if (!text) {
-                logger.…
-                …
-                …
+                logger.warn("Empty response from Gemini, retrying with structured output mode");
+                try {
+                    const outputSchema = z.object({
+                        output: z.string().describe("The final answer from the model"),
+                    });
+                    const response = await this.process({
+                        ...input,
+                        responseFormat: {
+                            type: "json_schema",
+                            jsonSchema: {
+                                name: "output",
+                                schema: zodToJsonSchema(outputSchema),
+                            },
+                        },
+                    });
+                    const result = await agentProcessResultToObject(response);
+                    // Merge retry usage with the original usage
+                    usage = mergeUsage(usage, result.usage);
+                    // Return the tool calls if retry has tool calls
+                    if (result.toolCalls?.length) {
+                        toolCalls.push(...result.toolCalls);
+                        yield { delta: { json: { toolCalls } } };
+                    }
+                    // Return the text from structured output of retry
+                    else {
+                        if (!result.json)
+                            throw new Error("Retrying with structured output mode got no json response");
+                        const parsed = outputSchema.safeParse(result.json);
+                        if (!parsed.success)
+                            throw new Error("Retrying with structured output mode got invalid json response");
+                        text = parsed.data.output;
+                        yield { delta: { text: { text } } };
+                        logger.warn("Empty response from Gemini, retried with structured output mode successfully");
+                    }
+                }
+                catch (error) {
+                    logger.error("Empty response from Gemini, retrying with structured output mode failed", error);
+                    throw new StructuredOutputError("No response from the model");
+                }
             }
         }
         yield { delta: { json: { usage, files: files.length ? files : undefined } } };
@@ -158,8 +200,8 @@ export class GeminiChatModel extends ChatModel {
             config.tools.push({
                 functionDeclarations: [
                     {
-                        name: …
-                        description: "Output the final response…
+                        name: OUTPUT_FUNCTION_NAME,
+                        description: "Output the final response",
                         parametersJsonSchema: input.responseFormat.jsonSchema.schema,
                     },
                 ],
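In both builds, json_schema response formats are implemented by registering a single function declaration named by OUTPUT_FUNCTION_NAME and reading the structured result back from functionCall.args. The standalone sketch below shows that pattern directly against @google/genai; the model name and prompt are placeholders, and the single non-streaming generateContent call stands in for the package's generateContentStream loop, so treat it as an illustration rather than the package's exact wiring.

import { GoogleGenAI } from "@google/genai";

const OUTPUT_FUNCTION_NAME = "output";

// Declare one "output" function whose parameters are the desired JSON schema,
// then read the structured payload back from the function call's arguments.
async function generateStructured(apiKey: string, jsonSchema: Record<string, unknown>) {
  const ai = new GoogleGenAI({ apiKey });
  const response = await ai.models.generateContent({
    model: "gemini-2.0-flash",
    contents: "Summarize the weather in Berlin as JSON.",
    config: {
      tools: [
        {
          functionDeclarations: [
            {
              name: OUTPUT_FUNCTION_NAME,
              description: "Output the final response",
              parametersJsonSchema: jsonSchema,
            },
          ],
        },
      ],
    },
  });
  for (const part of response.candidates?.[0]?.content?.parts ?? []) {
    // The "output" call carries the JSON that matches the requested schema.
    if (part.functionCall?.name === OUTPUT_FUNCTION_NAME) return part.functionCall.args;
  }
  throw new Error("No JSON response from the model");
}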
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@aigne/gemini",
-  "version": "0.14.2-beta.9",
+  "version": "0.14.2",
   "description": "AIGNE Gemini SDK for integrating with Google's Gemini AI models",
   "publishConfig": {
     "access": "public"
@@ -38,8 +38,9 @@
     "@aigne/uuid": "^13.0.1",
     "@google/genai": "^1.24.0",
     "zod": "^3.25.67",
-    "…
-    "@aigne/core": "^1.63.0…
+    "zod-to-json-schema": "^3.24.6",
+    "@aigne/core": "^1.63.0",
+    "@aigne/platform-helpers": "^0.6.3"
   },
   "devDependencies": {
     "@types/bun": "^1.2.22",
@@ -47,7 +48,7 @@
     "npm-run-all": "^4.1.5",
     "rimraf": "^6.0.1",
     "typescript": "^5.9.2",
-    "@aigne/test-utils": "^0.5.55…
+    "@aigne/test-utils": "^0.5.55"
   },
   "scripts": {
     "lint": "tsc --noEmit",
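The dependency changes mirror the code: zod was already a runtime dependency, and zod-to-json-schema joins it because the fallback has to hand Gemini a plain JSON Schema for its "output" function declaration. A minimal sketch of that conversion; the logged shape described in the comment is typical zod-to-json-schema output, not taken from this diff:

import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";

// The same wrapper schema the fallback builds, converted to the JSON Schema
// object that ends up in the "output" function declaration.
const outputSchema = z.object({
  output: z.string().describe("The final answer from the model"),
});

const jsonSchema = zodToJsonSchema(outputSchema);
// Roughly: { type: "object", properties: { output: { type: "string", ... } },
// required: ["output"], ... } plus a top-level $schema marker.
console.log(JSON.stringify(jsonSchema, null, 2));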