ag-common 0.0.875 → 0.0.877
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/api/helpers/google/gemini.js +83 -44
- package/dist/api/helpers/index.d.ts +1 -0
- package/dist/api/helpers/index.js +1 -0
- package/dist/api/helpers/retryOnError.d.ts +8 -0
- package/dist/api/helpers/retryOnError.js +57 -0
- package/dist/index.d.ts +0 -1
- package/dist/index.js +0 -1
- package/package.json +1 -1
|
@@ -15,14 +15,18 @@ const array_1 = require("../../../common/helpers/array");
|
|
|
15
15
|
const async_1 = require("../../../common/helpers/async");
|
|
16
16
|
const fetch_1 = require("../../../common/helpers/fetch");
|
|
17
17
|
const log_1 = require("../../../common/helpers/log");
|
|
18
|
+
const node_cache_1 = require("../../../common/helpers/node-cache");
|
|
19
|
+
const retryOnError_1 = require("../retryOnError");
|
|
18
20
|
const apikey_1 = require("./apikey");
|
|
19
21
|
let genAIs;
|
|
20
|
-
|
|
21
|
-
const
|
|
22
|
+
// Module-level cache of discovered Gemini model names; stdTTL is in seconds,
// so 86400 keeps the discovered list for one day before re-fetching.
const geminiModelsCache = new node_cache_1.TypedNodeCache({ stdTTL: 86400 });
// Versioned cache key so a future format change can invalidate old entries.
const geminiModelsCacheKey = 'gemini-models-v1';
// Hard-coded model list used when discovery is impossible (no API key) or
// fails (network/API error). Ordered roughly newest-first.
const FALLBACK_GEMINI_MODELS = [
    'gemini-3-flash-preview',
    'gemini-3-pro-preview',
    'gemini-2.5-pro',
    'gemini-2.5-flash',
    'gemini-2.5-flash-lite',
];
|
|
27
31
|
// Helper to sort models based on preference
|
|
28
32
|
const sortModelsByPreference = (models, prefer) => {
|
|
@@ -54,15 +58,53 @@ const sortModelsByPreference = (models, prefer) => {
|
|
|
54
58
|
// Default behavior - no sorting
|
|
55
59
|
return modelArray;
|
|
56
60
|
};
|
|
61
|
+
// Strip the API's "models/" resource prefix (if present) from a model name,
// e.g. "models/gemini-2.5-pro" -> "gemini-2.5-pro".
const normalizeModelName = (name) => (name.startsWith('models/') ? name.slice('models/'.length) : name);
|
|
62
|
+
/**
 * Discover the Gemini models that support generateContent, caching the result
 * for 24h. Falls back to FALLBACK_GEMINI_MODELS when no API key is available
 * or discovery fails. Returns a (possibly cached) array of model names.
 *
 * Fix: the models.list endpoint is paginated (default page size 50), so we
 * follow nextPageToken until exhausted instead of reading only the first page,
 * which could silently miss models.
 */
const getAvailableGeminiModels = () => __awaiter(void 0, void 0, void 0, function* () {
    var _a, _b;
    // Serve from cache when a previous discovery succeeded.
    const cachedModels = geminiModelsCache.get(geminiModelsCacheKey);
    if (cachedModels && cachedModels.length > 0) {
        return cachedModels;
    }
    // Any available key works for listing models; take the first combination.
    const keyServiceCombinations = (0, apikey_1.getAvailableCombinations)('gemini');
    const key = (_a = keyServiceCombinations[0]) === null || _a === void 0 ? void 0 : _a.key;
    if (!key) {
        (0, log_1.warn)('No GOOGLE_API_KEY available. Falling back to default Gemini models.');
        // Copy so callers can't mutate the module-level fallback list.
        return [...FALLBACK_GEMINI_MODELS];
    }
    try {
        // Collect every page of models before filtering.
        const rawModels = [];
        let pageToken;
        do {
            const url = new URL('https://generativelanguage.googleapis.com/v1beta/models');
            url.searchParams.set('key', key);
            if (pageToken) {
                url.searchParams.set('pageToken', pageToken);
            }
            const response = yield fetch(url);
            if (!response.ok) {
                throw new Error(`model list fetch failed with status ${response.status}`);
            }
            const payload = (yield response.json());
            rawModels.push(...((_b = payload.models) !== null && _b !== void 0 ? _b : []));
            pageToken = payload.nextPageToken;
        } while (pageToken);
        // Keep only generateContent-capable gemini models (no embedding models),
        // normalized and de-duplicated.
        const discoveredModels = [
            ...new Set(rawModels
                .filter((m) => { var _a; return ((_a = m.supportedGenerationMethods) !== null && _a !== void 0 ? _a : []).includes('generateContent'); })
                .map((m) => { var _a; return normalizeModelName((_a = m.name) !== null && _a !== void 0 ? _a : ''); })
                .filter((name) => name.startsWith('gemini') && !name.includes('embedding'))),
        ];
        if (discoveredModels.length === 0) {
            // Treat an empty result as a failure so we fall back below.
            throw new Error('no gemini generateContent models discovered');
        }
        geminiModelsCache.set(geminiModelsCacheKey, discoveredModels);
        (0, log_1.info)(`loaded ${discoveredModels.length} Gemini models from API`);
        return discoveredModels;
    }
    catch (e) {
        (0, log_1.warn)(`Failed to load Gemini models from API. Falling back to defaults. ${String(e)}`);
        return [...FALLBACK_GEMINI_MODELS];
    }
});
|
|
57
98
|
// Helper to get available key+model combinations
|
|
58
|
-
const getAvailableGeminiCombinations = (prefer) => {
|
|
99
|
+
const getAvailableGeminiCombinations = (prefer) => __awaiter(void 0, void 0, void 0, function* () {
|
|
59
100
|
// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
|
|
60
101
|
if (!genAIs || genAIs.length === 0) {
|
|
61
102
|
const keyServiceCombinations = (0, apikey_1.getAvailableCombinations)('gemini');
|
|
62
103
|
const keys = keyServiceCombinations.map((combo) => combo.key);
|
|
63
104
|
genAIs = keys.map((k) => [k, new genai_1.GoogleGenAI({ apiKey: k })]);
|
|
64
105
|
}
|
|
65
|
-
const
|
|
106
|
+
const availableModels = yield getAvailableGeminiModels();
|
|
107
|
+
const sortedModels = sortModelsByPreference(availableModels, prefer);
|
|
66
108
|
const combinations = [];
|
|
67
109
|
for (const [key, ai] of genAIs) {
|
|
68
110
|
for (const model of sortedModels) {
|
|
@@ -74,7 +116,7 @@ const getAvailableGeminiCombinations = (prefer) => {
|
|
|
74
116
|
}
|
|
75
117
|
}
|
|
76
118
|
return combinations;
|
|
77
|
-
};
|
|
119
|
+
});
|
|
78
120
|
const geminiPromptImage = (_a) => __awaiter(void 0, [_a], void 0, function* ({ prompt, urls, ident, prefer, }) {
|
|
79
121
|
let images = [];
|
|
80
122
|
if (urls && urls.length > 0) {
|
|
@@ -94,53 +136,50 @@ const geminiPromptImage = (_a) => __awaiter(void 0, [_a], void 0, function* ({ p
|
|
|
94
136
|
});
|
|
95
137
|
exports.geminiPromptImage = geminiPromptImage;
|
|
96
138
|
/**
 * Send a text (+ optional inline images) prompt to the first available
 * Gemini key/model combination, retrying once after 5s on retryable errors.
 * Overloaded-key errors additionally block that key+model combination.
 * Returns the response text with code fences stripped and bare `undefined`
 * values rewritten to `null`.
 */
const geminiPromptDirect = (_a) => __awaiter(void 0, [_a], void 0, function* ({ prompt, images = [], ident, prefer, groundedSearch = false, }) {
    // Inline every image as base64 data for the generateContent payload.
    const imageParts = images.map((img) => ({
        inlineData: {
            data: Buffer.from(img.arraybuffer).toString('base64'),
            mimeType: img.type,
        },
    }));
    return (0, retryOnError_1.retryOnError)(`geminiPromptDirect:${ident ?? 'unknown'}`, () => __awaiter(void 0, void 0, void 0, function* () {
        const combinations = yield getAvailableGeminiCombinations(prefer);
        if (combinations.length === 0) {
            throw new Error('No available API key and model combinations');
        }
        // Combinations are preference-sorted; use the best one.
        const [key, ai, selectedModel] = combinations[0];
        // Prepare the request configuration
        const request = {
            model: selectedModel,
            contents: [{ parts: [{ text: prompt }, ...imageParts] }],
            config: {},
        };
        // Add grounded search configuration if enabled
        if (groundedSearch) {
            request.config.tools = [{ googleSearch: {} }];
        }
        (0, log_1.info)(`gem query on:${selectedModel}`, request);
        try {
            const response = yield ai.models.generateContent(request);
            // Strip markdown code fences and normalize dangling `undefined`
            // values so downstream JSON.parse calls don't choke.
            const rawtext = (response.text ?? '')
                .replace(/```(json)?/gi, '')
                .replace(/:[ ]+undefined/gim, ': null');
            (0, log_1.info)('gem response');
            (0, log_1.debug)(`gem prompt:${prompt}`, ident);
            (0, log_1.debug)(`gem response:${rawtext}`);
            (0, log_1.debug)(`gem query usage:${JSON.stringify(response.usageMetadata)}`);
            return rawtext;
        }
        catch (e) {
            const mod = `gemini-${selectedModel}`;
            if ((0, retryOnError_1.isOverloadedApiKeyError)(e)) {
                // Quota exhausted: park this key+model so the next attempt
                // picks a different combination.
                (0, log_1.warn)('throughput exceeded for gemini:', mod);
                (0, apikey_1.blockKeyService)(key, mod);
            }
            throw e;
        }
    }), 1, 5000);
});
|
|
145
184
|
exports.geminiPromptDirect = geminiPromptDirect;
|
|
146
185
|
const resolveGroundedUrl = (url) => __awaiter(void 0, void 0, void 0, function* () {
|
|
@@ -22,6 +22,7 @@ __exportStar(require("./cosmos"), exports);
|
|
|
22
22
|
__exportStar(require("./dynamo"), exports);
|
|
23
23
|
__exportStar(require("./enforceDynamoProvisionCap"), exports);
|
|
24
24
|
__exportStar(require("./google"), exports);
|
|
25
|
+
__exportStar(require("./retryOnError"), exports);
|
|
25
26
|
__exportStar(require("./s3"), exports);
|
|
26
27
|
__exportStar(require("./ses"), exports);
|
|
27
28
|
__exportStar(require("./sqs"), exports);
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
export declare const overloadedMessages: string[];
|
|
2
|
+
export declare const retryableErrorMessages: string[];
|
|
3
|
+
export declare const isOverloadedApiKeyError: (error: Error) => boolean;
|
|
4
|
+
export declare const isRetryableApiError: (error: Error) => boolean;
|
|
5
|
+
export declare function retryOnError<T>(
|
|
6
|
+
/** so we can log retries with useful info */
|
|
7
|
+
debugIdent: string, fn: () => Promise<T>, retries?: number, errorDelay?: number, errorCheck?: (error: Error) => boolean): Promise<T>;
|
|
8
|
+
export declare const sleep: (ms: number) => Promise<unknown>;
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
// TypeScript-emitted async/await downlevel helper: drives a generator whose
// `yield`ed values are awaited, returning a Promise for the final result.
// Reuses an already-installed __awaiter on `this` when present.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    // Wrap non-promise yields so every step can be .then()-chained.
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        // Resume the generator with the awaited value…
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        // …or throw the rejection back into it.
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        // Either finish with the return value or await the next yield.
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
|
|
11
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
12
|
+
exports.sleep = exports.isRetryableApiError = exports.isOverloadedApiKeyError = exports.retryableErrorMessages = exports.overloadedMessages = void 0;
|
|
13
|
+
exports.retryOnError = retryOnError;
|
|
14
|
+
const log_1 = require("../../common/helpers/log");
|
|
15
|
+
exports.overloadedMessages = [
|
|
16
|
+
'429',
|
|
17
|
+
'current usage',
|
|
18
|
+
'current quota',
|
|
19
|
+
'overloaded',
|
|
20
|
+
];
|
|
21
|
+
exports.retryableErrorMessages = [
|
|
22
|
+
...exports.overloadedMessages,
|
|
23
|
+
'500',
|
|
24
|
+
'503',
|
|
25
|
+
'safety',
|
|
26
|
+
'invalid_type',
|
|
27
|
+
'expected',
|
|
28
|
+
];
|
|
29
|
+
// Case-insensitive substring match: true when the error's message contains
// any of the given fragments.
const errorContainsAnyMessage = (error, messages) => {
    const haystack = error.message.toLowerCase();
    for (const fragment of messages) {
        if (haystack.includes(fragment.toLowerCase())) {
            return true;
        }
    }
    return false;
};
|
|
33
|
+
// True when the error message signals a rate-limit/quota/overload condition
// (see overloadedMessages above); used to decide whether to block a key.
const isOverloadedApiKeyError = (error) => errorContainsAnyMessage(error, exports.overloadedMessages);
exports.isOverloadedApiKeyError = isOverloadedApiKeyError;
// True when the error looks transient enough that retrying may succeed.
const isRetryableApiError = (error) => errorContainsAnyMessage(error, exports.retryableErrorMessages);
exports.isRetryableApiError = isRetryableApiError;
|
|
37
|
+
/**
 * Run `fn`, retrying up to `retries` times (recursively, waiting `errorDelay`
 * ms between attempts) when `errorCheck` deems the failure retryable.
 * Non-retryable errors, and failures after the retry budget is spent, are
 * rethrown unchanged.
 *
 * Fix: the original passed the raw thrown value to `errorCheck`, which reads
 * `error.message.toLowerCase()` — a thrown string/object would raise a
 * TypeError here and mask the real failure. We normalize non-Error throws
 * before checking, but still rethrow the original value.
 */
function retryOnError(debugIdent_1, fn_1) {
    return __awaiter(this, arguments, void 0, function* (
    /** so we can log retries with useful info */
    debugIdent, fn, retries = 1, errorDelay = 2000, errorCheck = exports.isRetryableApiError) {
        try {
            return yield fn();
        }
        catch (error) {
            // Guarantee errorCheck/logging see a real Error with a .message.
            const e = error instanceof Error ? error : new Error(String(error));
            if (retries > 0 && errorCheck(e)) {
                (0, log_1.info)(`Operation ${debugIdent} failed. Retrying after ${errorDelay}ms...`, e.message);
                yield (0, exports.sleep)(errorDelay);
                return retryOnError(debugIdent, fn, retries - 1, errorDelay, errorCheck);
            }
            // Preserve the original thrown value (and its stack) for callers.
            throw error;
        }
    });
}
|
|
56
|
+
// Awaitable delay: resolves (with undefined) after `ms` milliseconds.
const sleep = (ms) => new Promise((resolve) => { setTimeout(resolve, ms); });
|
|
57
|
+
exports.sleep = sleep;
|
package/dist/index.d.ts
CHANGED
package/dist/index.js
CHANGED
|
@@ -15,6 +15,5 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
|
|
|
15
15
|
};
|
|
16
16
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
17
17
|
__exportStar(require("./api"), exports);
|
|
18
|
-
__exportStar(require("./common"), exports);
|
|
19
18
|
__exportStar(require("./node"), exports);
|
|
20
19
|
__exportStar(require("./ui"), exports);
|
package/package.json
CHANGED